/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct block_device *bdev, int npages)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);
	bio->bi_bdev = bdev;
	bio->bi_private = NULL;

	return bio;
}

static void f2fs_read_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
}

static void f2fs_write_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct f2fs_sb_info *sbi = F2FS_SB(bvec->bv_page->mapping->host->i_sb);

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
			set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
			sbi->sb->s_flags |= MS_RDONLY;
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	} while (bvec >= bio->bi_io_vec);

	if (bio->bi_private)
		complete(bio->bi_private);

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

static void __submit_merged_bio(struct f2fs_sb_info *sbi,
				struct f2fs_bio_info *io,
				enum page_type type, bool sync, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);

	if (!io->bio)
		return;

	if (btype == META)
		rw |= REQ_META;

	if (is_read_io(rw)) {
		if (sync)
			rw |= READ_SYNC;
		submit_bio(rw, io->bio);
		trace_f2fs_submit_read_bio(sbi->sb, rw, type, io->bio);
		io->bio = NULL;
		return;
	}

	if (sync)
		rw |= WRITE_SYNC;
	if (type >= META_FLUSH)
		rw |= WRITE_FLUSH_FUA;

	/*
	 * META_FLUSH is only from the checkpoint procedure, and we should
	 * wait for this metadata bio for FS consistency.
	 */
	if (type == META_FLUSH) {
		DECLARE_COMPLETION_ONSTACK(wait);
		io->bio->bi_private = &wait;
		submit_bio(rw, io->bio);
		wait_for_completion(&wait);
	} else {
		submit_bio(rw, io->bio);
	}
	trace_f2fs_submit_write_bio(sbi->sb, rw, btype, io->bio);
	io->bio = NULL;
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
							bool sync, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	mutex_lock(&io->io_mutex);
	__submit_merged_bio(sbi, io, type, sync, rw);
	mutex_unlock(&io->io_mutex);
}
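/*
 * Illustrative sketch (not part of the original file): a caller that has
 * queued pages through f2fs_submit_page_mbio() flushes the per-type merged
 * bio like this, e.g. from the data writeback path later in this file:
 *
 *	f2fs_submit_merged_bio(sbi, DATA, true, WRITE);
 *
 * Passing sync == true upgrades the request to WRITE_SYNC (or READ_SYNC on
 * the read path), and META_FLUSH additionally waits for completion inside
 * __submit_merged_bio().
 */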
/*
 * Fill the locked page with data located in the block address.
 * Return the unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int rw)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct bio *bio;

	trace_f2fs_submit_page_bio(page, blk_addr, rw);

	/* Allocate a new bio */
	bio = __bio_alloc(bdev, 1);

	/* Initialize the bio */
	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = is_read_io(rw) ? f2fs_read_end_io : f2fs_write_end_io;

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(rw, bio);
	return 0;
}

void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
			block_t blk_addr, enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct block_device *bdev = sbi->sb->s_bdev;
	struct f2fs_bio_info *io;
	int bio_blocks;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, blk_addr);

	mutex_lock(&io->io_mutex);

	if (!is_read_io(rw))
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && io->last_block_in_bio != blk_addr - 1)
		__submit_merged_bio(sbi, io, type, true, rw);
alloc_new:
	if (io->bio == NULL) {
		bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
		io->bio = __bio_alloc(bdev, bio_blocks);
		io->bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
		io->bio->bi_end_io = is_read_io(rw) ? f2fs_read_end_io :
							f2fs_write_end_io;
		/*
		 * The end_io will be assigned at the submission phase.
		 * Until then, let bio_add_page() merge consecutive IOs as much
		 * as possible.
		 */
	}

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(sbi, io, type, true, rw);
		goto alloc_new;
	}

	io->last_block_in_bio = blk_addr;

	mutex_unlock(&io->io_mutex);
	trace_f2fs_submit_page_mbio(page, rw, type, blk_addr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE, false);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	sync_inode_page(dn);
	return 0;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (need_put)
		f2fs_put_dnode(dn);
	return err;
}
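/*
 * Illustrative sketch (not in the original file): the typical reservation
 * pattern, as used by f2fs_write_begin() later in this file. The dnode is
 * set up without a pre-held inode/node page, so f2fs_reserve_block() puts
 * the dnode itself once the block address is reserved:
 *
 *	struct dnode_of_data dn;
 *
 *	f2fs_lock_op(sbi);
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_reserve_block(&dn, index);
 *	f2fs_unlock_op(sbi);
 */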
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return 0;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	f2fs_bug_on(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 && fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext.ext_lock);
	if (need_update)
		sync_inode_page(dn);
	return;
}
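/*
 * Worked example (illustrative only, derived from the logic above): with a
 * cached extent of {fofs = 8, blk_addr = 100, len = 4}, writing file offset
 * 12 to block 104 hits the back-merge case and grows the extent to len = 5.
 * Writing offset 10 to an unrelated block instead splits the extent down to
 * len = 2, which is below F2FS_MIN_EXTENT_LEN, so the cache is dropped and
 * FI_NO_EXTENT is set.
 */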
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* By fallocate(), there is no cached page, but with NEW_ADDR */
	if (dn.data_blkaddr == NEW_ADDR)
		return ERR_PTR(-EINVAL);

	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) need to know whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;
repeat:
	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since
	 * its new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr may remain NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (page->mapping != mapping) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}
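/*
 * Illustrative sketch (not in the original file): dir.c-style callers use
 * get_lock_data_page() so that a hole is reported as -ENOENT rather than
 * silently read back as zeroes:
 *
 *	page = get_lock_data_page(dir, n);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *
 * find_data_page() differs in that it returns the page unlocked and may
 * issue READA instead of READ_SYNC when sync is false.
 */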
/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release a mutex by calling
 * f2fs_lock_op() and f2fs_unlock_op().
 * Note that npage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode, struct page *npage,
					pgoff_t index, bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, npage, npage, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err)
		return ERR_PTR(err);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
								READ_SYNC);
		if (err)
			return ERR_PTR(err);
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return ERR_PTR(-EIO);
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
		mark_inode_dirty_sync(inode);
	}
	return page;
}
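/*
 * Illustrative sketch (not in the original file): make_empty_dir() is the
 * one caller that passes a pre-held node page (npage). Other callers, such
 * as f2fs_add_link() mentioned above, pass NULL and let f2fs_reserve_block()
 * manage the dnode, roughly:
 *
 *	dentry_page = get_new_data_page(dir, NULL, block, true);
 *	if (IS_ERR(dentry_page))
 *		return PTR_ERR(dentry_page);
 */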
/*
 * This function should be used only by the data read flow; it does not
 * check the "create" flag that indicates block allocation.
 * The reason for this special functionality is to exploit the VFS readahead
 * mechanism.
 */
static int get_data_block_ro(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	pgoff_t pgofs;
	int err;

	/* Get the page offset from the block offset(iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result)) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
		return 0;
	}

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
	if (err) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, err);
		return (err == -ENOENT) ? 0 : err;
	}

	/* It does not support data allocation */
	f2fs_bug_on(create);

	if (dn.data_blkaddr != NEW_ADDR && dn.data_blkaddr != NULL_ADDR) {
		int i;
		unsigned int end_offset;

		end_offset = IS_INODE(dn.node_page) ?
				ADDRS_PER_INODE(F2FS_I(inode)) :
				ADDRS_PER_BLOCK;

		clear_buffer_new(bh_result);

		/* Give more consecutive addresses for the read ahead */
		for (i = 0; i < end_offset - dn.ofs_in_node; i++)
			if (((datablock_addr(dn.node_page,
						dn.ofs_in_node + i)) !=
				(dn.data_blkaddr + i)) || maxblocks == i)
				break;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
		bh_result->b_size = (((size_t)i) << blkbits);
	}
	f2fs_put_dnode(&dn);
	trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	return mpage_readpage(page, get_data_block_ro);
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, get_data_block_ro);
}

int do_write_data_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	block_t old_blk_addr, new_blk_addr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blk_addr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR, it is better to do in-place
	 * writes for the updated data.
	 */
	if (unlikely(old_blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(F2FS_SB(inode->i_sb), page,
						old_blk_addr);
	} else {
		write_data_page(inode, page, &dn,
				old_blk_addr, &new_blk_addr);
		update_extent_cache(new_blk_addr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}
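/*
 * Descriptive note (added for clarity, not in the original file): the write
 * above is two-pronged. When the block already has an on-disk address
 * (old_blk_addr != NEW_ADDR) and need_inplace_update() reports SSR pressure,
 * rewrite_data_page() overwrites old_blk_addr in place, avoiding a new
 * allocation. Otherwise write_data_page() allocates new_blk_addr
 * out-of-place, and update_extent_cache() repoints the node entry and the
 * per-inode extent at the new address.
 */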
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset;
	bool need_balance_fs = false;
	int err = 0;

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of the file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset) {
		if (S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		goto out;
	}

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(sbi->por_doing)) {
		err = AOP_WRITEPAGE_ACTIVATE;
		goto redirty_out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
		err = do_write_data_page(page);
	} else {
		f2fs_lock_op(sbi);
		err = do_write_data_page(page);
		f2fs_unlock_op(sbi);
		need_balance_fs = true;
	}
	if (err == -ENOENT)
		goto out;
	else if (err)
		goto redirty_out;

	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, true, WRITE);

	clear_cold_data(page);
out:
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	return 0;

redirty_out:
	wbc->pages_skipped++;
	set_page_dirty(page);
	return err;
}

#define MAX_DESIRED_PAGES_WP	4096

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	bool locked = false;
	int ret;
	long excess_nrtw = 0, desired_nrtw;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
		desired_nrtw = MAX_DESIRED_PAGES_WP;
		excess_nrtw = desired_nrtw - wbc->nr_to_write;
		wbc->nr_to_write = desired_nrtw;
	}

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, wbc->sync_mode == WB_SYNC_ALL, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write -= excess_nrtw;
	return ret;
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	f2fs_balance_fs(sbi);
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	f2fs_unlock_op(sbi);

	if (err) {
		f2fs_put_page(page, 1);
		return err;
	}

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
							READ_SYNC);
		if (err)
			return err;
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return -EIO;
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;
}
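/*
 * Illustrative sketch (not in the original file): the VFS pairs the two
 * hooks around every buffered write, roughly:
 *
 *	err = a_ops->write_begin(file, mapping, pos, len, flags,
 *					&page, &fsdata);
 *	(copy user data into the page)
 *	err = a_ops->write_end(file, mapping, pos, len, copied,
 *					page, fsdata);
 *
 * so f2fs_write_end() below only needs to dirty the page and push i_size.
 */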
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	SetPageUptodate(page);
	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
						get_data_block_ro);
}

static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	if (S_ISDIR(inode->i_mode) && PageDirty(page)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
	}
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, get_data_block_ro);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};
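/*
 * Usage note (illustrative, not in the original file): f2fs_dblock_aops is
 * wired up when a data-bearing inode is instantiated elsewhere in f2fs,
 * along the lines of:
 *
 *	inode->i_mapping->a_ops = &f2fs_dblock_aops;
 *
 * after which the VFS drives all page-cache I/O for the inode through the
 * callbacks defined in this file.
 */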