ext4: Allow parallel DIO reads
We can easily support parallel direct IO reads. We only have to make sure we cannot expose uninitialized data by reading an allocated block to which data has not been written yet, or which has already been truncated. That is easily achieved by holding inode_lock in shared mode - that excludes all writes, truncates, and hole punches. We also have to guard against page writeback allocating blocks for delay-allocated pages - that race is handled by the fact that we write back all the pages in the affected range and the lock protects us from new pages being created there. Signed-off-by: Jan Kara <jack@suse.cz> Signed-off-by: Theodore Ts'o <tytso@mit.edu>
This commit is contained in:
parent
cca32b7eeb
commit
16c5468859
1 changed file with 18 additions and 22 deletions
|
@ -3528,35 +3528,31 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
|
|||
|
||||
static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
|
||||
{
|
||||
int unlocked = 0;
|
||||
struct inode *inode = iocb->ki_filp->f_mapping->host;
|
||||
struct address_space *mapping = iocb->ki_filp->f_mapping;
|
||||
struct inode *inode = mapping->host;
|
||||
ssize_t ret;
|
||||
|
||||
if (ext4_should_dioread_nolock(inode)) {
|
||||
/*
|
||||
* Nolock dioread optimization may be dynamically disabled
|
||||
* via ext4_inode_block_unlocked_dio(). Check inode's state
|
||||
* while holding extra i_dio_count ref.
|
||||
*/
|
||||
inode_dio_begin(inode);
|
||||
smp_mb();
|
||||
if (unlikely(ext4_test_inode_state(inode,
|
||||
EXT4_STATE_DIOREAD_LOCK)))
|
||||
inode_dio_end(inode);
|
||||
else
|
||||
unlocked = 1;
|
||||
}
|
||||
/*
|
||||
* Shared inode_lock is enough for us - it protects against concurrent
|
||||
* writes & truncates and since we take care of writing back page cache,
|
||||
* we are protected against page writeback as well.
|
||||
*/
|
||||
inode_lock_shared(inode);
|
||||
if (IS_DAX(inode)) {
|
||||
ret = dax_do_io(iocb, inode, iter, ext4_dio_get_block,
|
||||
NULL, unlocked ? 0 : DIO_LOCKING);
|
||||
ret = dax_do_io(iocb, inode, iter, ext4_dio_get_block, NULL, 0);
|
||||
} else {
|
||||
size_t count = iov_iter_count(iter);
|
||||
|
||||
ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
|
||||
iocb->ki_pos + count);
|
||||
if (ret)
|
||||
goto out_unlock;
|
||||
ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
|
||||
iter, ext4_dio_get_block,
|
||||
NULL, NULL,
|
||||
unlocked ? 0 : DIO_LOCKING);
|
||||
NULL, NULL, 0);
|
||||
}
|
||||
if (unlocked)
|
||||
inode_dio_end(inode);
|
||||
out_unlock:
|
||||
inode_unlock_shared(inode);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in a new issue