fs: Protect write paths by sb_start_write - sb_end_write
There are several entry points which dirty pages in a filesystem: mmap (handled by block_page_mkwrite()), buffered write (handled by __generic_file_aio_write()), splice write (generic_file_splice_write()), truncate, and fallocate (these can dirty the last partial page - handled inside each filesystem separately). Protect these places with sb_start_write() and sb_end_write().

->page_mkwrite() calls are particularly complex since they are called with mmap_sem held, so we cannot use the standard sb_start_write() due to lock ordering constraints. We solve the problem by using a special freeze protection, sb_start_pagefault(), which ranks below mmap_sem.

BugLink: https://bugs.launchpad.net/bugs/897421
Tested-by: Kamal Mostafa <kamal@canonical.com>
Tested-by: Peter M. Petrakis <peter.petrakis@canonical.com>
Tested-by: Dann Frazier <dann.frazier@canonical.com>
Tested-by: Massimo Morana <massimo.morana@canonical.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
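As a minimal sketch of the pattern each write path gains (the names my_fs_write_op() and my_fs_dirty_pages() are hypothetical placeholders; sb_start_write()/sb_end_write() are the freeze-protection helpers this series introduces):

#include <linux/fs.h>

/*
 * Sketch only, not part of this patch. sb_start_write() waits while a
 * superblock freeze is in progress, so the work between the pair can
 * never dirty pages on a frozen filesystem.
 */
static ssize_t my_fs_write_op(struct file *file, size_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	ssize_t ret;

	sb_start_write(inode->i_sb);		/* blocks if sb is frozen */
	ret = my_fs_dirty_pages(file, len);	/* hypothetical dirtying work */
	sb_end_write(inode->i_sb);		/* allows freezing to complete */
	return ret;
}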
commit 14da920014
parent 5d37e9e6de

5 changed files with 26 additions and 23 deletions
fs/buffer.c (22 changed lines)
@@ -2306,8 +2306,8 @@ EXPORT_SYMBOL(block_commit_write);
  * beyond EOF, then the page is guaranteed safe against truncation until we
  * unlock the page.
  *
- * Direct callers of this function should call vfs_check_frozen() so that page
- * fault does not busyloop until the fs is thawed.
+ * Direct callers of this function should protect against filesystem freezing
+ * using sb_start_write() - sb_end_write() functions.
  */
 int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 			 get_block_t get_block)
@@ -2345,18 +2345,7 @@ int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 
 	if (unlikely(ret < 0))
 		goto out_unlock;
-	/*
-	 * Freezing in progress? We check after the page is marked dirty and
-	 * with page lock held so if the test here fails, we are sure freezing
-	 * code will wait during syncing until the page fault is done - at that
-	 * point page will be dirty and unlocked so freezing code will write it
-	 * and writeprotect it again.
-	 */
 	set_page_dirty(page);
-	if (inode->i_sb->s_frozen != SB_UNFROZEN) {
-		ret = -EAGAIN;
-		goto out_unlock;
-	}
 	wait_on_page_writeback(page);
 	return 0;
 out_unlock:
@@ -2371,12 +2360,9 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 	int ret;
 	struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
 
-	/*
-	 * This check is racy but catches the common case. The check in
-	 * __block_page_mkwrite() is reliable.
-	 */
-	vfs_check_frozen(sb, SB_FREEZE_WRITE);
+	sb_start_pagefault(sb);
 	ret = __block_page_mkwrite(vma, vmf, get_block);
+	sb_end_pagefault(sb);
 	return block_page_mkwrite_return(ret);
 }
 EXPORT_SYMBOL(block_page_mkwrite);
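Per the updated comment above, a direct caller of __block_page_mkwrite() must now take freeze protection itself. A hedged sketch of such a caller, modeled on block_page_mkwrite() in this hunk (my_fs_page_mkwrite() and my_fs_get_block() are hypothetical):

#include <linux/buffer_head.h>

/* Sketch only: the my_fs_* names are placeholders, not part of this patch. */
static int my_fs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
	int ret;

	sb_start_pagefault(sb);	/* ranks below mmap_sem, unlike sb_start_write() */
	ret = __block_page_mkwrite(vma, vmf, my_fs_get_block);
	sb_end_pagefault(sb);
	return block_page_mkwrite_return(ret);
}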

fs/open.c
@@ -164,11 +164,13 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
 	if (IS_APPEND(inode))
 		goto out_putf;
 
+	sb_start_write(inode->i_sb);
 	error = locks_verify_truncate(inode, file, length);
 	if (!error)
 		error = security_path_truncate(&file->f_path);
 	if (!error)
 		error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, file);
+	sb_end_write(inode->i_sb);
 out_putf:
 	fput(file);
 out:
@@ -266,7 +268,10 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 	if (!file->f_op->fallocate)
 		return -EOPNOTSUPP;
 
-	return file->f_op->fallocate(file, mode, offset, len);
+	sb_start_write(inode->i_sb);
+	ret = file->f_op->fallocate(file, mode, offset, len);
+	sb_end_write(inode->i_sb);
+	return ret;
 }
 
 SYSCALL_DEFINE(fallocate)(int fd, int mode, loff_t offset, loff_t len)
fs/splice.c
@@ -996,6 +996,8 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
 	};
 	ssize_t ret;
 
+	sb_start_write(inode->i_sb);
+
 	pipe_lock(pipe);
 
 	splice_from_pipe_begin(&sd);
@@ -1034,6 +1036,7 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
 		*ppos += ret;
 		balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
 	}
+	sb_end_write(inode->i_sb);
 
 	return ret;
 }
mm/filemap.c (12 changed lines)
@@ -1718,6 +1718,7 @@ int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
 	int ret = VM_FAULT_LOCKED;
 
+	sb_start_pagefault(inode->i_sb);
 	file_update_time(vma->vm_file);
 	lock_page(page);
 	if (page->mapping != inode->i_mapping) {
@@ -1725,7 +1726,14 @@ int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 		ret = VM_FAULT_NOPAGE;
 		goto out;
 	}
+	/*
+	 * We mark the page dirty already here so that when freeze is in
+	 * progress, we are guaranteed that writeback during freezing will
+	 * see the dirty page and writeprotect it again.
+	 */
+	set_page_dirty(page);
 out:
+	sb_end_pagefault(inode->i_sb);
 	return ret;
 }
 EXPORT_SYMBOL(filemap_page_mkwrite);
@@ -2426,8 +2434,6 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 	count = ocount;
 	pos = *ppos;
 
-	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
-
 	/* We can write back this queue in page reclaim */
 	current->backing_dev_info = mapping->backing_dev_info;
 	written = 0;
@@ -2526,6 +2532,7 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 
 	BUG_ON(iocb->ki_pos != pos);
 
+	sb_start_write(inode->i_sb);
 	mutex_lock(&inode->i_mutex);
 	blk_start_plug(&plug);
 	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
@@ -2539,6 +2546,7 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 		ret = err;
 	}
 	blk_finish_plug(&plug);
+	sb_end_write(inode->i_sb);
 	return ret;
 }
 EXPORT_SYMBOL(generic_file_aio_write);
mm/filemap_xip.c
@@ -402,6 +402,8 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
 	loff_t pos;
 	ssize_t ret;
 
+	sb_start_write(inode->i_sb);
+
 	mutex_lock(&inode->i_mutex);
 
 	if (!access_ok(VERIFY_READ, buf, len)) {
@@ -412,8 +414,6 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
 	pos = *ppos;
 	count = len;
 
-	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
-
 	/* We can write back this queue in page reclaim */
 	current->backing_dev_info = mapping->backing_dev_info;
 
@@ -437,6 +437,7 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
 	current->backing_dev_info = NULL;
 out_up:
 	mutex_unlock(&inode->i_mutex);
+	sb_end_write(inode->i_sb);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(xip_file_write);