staging/lustre/llite: handle io init failure in ll_fault_io_init()
In ll_fault_io_init(), if cl_io_init() has failed then clean up and return an ERR_PTR(). This fixes an oops in the page fault handling code when a partially initialized io is used. In ll_page_mkwrite0(), do not call cl_io_fini() on an ERR_PTR().

Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-3487
Lustre-change: http://review.whamcloud.com/6735
Signed-off-by: John L. Hammond <john.hammond@intel.com>
Reviewed-by: Lai Siyao <lai.siyao@intel.com>
Reviewed-by: Jinshan Xiong <jinshan.xiong@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: Peng Tao <tao.peng@emc.com>
Signed-off-by: Andreas Dilger <andreas.dilger@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
78eb909271
commit
8a48df7080
1 changed file with 21 additions and 15 deletions
|
@ -111,6 +111,7 @@ struct cl_io *ll_fault_io_init(struct vm_area_struct *vma,
|
|||
struct cl_io *io;
|
||||
struct cl_fault_io *fio;
|
||||
struct lu_env *env;
|
||||
int rc;
|
||||
ENTRY;
|
||||
|
||||
*env_ret = NULL;
|
||||
|
@ -151,17 +152,22 @@ struct cl_io *ll_fault_io_init(struct vm_area_struct *vma,
|
|||
CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
|
||||
fio->ft_index, fio->ft_executable);
|
||||
|
||||
if (cl_io_init(env, io, CIT_FAULT, io->ci_obj) == 0) {
|
||||
rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
|
||||
if (rc == 0) {
|
||||
struct ccc_io *cio = ccc_env_io(env);
|
||||
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
|
||||
|
||||
LASSERT(cio->cui_cl.cis_io == io);
|
||||
|
||||
/* mmap lock must be MANDATORY
|
||||
* it has to cache pages. */
|
||||
/* mmap lock must be MANDATORY it has to cache
|
||||
* pages. */
|
||||
io->ci_lockreq = CILR_MANDATORY;
|
||||
|
||||
cio->cui_fd = fd;
|
||||
} else {
|
||||
LASSERT(rc < 0);
|
||||
cl_io_fini(env, io);
|
||||
cl_env_nested_put(nest, env);
|
||||
io = ERR_PTR(rc);
|
||||
}
|
||||
|
||||
return io;
|
||||
|
@ -189,7 +195,7 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
|
|||
|
||||
result = io->ci_result;
|
||||
if (result < 0)
|
||||
GOTO(out, result);
|
||||
GOTO(out_io, result);
|
||||
|
||||
io->u.ci_fault.ft_mkwrite = 1;
|
||||
io->u.ci_fault.ft_writable = 1;
|
||||
|
@ -251,14 +257,14 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
|
|||
}
|
||||
EXIT;
|
||||
|
||||
out:
|
||||
out_io:
|
||||
cl_io_fini(env, io);
|
||||
cl_env_nested_put(&nest, env);
|
||||
|
||||
out:
|
||||
CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
|
||||
|
||||
LASSERT(ergo(result == 0, PageLocked(vmpage)));
|
||||
return(result);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
|
|
Loading…
Add table
Reference in a new issue