vfs: use kmalloc() to allocate fdmem if possible
Use kmalloc() to allocate fdmem if possible. vmalloc() is used as a fallback solution for fdmem allocation. A new helper function __free_fdtable() is introduced to reduce the lines of code. A potential bug — calling vfree() on memory allocated by kmalloc() — is fixed. [akpm@linux-foundation.org: use __GFP_NOWARN, uninline alloc_fdmem() and free_fdmem()] Signed-off-by: Changli Gao <xiaosuo@gmail.com> Cc: Alexander Viro <viro@zeniv.linux.org.uk> Cc: Jiri Slaby <jslaby@suse.cz> Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> Cc: Alexey Dobriyan <adobriyan@gmail.com> Cc: Ingo Molnar <mingo@elte.hu> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Avi Kivity <avi@redhat.com> Cc: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
06b1e104b7
commit
a892e2d7dc
1 changed file with 23 additions and 34 deletions
53
fs/file.c
53
fs/file.c
|
@@ -41,26 +41,25 @@ static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list);
|
||||||
|
|
||||||
static inline void *alloc_fdmem(unsigned int size)
|
static inline void *alloc_fdmem(unsigned int size)
|
||||||
{
|
{
|
||||||
if (size <= PAGE_SIZE)
|
void *data;
|
||||||
return kmalloc(size, GFP_KERNEL);
|
|
||||||
else
|
data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN);
|
||||||
|
if (data != NULL)
|
||||||
|
return data;
|
||||||
|
|
||||||
return vmalloc(size);
|
return vmalloc(size);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void free_fdarr(struct fdtable *fdt)
|
static void free_fdmem(void *ptr)
|
||||||
{
|
{
|
||||||
if (fdt->max_fds <= (PAGE_SIZE / sizeof(struct file *)))
|
is_vmalloc_addr(ptr) ? vfree(ptr) : kfree(ptr);
|
||||||
kfree(fdt->fd);
|
|
||||||
else
|
|
||||||
vfree(fdt->fd);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void free_fdset(struct fdtable *fdt)
|
static void __free_fdtable(struct fdtable *fdt)
|
||||||
{
|
{
|
||||||
if (fdt->max_fds <= (PAGE_SIZE * BITS_PER_BYTE / 2))
|
free_fdmem(fdt->fd);
|
||||||
kfree(fdt->open_fds);
|
free_fdmem(fdt->open_fds);
|
||||||
else
|
kfree(fdt);
|
||||||
vfree(fdt->open_fds);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void free_fdtable_work(struct work_struct *work)
|
static void free_fdtable_work(struct work_struct *work)
|
||||||
|
@@ -75,9 +74,8 @@ static void free_fdtable_work(struct work_struct *work)
|
||||||
spin_unlock_bh(&f->lock);
|
spin_unlock_bh(&f->lock);
|
||||||
while(fdt) {
|
while(fdt) {
|
||||||
struct fdtable *next = fdt->next;
|
struct fdtable *next = fdt->next;
|
||||||
vfree(fdt->fd);
|
|
||||||
free_fdset(fdt);
|
__free_fdtable(fdt);
|
||||||
kfree(fdt);
|
|
||||||
fdt = next;
|
fdt = next;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -98,7 +96,7 @@ void free_fdtable_rcu(struct rcu_head *rcu)
|
||||||
container_of(fdt, struct files_struct, fdtab));
|
container_of(fdt, struct files_struct, fdtab));
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
if (fdt->max_fds <= (PAGE_SIZE / sizeof(struct file *))) {
|
if (!is_vmalloc_addr(fdt->fd) && !is_vmalloc_addr(fdt->open_fds)) {
|
||||||
kfree(fdt->fd);
|
kfree(fdt->fd);
|
||||||
kfree(fdt->open_fds);
|
kfree(fdt->open_fds);
|
||||||
kfree(fdt);
|
kfree(fdt);
|
||||||
|
@@ -183,7 +181,7 @@ static struct fdtable * alloc_fdtable(unsigned int nr)
|
||||||
return fdt;
|
return fdt;
|
||||||
|
|
||||||
out_arr:
|
out_arr:
|
||||||
free_fdarr(fdt);
|
free_fdmem(fdt->fd);
|
||||||
out_fdt:
|
out_fdt:
|
||||||
kfree(fdt);
|
kfree(fdt);
|
||||||
out:
|
out:
|
||||||
|
@@ -213,9 +211,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
|
||||||
* caller and alloc_fdtable(). Cheaper to catch it here...
|
* caller and alloc_fdtable(). Cheaper to catch it here...
|
||||||
*/
|
*/
|
||||||
if (unlikely(new_fdt->max_fds <= nr)) {
|
if (unlikely(new_fdt->max_fds <= nr)) {
|
||||||
free_fdarr(new_fdt);
|
__free_fdtable(new_fdt);
|
||||||
free_fdset(new_fdt);
|
|
||||||
kfree(new_fdt);
|
|
||||||
return -EMFILE;
|
return -EMFILE;
|
||||||
}
|
}
|
||||||
/*
|
/*
|
||||||
|
@@ -231,9 +227,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
|
||||||
free_fdtable(cur_fdt);
|
free_fdtable(cur_fdt);
|
||||||
} else {
|
} else {
|
||||||
/* Somebody else expanded, so undo our attempt */
|
/* Somebody else expanded, so undo our attempt */
|
||||||
free_fdarr(new_fdt);
|
__free_fdtable(new_fdt);
|
||||||
free_fdset(new_fdt);
|
|
||||||
kfree(new_fdt);
|
|
||||||
}
|
}
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
@@ -323,11 +317,8 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
|
||||||
while (unlikely(open_files > new_fdt->max_fds)) {
|
while (unlikely(open_files > new_fdt->max_fds)) {
|
||||||
spin_unlock(&oldf->file_lock);
|
spin_unlock(&oldf->file_lock);
|
||||||
|
|
||||||
if (new_fdt != &newf->fdtab) {
|
if (new_fdt != &newf->fdtab)
|
||||||
free_fdarr(new_fdt);
|
__free_fdtable(new_fdt);
|
||||||
free_fdset(new_fdt);
|
|
||||||
kfree(new_fdt);
|
|
||||||
}
|
|
||||||
|
|
||||||
new_fdt = alloc_fdtable(open_files - 1);
|
new_fdt = alloc_fdtable(open_files - 1);
|
||||||
if (!new_fdt) {
|
if (!new_fdt) {
|
||||||
|
@@ -337,9 +328,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
|
||||||
|
|
||||||
/* beyond sysctl_nr_open; nothing to do */
|
/* beyond sysctl_nr_open; nothing to do */
|
||||||
if (unlikely(new_fdt->max_fds < open_files)) {
|
if (unlikely(new_fdt->max_fds < open_files)) {
|
||||||
free_fdarr(new_fdt);
|
__free_fdtable(new_fdt);
|
||||||
free_fdset(new_fdt);
|
|
||||||
kfree(new_fdt);
|
|
||||||
*errorp = -EMFILE;
|
*errorp = -EMFILE;
|
||||||
goto out_release;
|
goto out_release;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in a new issue