tmpfs: miscellaneous trivial cleanups
While it's at its least, make a number of boring nitpicky cleanups to shmem.c, mostly for consistency of variable naming. Things like "swap" instead of "entry", "pgoff_t index" instead of "unsigned long idx". And since everything else here is prefixed "shmem_", better change init_tmpfs() to shmem_init().

Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
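For orientation, here is a rough, self-contained sketch of the naming convention the cleanup moves toward ("pgoff_t index" and "swap" rather than "unsigned long idx" and "entry"). This is userspace stand-in code written for illustration only; lookup_swap(), the local typedefs, and main() are invented for the example and are not part of the patch:

/* Illustrative only: local stand-ins for the kernel's pgoff_t and swp_entry_t. */
#include <stdio.h>

typedef unsigned long pgoff_t;                       /* page offset within a file */
typedef struct { unsigned long val; } swp_entry_t;   /* swap entry */

/* New convention: "pgoff_t index" and "swp_entry_t swap". */
static int lookup_swap(const swp_entry_t *table, pgoff_t count, swp_entry_t swap)
{
	pgoff_t index;

	for (index = 0; index < count; index++)
		if (table[index].val == swap.val)
			return 1;	/* found */
	return 0;
}

int main(void)
{
	swp_entry_t table[2] = { { .val = 3 }, { .val = 7 } };

	printf("%d\n", lookup_swap(table, 2, (swp_entry_t){ .val = 7 }));	/* prints 1 */
	return 0;
}

The loop mirrors the shmem_unuse_inode() search in the diff below, just with the old identifiers replaced by the new ones.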
This commit is contained in:
parent 285b2c4fdd
commit 41ffe5d5ce
3 changed files with 109 additions and 111 deletions
include/linux/shmem_fs.h
@@ -47,7 +47,7 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
 /*
  * Functions in mm/shmem.c called directly from elsewhere:
  */
-extern int init_tmpfs(void);
+extern int shmem_init(void);
 extern int shmem_fill_super(struct super_block *sb, void *data, int silent);
 extern struct file *shmem_file_setup(const char *name,
 					loff_t size, unsigned long flags);
init/main.c
@@ -715,7 +715,7 @@ static void __init do_basic_setup(void)
 {
 	cpuset_init_smp();
 	usermodehelper_init();
-	init_tmpfs();
+	shmem_init();
 	driver_init();
 	init_irq_proc();
 	do_ctors();
mm/shmem.c (216 changed lines)
@@ -28,7 +28,6 @@
 #include <linux/file.h>
 #include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/percpu_counter.h>
 #include <linux/swap.h>
 
 static struct vfsmount *shm_mnt;
@@ -51,6 +50,7 @@ static struct vfsmount *shm_mnt;
 #include <linux/shmem_fs.h>
 #include <linux/writeback.h>
 #include <linux/blkdev.h>
+#include <linux/percpu_counter.h>
 #include <linux/splice.h>
 #include <linux/security.h>
 #include <linux/swapops.h>
@@ -63,7 +63,6 @@ static struct vfsmount *shm_mnt;
 #include <linux/magic.h>
 
 #include <asm/uaccess.h>
-#include <asm/div64.h>
 #include <asm/pgtable.h>
 
 #define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
@@ -201,7 +200,7 @@ static void shmem_free_inode(struct super_block *sb)
 }
 
 /**
- * shmem_recalc_inode - recalculate the size of an inode
+ * shmem_recalc_inode - recalculate the block usage of an inode
  * @inode: inode to recalc
  *
  * We have to calculate the free blocks since the mm can drop
@@ -356,19 +355,20 @@ static void shmem_evict_inode(struct inode *inode)
 	end_writeback(inode);
 }
 
-static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
+static int shmem_unuse_inode(struct shmem_inode_info *info,
+			     swp_entry_t swap, struct page *page)
 {
 	struct address_space *mapping = info->vfs_inode.i_mapping;
-	unsigned long idx;
+	pgoff_t index;
 	int error;
 
-	for (idx = 0; idx < SHMEM_NR_DIRECT; idx++)
-		if (shmem_get_swap(info, idx).val == entry.val)
+	for (index = 0; index < SHMEM_NR_DIRECT; index++)
+		if (shmem_get_swap(info, index).val == swap.val)
 			goto found;
 	return 0;
 found:
 	spin_lock(&info->lock);
-	if (shmem_get_swap(info, idx).val != entry.val) {
+	if (shmem_get_swap(info, index).val != swap.val) {
 		spin_unlock(&info->lock);
 		return 0;
 	}
@@ -387,15 +387,15 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
 	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
 	 * beneath us (pagelock doesn't help until the page is in pagecache).
 	 */
-	error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT);
+	error = add_to_page_cache_locked(page, mapping, index, GFP_NOWAIT);
 	/* which does mem_cgroup_uncharge_cache_page on error */
 
 	if (error != -ENOMEM) {
 		delete_from_swap_cache(page);
 		set_page_dirty(page);
-		shmem_put_swap(info, idx, (swp_entry_t){0});
+		shmem_put_swap(info, index, (swp_entry_t){0});
 		info->swapped--;
-		swap_free(entry);
+		swap_free(swap);
 		error = 1;	/* not an error, but entry was found */
 	}
 	spin_unlock(&info->lock);
@@ -405,9 +405,9 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
 /*
  * shmem_unuse() search for an eventually swapped out shmem page.
  */
-int shmem_unuse(swp_entry_t entry, struct page *page)
+int shmem_unuse(swp_entry_t swap, struct page *page)
 {
-	struct list_head *p, *next;
+	struct list_head *this, *next;
 	struct shmem_inode_info *info;
 	int found = 0;
 	int error;
@@ -432,8 +432,8 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
 	radix_tree_preload_end();
 
 	mutex_lock(&shmem_swaplist_mutex);
-	list_for_each_safe(p, next, &shmem_swaplist) {
-		info = list_entry(p, struct shmem_inode_info, swaplist);
+	list_for_each_safe(this, next, &shmem_swaplist) {
+		info = list_entry(this, struct shmem_inode_info, swaplist);
 		if (!info->swapped) {
 			spin_lock(&info->lock);
 			if (!info->swapped)
@@ -441,7 +441,7 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
 			spin_unlock(&info->lock);
 		}
 		if (info->swapped)
-			found = shmem_unuse_inode(info, entry, page);
+			found = shmem_unuse_inode(info, swap, page);
 		cond_resched();
 		if (found)
 			break;
@@ -467,7 +467,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 	struct shmem_inode_info *info;
 	swp_entry_t swap, oswap;
 	struct address_space *mapping;
-	unsigned long index;
+	pgoff_t index;
 	struct inode *inode;
 
 	BUG_ON(!PageLocked(page));
@@ -577,35 +577,33 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
 }
 #endif /* CONFIG_TMPFS */
 
-static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
-			struct shmem_inode_info *info, unsigned long idx)
+static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
+			struct shmem_inode_info *info, pgoff_t index)
 {
 	struct mempolicy mpol, *spol;
 	struct vm_area_struct pvma;
-	struct page *page;
 
 	spol = mpol_cond_copy(&mpol,
-			mpol_shared_policy_lookup(&info->policy, idx));
+			mpol_shared_policy_lookup(&info->policy, index));
 
 	/* Create a pseudo vma that just contains the policy */
 	pvma.vm_start = 0;
-	pvma.vm_pgoff = idx;
+	pvma.vm_pgoff = index;
 	pvma.vm_ops = NULL;
 	pvma.vm_policy = spol;
-	page = swapin_readahead(entry, gfp, &pvma, 0);
-	return page;
+	return swapin_readahead(swap, gfp, &pvma, 0);
 }
 
 static struct page *shmem_alloc_page(gfp_t gfp,
-			struct shmem_inode_info *info, unsigned long idx)
+			struct shmem_inode_info *info, pgoff_t index)
 {
 	struct vm_area_struct pvma;
 
 	/* Create a pseudo vma that just contains the policy */
 	pvma.vm_start = 0;
-	pvma.vm_pgoff = idx;
+	pvma.vm_pgoff = index;
 	pvma.vm_ops = NULL;
-	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
+	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
 
 	/*
 	 * alloc_page_vma() will drop the shared policy reference
@@ -614,19 +612,19 @@ static struct page *shmem_alloc_page(gfp_t gfp,
 }
 #else /* !CONFIG_NUMA */
 #ifdef CONFIG_TMPFS
-static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p)
+static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
 {
 }
 #endif /* CONFIG_TMPFS */
 
-static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
-			struct shmem_inode_info *info, unsigned long idx)
+static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
+			struct shmem_inode_info *info, pgoff_t index)
 {
-	return swapin_readahead(entry, gfp, NULL, 0);
+	return swapin_readahead(swap, gfp, NULL, 0);
 }
 
 static inline struct page *shmem_alloc_page(gfp_t gfp,
-			struct shmem_inode_info *info, unsigned long idx)
+			struct shmem_inode_info *info, pgoff_t index)
 {
 	return alloc_page(gfp);
 }
@@ -646,7 +644,7 @@ static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
  * vm. If we swap it in we mark it dirty since we also free the swap
  * entry since a page cannot live in both the swap and page cache
  */
-static int shmem_getpage_gfp(struct inode *inode, pgoff_t idx,
+static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
 {
 	struct address_space *mapping = inode->i_mapping;
@@ -657,10 +655,10 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t idx,
 	swp_entry_t swap;
 	int error;
 
-	if (idx > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
+	if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
 		return -EFBIG;
 repeat:
-	page = find_lock_page(mapping, idx);
+	page = find_lock_page(mapping, index);
 	if (page) {
 		/*
 		 * Once we can get the page lock, it must be uptodate:
@@ -681,7 +679,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t idx,
 	radix_tree_preload_end();
 
 	if (sgp != SGP_READ && !prealloc_page) {
-		prealloc_page = shmem_alloc_page(gfp, info, idx);
+		prealloc_page = shmem_alloc_page(gfp, info, index);
 		if (prealloc_page) {
 			SetPageSwapBacked(prealloc_page);
 			if (mem_cgroup_cache_charge(prealloc_page,
@@ -694,7 +692,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t idx,
 
 	spin_lock(&info->lock);
 	shmem_recalc_inode(inode);
-	swap = shmem_get_swap(info, idx);
+	swap = shmem_get_swap(info, index);
 	if (swap.val) {
 		/* Look it up and read it in.. */
 		page = lookup_swap_cache(swap);
@@ -703,9 +701,9 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t idx,
 			/* here we actually do the io */
 			if (fault_type)
 				*fault_type |= VM_FAULT_MAJOR;
-			page = shmem_swapin(swap, gfp, info, idx);
+			page = shmem_swapin(swap, gfp, info, index);
 			if (!page) {
-				swp_entry_t nswap = shmem_get_swap(info, idx);
+				swp_entry_t nswap = shmem_get_swap(info, index);
 				if (nswap.val == swap.val) {
 					error = -ENOMEM;
 					goto out;
@@ -740,7 +738,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t idx,
 		}
 
 		error = add_to_page_cache_locked(page, mapping,
-						idx, GFP_NOWAIT);
+						index, GFP_NOWAIT);
 		if (error) {
 			spin_unlock(&info->lock);
 			if (error == -ENOMEM) {
@@ -762,14 +760,14 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t idx,
 		}
 
 		delete_from_swap_cache(page);
-		shmem_put_swap(info, idx, (swp_entry_t){0});
+		shmem_put_swap(info, index, (swp_entry_t){0});
 		info->swapped--;
 		spin_unlock(&info->lock);
 		set_page_dirty(page);
 		swap_free(swap);
 
 	} else if (sgp == SGP_READ) {
-		page = find_get_page(mapping, idx);
+		page = find_get_page(mapping, index);
 		if (page && !trylock_page(page)) {
 			spin_unlock(&info->lock);
 			wait_on_page_locked(page);
@@ -793,12 +791,12 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t idx,
 		page = prealloc_page;
 		prealloc_page = NULL;
 
-		swap = shmem_get_swap(info, idx);
+		swap = shmem_get_swap(info, index);
 		if (swap.val)
 			mem_cgroup_uncharge_cache_page(page);
 		else
 			error = add_to_page_cache_lru(page, mapping,
-						idx, GFP_NOWAIT);
+						index, GFP_NOWAIT);
 		/*
 		 * At add_to_page_cache_lru() failure,
 		 * uncharge will be done automatically.
@@ -841,7 +839,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t idx,
 	 * but must also avoid reporting a spurious ENOSPC while working on a
 	 * full tmpfs.
 	 */
-	page = find_get_page(mapping, idx);
+	page = find_get_page(mapping, index);
 	spin_unlock(&info->lock);
 	if (page) {
 		page_cache_release(page);
@@ -872,20 +870,20 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 }
 
 #ifdef CONFIG_NUMA
-static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
+static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
 {
-	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
-	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
+	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
 }
 
 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
 					  unsigned long addr)
 {
-	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
-	unsigned long idx;
+	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+	pgoff_t index;
 
-	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
-	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
+	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
 }
 #endif
 
@@ -1016,7 +1014,8 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
 {
 	struct inode *inode = filp->f_path.dentry->d_inode;
 	struct address_space *mapping = inode->i_mapping;
-	unsigned long index, offset;
+	pgoff_t index;
+	unsigned long offset;
 	enum sgp_type sgp = SGP_READ;
 
 	/*
@@ -1032,7 +1031,8 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
 
 	for (;;) {
 		struct page *page = NULL;
-		unsigned long end_index, nr, ret;
+		pgoff_t end_index;
+		unsigned long nr, ret;
 		loff_t i_size = i_size_read(inode);
 
 		end_index = i_size >> PAGE_CACHE_SHIFT;
@@ -1270,8 +1270,9 @@ static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
 	buf->f_namelen = NAME_MAX;
 	if (sbinfo->max_blocks) {
 		buf->f_blocks = sbinfo->max_blocks;
-		buf->f_bavail = buf->f_bfree =
-			sbinfo->max_blocks - percpu_counter_sum(&sbinfo->used_blocks);
+		buf->f_bavail =
+		buf->f_bfree = sbinfo->max_blocks -
+				percpu_counter_sum(&sbinfo->used_blocks);
 	}
 	if (sbinfo->max_inodes) {
 		buf->f_files = sbinfo->max_inodes;
@@ -1480,8 +1481,8 @@ static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *n
 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
 	struct page *page = NULL;
-	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
-	nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
+	int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
+	nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
 	if (page)
 		unlock_page(page);
 	return page;
@@ -1592,7 +1593,6 @@ static int shmem_xattr_set(struct dentry *dentry, const char *name,
 	return err;
 }
 
-
 static const struct xattr_handler *shmem_xattr_handlers[] = {
 #ifdef CONFIG_TMPFS_POSIX_ACL
 	&generic_acl_access_handler,
@@ -2052,14 +2052,14 @@ static struct kmem_cache *shmem_inode_cachep;
 
 static struct inode *shmem_alloc_inode(struct super_block *sb)
 {
-	struct shmem_inode_info *p;
-	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
-	if (!p)
+	struct shmem_inode_info *info;
+	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
+	if (!info)
 		return NULL;
-	return &p->vfs_inode;
+	return &info->vfs_inode;
 }
 
-static void shmem_i_callback(struct rcu_head *head)
+static void shmem_destroy_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
 	INIT_LIST_HEAD(&inode->i_dentry);
@@ -2072,25 +2072,24 @@ static void shmem_destroy_inode(struct inode *inode)
 		/* only struct inode is valid if it's an inline symlink */
 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
 	}
-	call_rcu(&inode->i_rcu, shmem_i_callback);
+	call_rcu(&inode->i_rcu, shmem_destroy_callback);
 }
 
-static void init_once(void *foo)
+static void shmem_init_inode(void *foo)
 {
-	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
-
-	inode_init_once(&p->vfs_inode);
+	struct shmem_inode_info *info = foo;
+	inode_init_once(&info->vfs_inode);
 }
 
-static int init_inodecache(void)
+static int shmem_init_inodecache(void)
 {
 	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
 				sizeof(struct shmem_inode_info),
-				0, SLAB_PANIC, init_once);
+				0, SLAB_PANIC, shmem_init_inode);
 	return 0;
 }
 
-static void destroy_inodecache(void)
+static void shmem_destroy_inodecache(void)
 {
 	kmem_cache_destroy(shmem_inode_cachep);
 }
@@ -2187,21 +2186,20 @@ static const struct vm_operations_struct shmem_vm_ops = {
 #endif
 };
 
-
 static struct dentry *shmem_mount(struct file_system_type *fs_type,
 	int flags, const char *dev_name, void *data)
 {
 	return mount_nodev(fs_type, flags, data, shmem_fill_super);
 }
 
-static struct file_system_type tmpfs_fs_type = {
+static struct file_system_type shmem_fs_type = {
 	.owner = THIS_MODULE,
 	.name = "tmpfs",
 	.mount = shmem_mount,
 	.kill_sb = kill_litter_super,
 };
 
-int __init init_tmpfs(void)
+int __init shmem_init(void)
 {
 	int error;
 
@@ -2209,18 +2207,18 @@ int __init init_tmpfs(void)
 	if (error)
 		goto out4;
 
-	error = init_inodecache();
+	error = shmem_init_inodecache();
 	if (error)
 		goto out3;
 
-	error = register_filesystem(&tmpfs_fs_type);
+	error = register_filesystem(&shmem_fs_type);
 	if (error) {
 		printk(KERN_ERR "Could not register tmpfs\n");
 		goto out2;
 	}
 
-	shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
-				tmpfs_fs_type.name, NULL);
+	shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
+				shmem_fs_type.name, NULL);
 	if (IS_ERR(shm_mnt)) {
 		error = PTR_ERR(shm_mnt);
 		printk(KERN_ERR "Could not kern_mount tmpfs\n");
@@ -2229,9 +2227,9 @@ int __init init_tmpfs(void)
 	return 0;
 
 out1:
-	unregister_filesystem(&tmpfs_fs_type);
+	unregister_filesystem(&shmem_fs_type);
 out2:
-	destroy_inodecache();
+	shmem_destroy_inodecache();
 out3:
 	bdi_destroy(&shmem_backing_dev_info);
 out4:
@@ -2241,37 +2239,37 @@ int __init init_tmpfs(void)
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 /**
- * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
+ * mem_cgroup_get_shmem_target - find page or swap assigned to the shmem file
  * @inode: the inode to be searched
- * @pgoff: the offset to be searched
+ * @index: the page offset to be searched
  * @pagep: the pointer for the found page to be stored
- * @ent: the pointer for the found swap entry to be stored
+ * @swapp: the pointer for the found swap entry to be stored
  *
  * If a page is found, refcount of it is incremented. Callers should handle
  * these refcount.
  */
-void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
-					struct page **pagep, swp_entry_t *ent)
+void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t index,
+					struct page **pagep, swp_entry_t *swapp)
 {
-	swp_entry_t entry = { .val = 0 };
-	struct page *page = NULL;
 	struct shmem_inode_info *info = SHMEM_I(inode);
+	struct page *page = NULL;
+	swp_entry_t swap = {0};
 
-	if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
+	if ((index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
 		goto out;
 
 	spin_lock(&info->lock);
 #ifdef CONFIG_SWAP
-	entry = shmem_get_swap(info, pgoff);
-	if (entry.val)
-		page = find_get_page(&swapper_space, entry.val);
+	swap = shmem_get_swap(info, index);
+	if (swap.val)
+		page = find_get_page(&swapper_space, swap.val);
 	else
 #endif
-		page = find_get_page(inode->i_mapping, pgoff);
+		page = find_get_page(inode->i_mapping, index);
 	spin_unlock(&info->lock);
 out:
 	*pagep = page;
-	*ent = entry;
+	*swapp = swap;
 }
 #endif
 
@@ -2288,23 +2286,23 @@ void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
 
 #include <linux/ramfs.h>
 
-static struct file_system_type tmpfs_fs_type = {
+static struct file_system_type shmem_fs_type = {
 	.name = "tmpfs",
 	.mount = ramfs_mount,
 	.kill_sb = kill_litter_super,
 };
 
-int __init init_tmpfs(void)
+int __init shmem_init(void)
 {
-	BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);
+	BUG_ON(register_filesystem(&shmem_fs_type) != 0);
 
-	shm_mnt = kern_mount(&tmpfs_fs_type);
+	shm_mnt = kern_mount(&shmem_fs_type);
 	BUG_ON(IS_ERR(shm_mnt));
 
 	return 0;
 }
 
-int shmem_unuse(swp_entry_t entry, struct page *page)
+int shmem_unuse(swp_entry_t swap, struct page *page)
 {
 	return 0;
 }
@@ -2314,34 +2312,34 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
 	return 0;
 }
 
-void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
+void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 {
-	truncate_inode_pages_range(inode->i_mapping, start, end);
+	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
 }
 EXPORT_SYMBOL_GPL(shmem_truncate_range);
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 /**
- * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
+ * mem_cgroup_get_shmem_target - find page or swap assigned to the shmem file
  * @inode: the inode to be searched
- * @pgoff: the offset to be searched
+ * @index: the page offset to be searched
  * @pagep: the pointer for the found page to be stored
- * @ent: the pointer for the found swap entry to be stored
+ * @swapp: the pointer for the found swap entry to be stored
  *
  * If a page is found, refcount of it is incremented. Callers should handle
  * these refcount.
 */
-void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
-				struct page **pagep, swp_entry_t *ent)
+void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t index,
+				struct page **pagep, swp_entry_t *swapp)
 {
 	struct page *page = NULL;
 
-	if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
+	if ((index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
 		goto out;
-	page = find_get_page(inode->i_mapping, pgoff);
+	page = find_get_page(inode->i_mapping, index);
 out:
 	*pagep = page;
-	*ent = (swp_entry_t){ .val = 0 };
+	*swapp = (swp_entry_t){0};
 }
 #endif
 