mm, vmalloc: iterate vmap_area_list in get_vmalloc_info()
This patch is a preparatory step toward removing vmlist entirely. To that end, we change the code that iterates over vmlist to iterate over vmap_area_list instead. It is a fairly trivial change, but one thing should be noted: vmlist lacks information about some areas in the vmalloc address space. For example, vm_map_ram() allocates an area in the vmalloc address space but does not link it into vmlist. Since providing full information about the vmalloc address space is the better approach, we stop using va->vm and use the vmap_area directly. This makes get_vmalloc_info() more precise. Signed-off-by: Joonsoo Kim <js1304@gmail.com> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Atsushi Kumagai <kumagai-atsushi@mxc.nes.nec.co.jp> Cc: Chris Metcalf <cmetcalf@tilera.com> Cc: Dave Anderson <anderson@redhat.com> Cc: Eric Biederman <ebiederm@xmission.com> Cc: Guan Xuetao <gxt@mprc.pku.edu.cn> Cc: Ingo Molnar <mingo@kernel.org> Cc: Vivek Goyal <vgoyal@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
e81ce85f96
commit
f98782ddd3
1 changed file with 37 additions and 33 deletions
70
mm/vmalloc.c
70
mm/vmalloc.c
|
@ -2671,46 +2671,50 @@ module_init(proc_vmalloc_init);
|
|||
|
||||
void get_vmalloc_info(struct vmalloc_info *vmi)
|
||||
{
|
||||
struct vm_struct *vma;
|
||||
struct vmap_area *va;
|
||||
unsigned long free_area_size;
|
||||
unsigned long prev_end;
|
||||
|
||||
vmi->used = 0;
|
||||
vmi->largest_chunk = 0;
|
||||
|
||||
if (!vmlist) {
|
||||
prev_end = VMALLOC_START;
|
||||
|
||||
spin_lock(&vmap_area_lock);
|
||||
|
||||
if (list_empty(&vmap_area_list)) {
|
||||
vmi->largest_chunk = VMALLOC_TOTAL;
|
||||
} else {
|
||||
vmi->largest_chunk = 0;
|
||||
|
||||
prev_end = VMALLOC_START;
|
||||
|
||||
read_lock(&vmlist_lock);
|
||||
|
||||
for (vma = vmlist; vma; vma = vma->next) {
|
||||
unsigned long addr = (unsigned long) vma->addr;
|
||||
|
||||
/*
|
||||
* Some archs keep another range for modules in vmlist
|
||||
*/
|
||||
if (addr < VMALLOC_START)
|
||||
continue;
|
||||
if (addr >= VMALLOC_END)
|
||||
break;
|
||||
|
||||
vmi->used += vma->size;
|
||||
|
||||
free_area_size = addr - prev_end;
|
||||
if (vmi->largest_chunk < free_area_size)
|
||||
vmi->largest_chunk = free_area_size;
|
||||
|
||||
prev_end = vma->size + addr;
|
||||
}
|
||||
|
||||
if (VMALLOC_END - prev_end > vmi->largest_chunk)
|
||||
vmi->largest_chunk = VMALLOC_END - prev_end;
|
||||
|
||||
read_unlock(&vmlist_lock);
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_for_each_entry(va, &vmap_area_list, list) {
|
||||
unsigned long addr = va->va_start;
|
||||
|
||||
/*
|
||||
* Some archs keep another range for modules in vmalloc space
|
||||
*/
|
||||
if (addr < VMALLOC_START)
|
||||
continue;
|
||||
if (addr >= VMALLOC_END)
|
||||
break;
|
||||
|
||||
if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
|
||||
continue;
|
||||
|
||||
vmi->used += (va->va_end - va->va_start);
|
||||
|
||||
free_area_size = addr - prev_end;
|
||||
if (vmi->largest_chunk < free_area_size)
|
||||
vmi->largest_chunk = free_area_size;
|
||||
|
||||
prev_end = va->va_end;
|
||||
}
|
||||
|
||||
if (VMALLOC_END - prev_end > vmi->largest_chunk)
|
||||
vmi->largest_chunk = VMALLOC_END - prev_end;
|
||||
|
||||
out:
|
||||
spin_unlock(&vmap_area_lock);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
|
Loading…
Reference in a new issue