vma_adjust: fix the copying of anon_vma chains

When we move the boundary between two vmas, as mprotect does, we need
to make sure that the anon_vma of the pages that moved from one vma to
the other gets properly copied across.  That was not always the case in
this rather hard-to-follow code sequence: the anon_vma chain was always
cloned from 'vma', even when the pages being imported actually came
from 'next'.

Clarify the code by tracking an explicit 'exporter' vma alongside the
importer, and fix it so that the anon_vma is copied from that right
source.

Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Tested-by: Borislav Petkov <bp@alien8.de> [ "Yeah, not so much this one either" ]
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Linus Torvalds, 2010-04-10 15:22:30 -07:00
commit 287d97ac03
parent d0e9fe1758


--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -507,11 +507,12 @@ int vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	struct address_space *mapping = NULL;
 	struct prio_tree_root *root = NULL;
 	struct file *file = vma->vm_file;
-	struct anon_vma *anon_vma = NULL;
 	long adjust_next = 0;
 	int remove_next = 0;
 
 	if (next && !insert) {
+		struct vm_area_struct *exporter = NULL;
+
 		if (end >= next->vm_end) {
 			/*
 			 * vma expands, overlapping all the next, and
@@ -519,7 +520,7 @@ int vma_adjust(struct vm_area_struct *vma, unsigned long start,
 			 */
again:			remove_next = 1 + (end > next->vm_end);
 			end = next->vm_end;
-			anon_vma = next->anon_vma;
+			exporter = next;
 			importer = vma;
 		} else if (end > next->vm_start) {
 			/*
@@ -527,7 +528,7 @@ again:			remove_next = 1 + (end > next->vm_end);
 			 * mprotect case 5 shifting the boundary up.
 			 */
 			adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
-			anon_vma = next->anon_vma;
+			exporter = next;
 			importer = vma;
 		} else if (end < vma->vm_end) {
 			/*
@@ -536,28 +537,19 @@ again:			remove_next = 1 + (end > next->vm_end);
 			 * mprotect case 4 shifting the boundary down.
 			 */
 			adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
-			anon_vma = next->anon_vma;
+			exporter = vma;
 			importer = next;
 		}
-	}
 
-	/*
-	 * When changing only vma->vm_end, we don't really need anon_vma lock.
-	 */
-	if (vma->anon_vma && (insert || importer || start != vma->vm_start))
-		anon_vma = vma->anon_vma;
-	if (anon_vma) {
 		/*
 		 * Easily overlooked: when mprotect shifts the boundary,
 		 * make sure the expanding vma has anon_vma set if the
 		 * shrinking vma had, to cover any anon pages imported.
 		 */
-		if (importer && !importer->anon_vma) {
-			/* Block reverse map lookups until things are set up. */
-			if (anon_vma_clone(importer, vma)) {
+		if (exporter && exporter->anon_vma && !importer->anon_vma) {
+			if (anon_vma_clone(importer, exporter))
 				return -ENOMEM;
-			}
-			importer->anon_vma = anon_vma;
+			importer->anon_vma = exporter->anon_vma;
 		}
 	}
 
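
For readers following along, below is a minimal stand-alone sketch of the
exporter/importer pattern the patch introduces.  It mirrors the fixed logic
in vma_adjust() above, but it is an illustration only: the types are pared
down, anon_vma_clone() is stubbed out, and copy_anon_vma_across() is a
hypothetical helper, not a kernel function.

#include <stddef.h>
#include <errno.h>

/* Pared-down stand-ins for the kernel types; illustration only. */
struct anon_vma;

struct vm_area_struct {
	unsigned long vm_start, vm_end;
	struct anon_vma *anon_vma;
};

/* Stub: the real anon_vma_clone() duplicates src's anon_vma chain
 * onto dst and can fail under memory pressure. */
static int anon_vma_clone(struct vm_area_struct *dst,
			  struct vm_area_struct *src)
{
	(void)dst;
	(void)src;
	return 0;	/* pretend the chain was cloned successfully */
}

/*
 * Hypothetical helper mirroring the fixed logic: when the boundary
 * between vma and next moves to 'end', one side donates pages (the
 * exporter) and the other receives them (the importer).  The importer
 * must end up with an anon_vma chain covering the imported pages,
 * cloned from the exporter.
 */
static int copy_anon_vma_across(struct vm_area_struct *vma,
				struct vm_area_struct *next,
				unsigned long end)
{
	struct vm_area_struct *exporter = NULL, *importer = NULL;

	if (end >= next->vm_end) {		/* vma swallows all of next */
		exporter = next;
		importer = vma;
	} else if (end > next->vm_start) {	/* boundary shifts up: case 5 */
		exporter = next;
		importer = vma;
	} else if (end < vma->vm_end) {		/* boundary shifts down: case 4 */
		exporter = vma;			/* pages leave vma, not next */
		importer = next;
	}

	if (exporter && exporter->anon_vma && !importer->anon_vma) {
		if (anon_vma_clone(importer, exporter))
			return -ENOMEM;
		importer->anon_vma = exporter->anon_vma;
	}
	return 0;
}

The expand cases are where the old code went wrong: there the pages the
importer gains come from next, yet the old sequence called
anon_vma_clone(importer, vma), duplicating vma's own chain instead of
next's.  Making the exporter explicit ensures the clone always uses the
vma the pages actually came from.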