Fix memory ordering bug in page reclaim
As noticed by Nick Piggin, we need to make sure that we check the page count before we check for PageDirty, since the dirty check is only valid if the count implies that we're the only possible ones holding the page. We always did do this, but the code needs a read-memory-barrier to make sure that the ordering is also honored by the CPU. (The writer side is ordered due to the atomic decrement and test on the page count, see the discussion on linux-kernel) Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
688ce17b85
commit
3d80636a0d
1 changed file with 9 additions and 4 deletions
13
mm/vmscan.c
13
mm/vmscan.c
|
@ -511,10 +511,11 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
|
|||
* PageDirty _after_ making sure that the page is freeable and
|
||||
* not in use by anybody. (pagecache + us == 2)
|
||||
*/
|
||||
if (page_count(page) != 2 || PageDirty(page)) {
|
||||
write_unlock_irq(&mapping->tree_lock);
|
||||
goto keep_locked;
|
||||
}
|
||||
if (unlikely(page_count(page) != 2))
|
||||
goto cannot_free;
|
||||
smp_rmb();
|
||||
if (unlikely(PageDirty(page)))
|
||||
goto cannot_free;
|
||||
|
||||
#ifdef CONFIG_SWAP
|
||||
if (PageSwapCache(page)) {
|
||||
|
@ -538,6 +539,10 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
|
|||
__pagevec_release_nonlru(&freed_pvec);
|
||||
continue;
|
||||
|
||||
cannot_free:
|
||||
write_unlock_irq(&mapping->tree_lock);
|
||||
goto keep_locked;
|
||||
|
||||
activate_locked:
|
||||
SetPageActive(page);
|
||||
pgactivate++;
|
||||
|
|
Loading…
Reference in a new issue