mm: use lockless radix-tree probe

Probing pages and radix_tree_tagged are lockless operations with the lockless
radix-tree.  Convert these users to RCU locking rather than using tree_lock.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Nick Piggin 2007-10-16 01:24:40 -07:00 committed by Linus Torvalds
parent 557ed1fa26
commit 0012818810
2 changed files with 5 additions and 9 deletions

View file

@@ -1022,17 +1022,15 @@ int test_set_page_writeback(struct page *page)
EXPORT_SYMBOL(test_set_page_writeback); EXPORT_SYMBOL(test_set_page_writeback);
/* /*
* Return true if any of the pages in the mapping are marged with the * Return true if any of the pages in the mapping are marked with the
* passed tag. * passed tag.
*/ */
int mapping_tagged(struct address_space *mapping, int tag) int mapping_tagged(struct address_space *mapping, int tag)
{ {
unsigned long flags;
int ret; int ret;
rcu_read_lock();
read_lock_irqsave(&mapping->tree_lock, flags);
ret = radix_tree_tagged(&mapping->page_tree, tag); ret = radix_tree_tagged(&mapping->page_tree, tag);
read_unlock_irqrestore(&mapping->tree_lock, flags); rcu_read_unlock();
return ret; return ret;
} }
EXPORT_SYMBOL(mapping_tagged); EXPORT_SYMBOL(mapping_tagged);

View file

@@ -149,20 +149,19 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
/* /*
* Preallocate as many pages as we will need. * Preallocate as many pages as we will need.
*/ */
read_lock_irq(&mapping->tree_lock);
for (page_idx = 0; page_idx < nr_to_read; page_idx++) { for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
pgoff_t page_offset = offset + page_idx; pgoff_t page_offset = offset + page_idx;
if (page_offset > end_index) if (page_offset > end_index)
break; break;
rcu_read_lock();
page = radix_tree_lookup(&mapping->page_tree, page_offset); page = radix_tree_lookup(&mapping->page_tree, page_offset);
rcu_read_unlock();
if (page) if (page)
continue; continue;
read_unlock_irq(&mapping->tree_lock);
page = page_cache_alloc_cold(mapping); page = page_cache_alloc_cold(mapping);
read_lock_irq(&mapping->tree_lock);
if (!page) if (!page)
break; break;
page->index = page_offset; page->index = page_offset;
@@ -171,7 +170,6 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
SetPageReadahead(page); SetPageReadahead(page);
ret++; ret++;
} }
read_unlock_irq(&mapping->tree_lock);
/* /*
* Now start the IO. We ignore I/O errors - if the page is not * Now start the IO. We ignore I/O errors - if the page is not