[PATCH] page_uptodate locking scalability
Use a bit spin lock in the first buffer of the page to synchronise async IO buffer completions, instead of the global page_uptodate_lock, which is showing some scalability problems.

Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit a39722034a
parent d6afe27bff
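For illustration only, here is a minimal userspace sketch of the idea (not kernel code; struct obj, obj_bit_lock() and obj_bit_unlock() are invented names for this example): the lock is a single bit embedded in the object's own state word, so completions for different pages no longer contend on one shared global spinlock. In the patch below, bit_spin_lock()/bit_spin_unlock() on BH_Uptodate_Lock in first->b_state play that role.

/* build: cc -std=c11 bitlock_sketch.c -- illustrative userspace sketch only */
#include <stdatomic.h>
#include <sched.h>
#include <stdio.h>

#define OBJ_LOCK_BIT 0			/* the lock lives in bit 0 of the state word */

struct obj {				/* stand-in for the first buffer_head of a page */
	atomic_ulong state;
};

static void obj_bit_lock(struct obj *o)
{
	/* spin until our fetch_or is the one that flips the bit from 0 to 1 */
	while (atomic_fetch_or_explicit(&o->state, 1UL << OBJ_LOCK_BIT,
					memory_order_acquire) & (1UL << OBJ_LOCK_BIT))
		sched_yield();		/* the kernel would use cpu_relax() here */
}

static void obj_bit_unlock(struct obj *o)
{
	/* clear the lock bit with release ordering */
	atomic_fetch_and_explicit(&o->state, ~(1UL << OBJ_LOCK_BIT),
				  memory_order_release);
}

int main(void)
{
	struct obj first;

	atomic_init(&first.state, 0UL);

	/* before the patch: every completion took one global spinlock;
	 * after: each page serialises on its own first buffer's state word */
	obj_bit_lock(&first);
	/* ... walk the other buffers attached to this page ... */
	obj_bit_unlock(&first);

	printf("state after unlock: %lu\n",
	       atomic_load_explicit(&first.state, memory_order_relaxed));
	return 0;
}

Note that bit_spin_lock() does not disable interrupts by itself, which is why the patch wraps it in local_irq_save()/local_irq_restore(), preserving the IRQ protection that spin_lock_irqsave() used to provide.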
 fs/buffer.c                 | 25 +++++++++++++++++--------
 include/linux/buffer_head.h |  3 +++
 2 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/fs/buffer.c b/fs/buffer.c
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -513,8 +513,8 @@ static void free_more_memory(void)
  */
 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 {
-	static DEFINE_SPINLOCK(page_uptodate_lock);
 	unsigned long flags;
+	struct buffer_head *first;
 	struct buffer_head *tmp;
 	struct page *page;
 	int page_uptodate = 1;
@@ -536,7 +536,9 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 	 * two buffer heads end IO at almost the same time and both
 	 * decide that the page is now completely done.
 	 */
-	spin_lock_irqsave(&page_uptodate_lock, flags);
+	first = page_buffers(page);
+	local_irq_save(flags);
+	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
 	clear_buffer_async_read(bh);
 	unlock_buffer(bh);
 	tmp = bh;
@@ -549,7 +551,8 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 		}
 		tmp = tmp->b_this_page;
 	} while (tmp != bh);
-	spin_unlock_irqrestore(&page_uptodate_lock, flags);
+	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+	local_irq_restore(flags);

 	/*
 	 * If none of the buffers had errors and they are all
@@ -561,7 +564,8 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 	return;

 still_busy:
-	spin_unlock_irqrestore(&page_uptodate_lock, flags);
+	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+	local_irq_restore(flags);
 	return;
 }

@@ -572,8 +576,8 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 {
 	char b[BDEVNAME_SIZE];
-	static DEFINE_SPINLOCK(page_uptodate_lock);
 	unsigned long flags;
+	struct buffer_head *first;
 	struct buffer_head *tmp;
 	struct page *page;

@@ -594,7 +598,10 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 		SetPageError(page);
 	}

-	spin_lock_irqsave(&page_uptodate_lock, flags);
+	first = page_buffers(page);
+	local_irq_save(flags);
+	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+
 	clear_buffer_async_write(bh);
 	unlock_buffer(bh);
 	tmp = bh->b_this_page;
@@ -605,12 +612,14 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 		}
 		tmp = tmp->b_this_page;
 	}
-	spin_unlock_irqrestore(&page_uptodate_lock, flags);
+	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+	local_irq_restore(flags);
 	end_page_writeback(page);
 	return;

 still_busy:
-	spin_unlock_irqrestore(&page_uptodate_lock, flags);
+	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+	local_irq_restore(flags);
 	return;
 }

diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -19,6 +19,9 @@ enum bh_state_bits {
 	BH_Dirty,	/* Is dirty */
 	BH_Lock,	/* Is locked */
 	BH_Req,		/* Has been submitted for I/O */
+	BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
+			  * IO completion of other buffers in the page
+			  */

 	BH_Mapped,	/* Has a disk mapping */
 	BH_New,		/* Disk mapping was newly created by get_block */