mm/dmapool.c: take lock only once in dma_pool_free()
dma_pool_free() scans for the page to free in the pool list while holding the pool lock. Then it releases the lock, basically to acquire it immediately again. Modify the code to only take the lock once. This will do some additional loops and computations with the lock held if memory debugging is activated. If it is not activated, the only new operations with this lock held are one if and one subtraction. Signed-off-by: Rolf Eike Beer <eike-kernel@sf-tec.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
43506fad21
commit
84bc227d7f
1 changed file with 6 additions and 8 deletions
14
mm/dmapool.c
14
mm/dmapool.c
|
@ -355,21 +355,16 @@ EXPORT_SYMBOL(dma_pool_alloc);
|
||||||
|
|
||||||
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
|
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
|
||||||
struct dma_page *page;
|
struct dma_page *page;
|
||||||
|
|
||||||
spin_lock_irqsave(&pool->lock, flags);
|
|
||||||
list_for_each_entry(page, &pool->page_list, page_list) {
|
list_for_each_entry(page, &pool->page_list, page_list) {
|
||||||
if (dma < page->dma)
|
if (dma < page->dma)
|
||||||
continue;
|
continue;
|
||||||
if (dma < (page->dma + pool->allocation))
|
if (dma < (page->dma + pool->allocation))
|
||||||
goto done;
|
|
||||||
}
|
|
||||||
page = NULL;
|
|
||||||
done:
|
|
||||||
spin_unlock_irqrestore(&pool->lock, flags);
|
|
||||||
return page;
|
return page;
|
||||||
}
|
}
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* dma_pool_free - put block back into dma pool
|
* dma_pool_free - put block back into dma pool
|
||||||
|
@ -386,8 +381,10 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
unsigned int offset;
|
unsigned int offset;
|
||||||
|
|
||||||
|
spin_lock_irqsave(&pool->lock, flags);
|
||||||
page = pool_find_page(pool, dma);
|
page = pool_find_page(pool, dma);
|
||||||
if (!page) {
|
if (!page) {
|
||||||
|
spin_unlock_irqrestore(&pool->lock, flags);
|
||||||
if (pool->dev)
|
if (pool->dev)
|
||||||
dev_err(pool->dev,
|
dev_err(pool->dev,
|
||||||
"dma_pool_free %s, %p/%lx (bad dma)\n",
|
"dma_pool_free %s, %p/%lx (bad dma)\n",
|
||||||
|
@ -401,6 +398,7 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
|
||||||
offset = vaddr - page->vaddr;
|
offset = vaddr - page->vaddr;
|
||||||
#ifdef DMAPOOL_DEBUG
|
#ifdef DMAPOOL_DEBUG
|
||||||
if ((dma - page->dma) != offset) {
|
if ((dma - page->dma) != offset) {
|
||||||
|
spin_unlock_irqrestore(&pool->lock, flags);
|
||||||
if (pool->dev)
|
if (pool->dev)
|
||||||
dev_err(pool->dev,
|
dev_err(pool->dev,
|
||||||
"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
|
"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
|
||||||
|
@ -418,6 +416,7 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
|
||||||
chain = *(int *)(page->vaddr + chain);
|
chain = *(int *)(page->vaddr + chain);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
spin_unlock_irqrestore(&pool->lock, flags);
|
||||||
if (pool->dev)
|
if (pool->dev)
|
||||||
dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
|
dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
|
||||||
"already free\n", pool->name,
|
"already free\n", pool->name,
|
||||||
|
@ -432,7 +431,6 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
|
||||||
memset(vaddr, POOL_POISON_FREED, pool->size);
|
memset(vaddr, POOL_POISON_FREED, pool->size);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
spin_lock_irqsave(&pool->lock, flags);
|
|
||||||
page->in_use--;
|
page->in_use--;
|
||||||
*(int *)vaddr = page->offset;
|
*(int *)vaddr = page->offset;
|
||||||
page->offset = offset;
|
page->offset = offset;
|
||||||
|
|
Loading…
Reference in a new issue