[PATCH] IB/mthca: Use dma_alloc_coherent instead of pci_alloc_consistent
Switch all allocations of coherent memory from pci_alloc_consistent() to
dma_alloc_coherent(), so that we can pass GFP_KERNEL. This should help
when the system is low on memory.

Signed-off-by: Roland Dreier <roland@topspin.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 64dc81fca7
parent bb2af78bcd

3 changed files with 29 additions and 27 deletions
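For context, the shape of the conversion is sketched below. This is an illustrative fragment only, not part of the patch; the function and variable names (demo_alloc_ring, demo_free_ring, ring_size, ring_dma) are hypothetical. pci_alloc_consistent() takes the struct pci_dev and historically hard-codes GFP_ATOMIC, while dma_alloc_coherent() takes the generic struct device embedded in the pci_dev plus an explicit gfp_t, which is what lets process-context callers ask for GFP_KERNEL and allow the allocator to sleep and reclaim memory.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/*
 * Illustrative sketch only; names are hypothetical and this code is not
 * part of the patch below.
 */
static void *demo_alloc_ring(struct pci_dev *pdev, size_t ring_size,
			     dma_addr_t *ring_dma)
{
	/* Old style: no gfp argument available, GFP_ATOMIC is implied. */
	/* return pci_alloc_consistent(pdev, ring_size, ring_dma); */

	/* New style: same semantics, but the gfp flags are explicit. */
	return dma_alloc_coherent(&pdev->dev, ring_size, ring_dma, GFP_KERNEL);
}

static void demo_free_ring(struct pci_dev *pdev, size_t ring_size,
			   void *ring, dma_addr_t ring_dma)
{
	/* Matching free on the same struct device. */
	dma_free_coherent(&pdev->dev, ring_size, ring, ring_dma);
}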
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -636,19 +636,19 @@ static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq)
 	int size;
 
 	if (cq->is_direct)
-		pci_free_consistent(dev->pdev,
-				    (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
-				    cq->queue.direct.buf,
-				    pci_unmap_addr(&cq->queue.direct,
-						   mapping));
+		dma_free_coherent(&dev->pdev->dev,
+				  (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
+				  cq->queue.direct.buf,
+				  pci_unmap_addr(&cq->queue.direct,
+						 mapping));
 	else {
 		size = (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE;
 		for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
 			if (cq->queue.page_list[i].buf)
-				pci_free_consistent(dev->pdev, PAGE_SIZE,
-						    cq->queue.page_list[i].buf,
-						    pci_unmap_addr(&cq->queue.page_list[i],
-								   mapping));
+				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+						  cq->queue.page_list[i].buf,
+						  pci_unmap_addr(&cq->queue.page_list[i],
+								 mapping));
 
 		kfree(cq->queue.page_list);
 	}
@@ -668,8 +668,8 @@ static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size,
 		npages = 1;
 		shift  = get_order(size) + PAGE_SHIFT;
 
-		cq->queue.direct.buf = pci_alloc_consistent(dev->pdev,
-							    size, &t);
+		cq->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
+							  size, &t, GFP_KERNEL);
 		if (!cq->queue.direct.buf)
 			return -ENOMEM;
 
@@ -707,7 +707,8 @@ static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size,
 
 	for (i = 0; i < npages; ++i) {
 		cq->queue.page_list[i].buf =
-			pci_alloc_consistent(dev->pdev, PAGE_SIZE, &t);
+			dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+					   &t, GFP_KERNEL);
 		if (!cq->queue.page_list[i].buf)
 			goto err_free;
 
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -501,8 +501,8 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
 	eq_context = MAILBOX_ALIGN(mailbox);
 
 	for (i = 0; i < npages; ++i) {
-		eq->page_list[i].buf = pci_alloc_consistent(dev->pdev,
-							    PAGE_SIZE, &t);
+		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
+							  PAGE_SIZE, &t, GFP_KERNEL);
 		if (!eq->page_list[i].buf)
 			goto err_out_free;
 
@@ -582,10 +582,10 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
 err_out_free:
 	for (i = 0; i < npages; ++i)
 		if (eq->page_list[i].buf)
-			pci_free_consistent(dev->pdev, PAGE_SIZE,
-					    eq->page_list[i].buf,
-					    pci_unmap_addr(&eq->page_list[i],
-							   mapping));
+			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+					  eq->page_list[i].buf,
+					  pci_unmap_addr(&eq->page_list[i],
+							 mapping));
 
 	kfree(eq->page_list);
 	kfree(dma_list);
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -934,7 +934,8 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
 		mthca_dbg(dev, "Creating direct QP of size %d (shift %d)\n",
 			  size, shift);
 
-		qp->queue.direct.buf = pci_alloc_consistent(dev->pdev, size, &t);
+		qp->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev, size,
+							  &t, GFP_KERNEL);
 		if (!qp->queue.direct.buf)
 			goto err_out;
 
@@ -973,7 +974,8 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
 
 	for (i = 0; i < npages; ++i) {
 		qp->queue.page_list[i].buf =
-			pci_alloc_consistent(dev->pdev, PAGE_SIZE, &t);
+			dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+					   &t, GFP_KERNEL);
 		if (!qp->queue.page_list[i].buf)
 			goto err_out_free;
 
@@ -996,16 +998,15 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
 
 err_out_free:
 	if (qp->is_direct) {
-		pci_free_consistent(dev->pdev, size,
-				    qp->queue.direct.buf,
-				    pci_unmap_addr(&qp->queue.direct, mapping));
+		dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf,
+				  pci_unmap_addr(&qp->queue.direct, mapping));
 	} else
 		for (i = 0; i < npages; ++i) {
 			if (qp->queue.page_list[i].buf)
-				pci_free_consistent(dev->pdev, PAGE_SIZE,
-						    qp->queue.page_list[i].buf,
-						    pci_unmap_addr(&qp->queue.page_list[i],
-								   mapping));
+				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+						  qp->queue.page_list[i].buf,
+						  pci_unmap_addr(&qp->queue.page_list[i],
+								 mapping));
 
 		}
 