async_tx/raid6: add missing dma_unmap calls to the async fail case
If we are unable to offload async_mult() or async_sum_product(), then
unmap the buffers before falling through to the synchronous path.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent cdef57dbb6
commit 1f6672d44c

1 changed file with 13 additions and 0 deletions
@@ -55,6 +55,13 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
 			async_tx_submit(chan, tx, submit);
 			return tx;
 		}
+
+		/* could not get a descriptor, unmap and fall through to
+		 * the synchronous path
+		 */
+		dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL);
+		dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
+		dma_unmap_page(dev, dma_src[1], len, DMA_TO_DEVICE);
 	}
 
 	/* run the operation synchronously */
@@ -101,6 +108,12 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
 			async_tx_submit(chan, tx, submit);
 			return tx;
 		}
+
+		/* could not get a descriptor, unmap and fall through to
+		 * the synchronous path
+		 */
+		dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL);
+		dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
 	}
 
 	/* no channel available, or failed to allocate a descriptor, so
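For context, the sketch below shows roughly where the added unmap calls sit
inside async_sum_product(). Only the lines marked "+" above come from this
commit; the surrounding mapping and descriptor-prep calls (dma, chan, dev,
dma_flags, device_prep_dma_pq()) are reconstructed from the usual dmaengine
flow and are illustrative, not the verbatim kernel source.

	if (dma) {
		dma_addr_t dma_dest[2];
		dma_addr_t dma_src[2];
		struct device *dev = dma->dev;
		struct dma_async_tx_descriptor *tx;
		enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;

		/* map the destination and both sources for the offload attempt */
		dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
		dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE);
		dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE);

		tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2, coef,
					     len, dma_flags);
		if (tx) {
			async_tx_submit(chan, tx, submit);
			return tx;
		}

		/* could not get a descriptor: release the mappings (the lines
		 * this commit adds) before falling through to the synchronous
		 * path
		 */
		dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL);
		dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
		dma_unmap_page(dev, dma_src[1], len, DMA_TO_DEVICE);
	}

	/* run the operation synchronously */

async_mult() follows the same pattern with a single source, which is why its
hunk releases only dma_dest[1] and dma_src[0].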