async_tx: export async_tx_quiesce
Replace open coded "wait and acknowledge" instances with async_tx_quiesce.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
commit d2c52b7983
parent 669ab0b210
5 changed files with 26 additions and 62 deletions
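
For context, here is the call-site pattern the hunks below consolidate, condensed into a minimal before/after sketch (all identifiers come from the diff itself):

	/* before: each synchronous fallback path open-coded the wait */
	if (depend_tx) {
		/* if ack is already set then we cannot be sure
		 * we are referring to the correct operation
		 */
		BUG_ON(async_tx_test_ack(depend_tx));
		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
			panic("%s: DMA_ERROR waiting for depend_tx\n", __func__);
	}

	/* after: one helper waits, acknowledges, and drops the reference */
	async_tx_quiesce(&depend_tx);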
@@ -73,15 +73,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 		pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
 		/* wait for any prerequisite operations */
-		if (depend_tx) {
-			/* if ack is already set then we cannot be sure
-			 * we are referring to the correct operation
-			 */
-			BUG_ON(async_tx_test_ack(depend_tx));
-			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
-				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__func__);
-		}
+		async_tx_quiesce(&depend_tx);
 
 		dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
 		src_buf = kmap_atomic(src, KM_USER1) + src_offset;
@@ -72,15 +72,7 @@ async_memset(struct page *dest, int val, unsigned int offset,
 		dest_buf = (void *) (((char *) page_address(dest)) + offset);
 
 		/* wait for any prerequisite operations */
-		if (depend_tx) {
-			/* if ack is already set then we cannot be sure
-			 * we are referring to the correct operation
-			 */
-			BUG_ON(async_tx_test_ack(depend_tx));
-			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
-				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__func__);
-		}
+		async_tx_quiesce(&depend_tx);
 
 		memset(dest_buf, val, len);
 
@@ -607,15 +607,7 @@ async_trigger_callback(enum async_tx_flags flags,
 		pr_debug("%s: (sync)\n", __func__);
 
 		/* wait for any prerequisite operations */
-		if (depend_tx) {
-			/* if ack is already set then we cannot be sure
-			 * we are referring to the correct operation
-			 */
-			BUG_ON(async_tx_test_ack(depend_tx));
-			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
-				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__func__);
-		}
+		async_tx_quiesce(&depend_tx);
 
 		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
 	}
@@ -624,6 +616,25 @@ async_trigger_callback(enum async_tx_flags flags,
 }
 EXPORT_SYMBOL_GPL(async_trigger_callback);
 
+/**
+ * async_tx_quiesce - ensure tx is complete and freeable upon return
+ * @tx - transaction to quiesce
+ */
+void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
+{
+	if (*tx) {
+		/* if ack is already set then we cannot be sure
+		 * we are referring to the correct operation
+		 */
+		BUG_ON(async_tx_test_ack(*tx));
+		if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
+			panic("DMA_ERROR waiting for transaction\n");
+		async_tx_ack(*tx);
+		*tx = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(async_tx_quiesce);
+
 module_init(async_tx_init);
 module_exit(async_tx_exit);
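
Note one detail of the helper added above: besides waiting, it also acknowledges the descriptor via async_tx_ack() and sets the caller's pointer to NULL, so the dependency is retired and the reference dropped in a single step. A hedged caller-side illustration (the function name is hypothetical, not part of this patch):

	/* hypothetical caller, for illustration only */
	static void example_quiesce_dependency(struct dma_async_tx_descriptor *depend_tx)
	{
		async_tx_quiesce(&depend_tx);
		/* here depend_tx == NULL: the transaction has completed and been
		 * acknowledged, so the code that follows treats the dependency
		 * as already satisfied
		 */
	}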
@@ -30,24 +30,6 @@
 #include <linux/raid/xor.h>
 #include <linux/async_tx.h>
 
-/**
- * async_tx_quiesce - ensure tx is complete and freeable upon return
- * @tx - transaction to quiesce
- */
-static void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
-{
-	if (*tx) {
-		/* if ack is already set then we cannot be sure
-		 * we are referring to the correct operation
-		 */
-		BUG_ON(async_tx_test_ack(*tx));
-		if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
-			panic("DMA_ERROR waiting for transaction\n");
-		async_tx_ack(*tx);
-		*tx = NULL;
-	}
-}
-
 /* do_async_xor - dma map the pages and perform the xor with an engine.
  * This routine is marked __always_inline so it can be compiled away
  * when CONFIG_DMA_ENGINE=n
@@ -219,15 +201,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 		}
 
 		/* wait for any prerequisite operations */
-		if (depend_tx) {
-			/* if ack is already set then we cannot be sure
-			 * we are referring to the correct operation
-			 */
-			BUG_ON(async_tx_test_ack(depend_tx));
-			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
-				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__func__);
-		}
+		async_tx_quiesce(&depend_tx);
 
 		do_sync_xor(dest, src_list, offset, src_cnt, len,
 			    flags, depend_tx, cb_fn, cb_param);
@@ -309,17 +283,10 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 		tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags,
 			depend_tx, NULL, NULL);
 
-		if (tx) {
-			if (dma_wait_for_async_tx(tx) == DMA_ERROR)
-				panic("%s: DMA_ERROR waiting for tx\n",
-					__func__);
-			async_tx_ack(tx);
-		}
+		async_tx_quiesce(&tx);
 
 		*result = page_is_zero(dest, offset, len) ? 0 : 1;
 
-		tx = NULL;
-
 		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
 	}
@@ -152,4 +152,6 @@ struct dma_async_tx_descriptor *
 async_trigger_callback(enum async_tx_flags flags,
 	struct dma_async_tx_descriptor *depend_tx,
 	dma_async_tx_callback cb_fn, void *cb_fn_param);
+
+void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
 #endif /* _ASYNC_TX_H_ */
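
With the declaration now in the shared header and the symbol exported, other async_tx clients can retire a dependency the same way. A minimal, hypothetical usage sketch (the function name is an assumption; it only relies on the exported helper above):

	#include <linux/async_tx.h>

	/* hypothetical synchronous fallback, not part of this patch */
	static void my_sync_fallback(struct dma_async_tx_descriptor *depend_tx)
	{
		/* wait for, acknowledge, and drop any prerequisite operation */
		async_tx_quiesce(&depend_tx);

		/* ...perform the operation synchronously... */
	}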