
Searched refs:async_tx (Results 1 – 25 of 30) sorted by relevance

/Linux-v4.19/drivers/dma/
mmp_pdma.c:88 struct dma_async_tx_descriptor async_tx; member
135 container_of(tx, struct mmp_pdma_desc_sw, async_tx)
332 set_desc(chan->phy, desc->async_tx.phys); in start_pending_queue()
350 cookie = dma_cookie_assign(&child->async_tx); in mmp_pdma_tx_submit()
374 dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); in mmp_pdma_alloc_descriptor()
376 desc->async_tx.tx_submit = mmp_pdma_tx_submit; in mmp_pdma_alloc_descriptor()
377 desc->async_tx.phys = pdesc; in mmp_pdma_alloc_descriptor()
420 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); in mmp_pdma_free_desc_list()
485 prev->desc.ddadr = new->async_tx.phys; in mmp_pdma_prep_memcpy()
487 new->async_tx.cookie = 0; in mmp_pdma_prep_memcpy()
[all …]
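
Note: these hits all share one convention. The driver embeds the generic struct dma_async_tx_descriptor in its own software descriptor (the "member" hits above) and recovers the wrapper in its tx_submit hook via container_of (mmp_pdma.c:135, 350). A minimal sketch of that shape, using hypothetical my_desc_sw/my_tx_submit/my_alloc_descriptor names rather than any driver listed here:

    #include <linux/dmaengine.h>
    #include <linux/slab.h>
    #include "dmaengine.h"  /* driver-private header: dma_cookie_assign() */

    /* Driver-private software descriptor wrapping the generic one. */
    struct my_desc_sw {
            struct list_head node;                   /* driver queue linkage */
            struct dma_async_tx_descriptor async_tx; /* generic dmaengine part */
    };

    #define tx_to_my_desc(tx) container_of(tx, struct my_desc_sw, async_tx)

    static dma_cookie_t my_tx_submit(struct dma_async_tx_descriptor *tx)
    {
            struct my_desc_sw *desc = tx_to_my_desc(tx);

            /* Hand out the cookie the client later polls on; compare
             * mmp_pdma_tx_submit() at mmp_pdma.c:350. */
            return dma_cookie_assign(&desc->async_tx);
    }

    static struct my_desc_sw *my_alloc_descriptor(struct dma_chan *chan)
    {
            struct my_desc_sw *desc = kzalloc(sizeof(*desc), GFP_NOWAIT);

            if (!desc)
                    return NULL;
            /* Bind the embedded descriptor to the channel and install the
             * submit hook, as mmp_pdma_alloc_descriptor() does at 374-377. */
            dma_async_tx_descriptor_init(&desc->async_tx, chan);
            desc->async_tx.tx_submit = my_tx_submit;
            return desc;
    }
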
iop-adma.c:41 container_of(tx, struct iop_adma_desc_slot, async_tx)
64 struct dma_async_tx_descriptor *tx = &desc->async_tx; in iop_adma_run_tx_complete_actions()
94 if (!async_tx_test_ack(&desc->async_tx)) in iop_adma_clean_slot()
129 iter->async_tx.cookie, iter->idx, busy, in __iop_adma_slot_cleanup()
130 iter->async_tx.phys, iop_desc_get_next_desc(iter), in __iop_adma_slot_cleanup()
131 async_tx_test_ack(&iter->async_tx)); in __iop_adma_slot_cleanup()
133 prefetch(&_iter->async_tx); in __iop_adma_slot_cleanup()
146 if (iter->async_tx.phys == current_desc) { in __iop_adma_slot_cleanup()
225 if (iter->xor_check_result && iter->async_tx.cookie) in __iop_adma_slot_cleanup()
288 prefetch(&_iter->async_tx); in iop_adma_alloc_slots()
[all …]
mv_xor_v2.c:189 struct dma_async_tx_descriptor async_tx; member
315 container_of(tx, struct mv_xor_v2_sw_desc, async_tx); in mv_xor_v2_tx_submit()
321 __func__, sw_desc, &sw_desc->async_tx); in mv_xor_v2_tx_submit()
362 if (async_tx_test_ack(&sw_desc->async_tx)) { in mv_xor_v2_prep_sw_desc()
402 sw_desc->async_tx.flags = flags; in mv_xor_v2_prep_dma_memcpy()
431 return &sw_desc->async_tx; in mv_xor_v2_prep_dma_memcpy()
458 sw_desc->async_tx.flags = flags; in mv_xor_v2_prep_dma_xor()
490 return &sw_desc->async_tx; in mv_xor_v2_prep_dma_xor()
520 return &sw_desc->async_tx; in mv_xor_v2_prep_dma_interrupt()
584 if (next_pending_sw_desc->async_tx.cookie > 0) { in mv_xor_v2_tasklet()
[all …]
mv_xor.c:51 container_of(tx, struct mv_xor_desc_slot, async_tx)
193 mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); in mv_chan_start_new_chain()
204 BUG_ON(desc->async_tx.cookie < 0); in mv_desc_run_tx_complete_actions()
206 if (desc->async_tx.cookie > 0) { in mv_desc_run_tx_complete_actions()
207 cookie = desc->async_tx.cookie; in mv_desc_run_tx_complete_actions()
209 dma_descriptor_unmap(&desc->async_tx); in mv_desc_run_tx_complete_actions()
213 dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL); in mv_desc_run_tx_complete_actions()
217 dma_run_dependencies(&desc->async_tx); in mv_desc_run_tx_complete_actions()
231 if (async_tx_test_ack(&iter->async_tx)) { in mv_chan_clean_completed_slots()
247 __func__, __LINE__, desc, desc->async_tx.flags); in mv_desc_clean_slot()
[all …]
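
Note: mv_desc_run_tx_complete_actions() (mv_xor.c:204-217 above) shows the canonical async_tx completion sequence: only a live cookie triggers unmap and callback, and dependent operations are kicked regardless. A condensed sketch, reusing the hypothetical my_desc_sw from the earlier note:

    static dma_cookie_t my_run_tx_complete_actions(struct my_desc_sw *desc)
    {
            dma_cookie_t cookie = 0;

            BUG_ON(desc->async_tx.cookie < 0);

            if (desc->async_tx.cookie > 0) {
                    cookie = desc->async_tx.cookie;

                    /* Undo DMA mappings, then invoke the client callback. */
                    dma_descriptor_unmap(&desc->async_tx);
                    dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
            }

            /* Start any async_tx operations chained behind this one. */
            dma_run_dependencies(&desc->async_tx);
            return cookie;
    }
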
fsldma.c:401 set_desc_next(chan, &tail->hw, desc->async_tx.phys); in append_ld_queue()
433 cookie = dma_cookie_assign(&child->async_tx); in fsl_dma_tx_submit()
454 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); in fsl_dma_free_descriptor()
475 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); in fsl_dma_alloc_descriptor()
476 desc->async_tx.tx_submit = fsl_dma_tx_submit; in fsl_dma_alloc_descriptor()
477 desc->async_tx.phys = pdesc; in fsl_dma_alloc_descriptor()
498 if (async_tx_test_ack(&desc->async_tx)) in fsldma_clean_completed_descriptor()
514 struct dma_async_tx_descriptor *txd = &desc->async_tx; in fsldma_run_tx_complete_actions()
552 if (!async_tx_test_ack(&desc->async_tx)) { in fsldma_clean_running_descriptor()
561 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); in fsldma_clean_running_descriptor()
[all …]
altera-msgdma.c:163 struct dma_async_tx_descriptor async_tx; member
204 #define tx_to_desc(tx) container_of(tx, struct msgdma_sw_desc, async_tx)
372 async_tx_ack(&first->async_tx); in msgdma_prep_memcpy()
373 first->async_tx.flags = flags; in msgdma_prep_memcpy()
375 return &first->async_tx; in msgdma_prep_memcpy()
457 first->async_tx.flags = flags; in msgdma_prep_slave_sg()
459 return &first->async_tx; in msgdma_prep_slave_sg()
594 callback = desc->async_tx.callback; in msgdma_chan_desc_cleanup()
595 callback_param = desc->async_tx.callback_param; in msgdma_chan_desc_cleanup()
620 dma_cookie_complete(&desc->async_tx); in msgdma_complete_descriptor()
[all …]
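
Note: msgdma_prep_memcpy() (altera-msgdma.c:372-375 above) shows the prep side of the contract: the driver stores the caller's flags in the embedded descriptor and returns its address, so dmaengine clients never see the wrapper type. A sketch with hypothetical my_get_descriptor()/my_fill_hw_desc() helpers:

    static struct dma_async_tx_descriptor *
    my_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
                       dma_addr_t src, size_t len, unsigned long flags)
    {
            struct my_desc_sw *first = my_get_descriptor(chan);

            if (!first)
                    return NULL;

            my_fill_hw_desc(first, dst, src, len);

            /* The caller's DMA_PREP_ and DMA_CTRL_ flags travel in the
             * embedded descriptor, which is all the core ever handles. */
            first->async_tx.flags = flags;
            return &first->async_tx;
    }
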
fsl_raid.c:83 #define to_fsl_re_dma_desc(tx) container_of(tx, struct fsl_re_desc, async_tx)
137 dma_cookie_complete(&desc->async_tx); in fsl_re_desc_done()
138 dma_descriptor_unmap(&desc->async_tx); in fsl_re_desc_done()
139 dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL); in fsl_re_desc_done()
149 if (async_tx_test_ack(&desc->async_tx)) in fsl_re_cleanup_descs()
256 desc->async_tx.tx_submit = fsl_re_tx_submit; in fsl_re_init_desc()
257 dma_async_tx_descriptor_init(&desc->async_tx, &re_chan->chan); in fsl_re_init_desc()
289 desc->async_tx.flags = flags; in fsl_re_chan_alloc_desc()
306 desc->async_tx.flags = flags; in fsl_re_chan_alloc_desc()
384 return &desc->async_tx; in fsl_re_prep_dma_genq()
[all …]
nbpfaxi.c:154 struct dma_async_tx_descriptor async_tx; member
633 running = chan->running ? chan->running->async_tx.cookie : -EINVAL; in nbpf_tx_status()
644 if (desc->async_tx.cookie == cookie) { in nbpf_tx_status()
651 if (desc->async_tx.cookie == cookie) { in nbpf_tx_status()
671 struct nbpf_desc *desc = container_of(tx, struct nbpf_desc, async_tx); in nbpf_tx_submit()
719 dma_async_tx_descriptor_init(&desc->async_tx, dchan); in nbpf_desc_page_alloc()
720 desc->async_tx.tx_submit = nbpf_tx_submit; in nbpf_desc_page_alloc()
761 if (async_tx_test_ack(&desc->async_tx) && desc->user_wait) { in nbpf_scan_acked()
854 __func__, desc, desc->async_tx.cookie); in nbpf_chan_idle()
952 desc->async_tx.flags = flags; in nbpf_prep_sg()
[all …]
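
Note: nbpf_tx_status() (nbpfaxi.c:633-651 above) layers residue reporting on top of the generic cookie bookkeeping by matching desc->async_tx.cookie against the queried cookie. The minimal shape, with the residue walk left as a placeholder comment:

    static enum dma_status my_tx_status(struct dma_chan *chan,
                                        dma_cookie_t cookie,
                                        struct dma_tx_state *txstate)
    {
            /* Generic check against the channel's completed cookie;
             * dma_cookie_status() lives in the driver-private
             * drivers/dma/dmaengine.h. */
            enum dma_status ret = dma_cookie_status(chan, cookie, txstate);

            if (ret != DMA_COMPLETE && txstate) {
                    /* A real driver walks its queues here, comparing each
                     * desc->async_tx.cookie with @cookie, and reports the
                     * bytes left via dma_set_residue(). */
                    dma_set_residue(txstate, 0);
            }
            return ret;
    }
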
fsldma.h:109 struct dma_async_tx_descriptor async_tx; member
197 #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
mv_xor.h:155 struct dma_async_tx_descriptor async_tx; member
fsl_raid.h:294 struct dma_async_tx_descriptor async_tx; member
/Linux-v4.19/drivers/dma/sh/
shdma-base.c:76 container_of(tx, struct shdma_desc, async_tx); in shdma_tx_submit()
95 chunk->async_tx.cookie > 0 || in shdma_tx_submit()
96 chunk->async_tx.cookie == -EBUSY || in shdma_tx_submit()
101 chunk->async_tx.callback = callback; in shdma_tx_submit()
102 chunk->async_tx.callback_param = tx->callback_param; in shdma_tx_submit()
105 chunk->async_tx.callback = NULL; in shdma_tx_submit()
111 tx->cookie, &chunk->async_tx, schan->id); in shdma_tx_submit()
240 dma_async_tx_descriptor_init(&desc->async_tx, in shdma_alloc_chan_resources()
242 desc->async_tx.tx_submit = shdma_tx_submit; in shdma_alloc_chan_resources()
340 struct dma_async_tx_descriptor *tx = &desc->async_tx; in __ld_cleanup()
[all …]
rcar-dmac.c:73 struct dma_async_tx_descriptor async_tx; member
94 #define to_rcar_dmac_desc(d) container_of(d, struct rcar_dmac_desc, async_tx)
399 else if (desc->async_tx.callback) in rcar_dmac_chan_start_xfer()
503 dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); in rcar_dmac_desc_alloc()
504 desc->async_tx.tx_submit = rcar_dmac_tx_submit; in rcar_dmac_desc_alloc()
558 if (async_tx_test_ack(&desc->async_tx)) { in rcar_dmac_desc_recycle_acked()
910 desc->async_tx.flags = dma_flags; in rcar_dmac_chan_prep_sg()
911 desc->async_tx.cookie = -EBUSY; in rcar_dmac_chan_prep_sg()
1007 return &desc->async_tx; in rcar_dmac_chan_prep_sg()
1308 if (cookie != desc->async_tx.cookie) { in rcar_dmac_chan_get_residue()
[all …]
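
Note: rcar_dmac_desc_recycle_acked() (rcar-dmac.c:558 above), like the async_tx_test_ack() hits in fsldma.c and mv_xor.c, gates descriptor reuse on the client having set DMA_CTRL_ACK. A sketch continuing the earlier my_desc_sw example, with a hypothetical my_chan holding the lists (locking omitted):

    struct my_chan {
            struct list_head done_list;   /* completed, possibly unacked */
            struct list_head free_list;   /* safe to hand out again */
    };

    static void my_desc_recycle_acked(struct my_chan *chan)
    {
            struct my_desc_sw *desc, *tmp;

            list_for_each_entry_safe(desc, tmp, &chan->done_list, node) {
                    /* async_tx_test_ack() reads DMA_CTRL_ACK; only then
                     * may the descriptor be reused or freed. */
                    if (async_tx_test_ack(&desc->async_tx))
                            list_move_tail(&desc->node, &chan->free_list);
            }
    }
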
shdma.h:61 #define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)
/Linux-v4.19/drivers/dma/xilinx/
xilinx_dma.c:304 struct dma_async_tx_descriptor async_tx; member
451 container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
801 callback = desc->async_tx.callback; in xilinx_dma_chan_handle_cyclic()
802 callback_param = desc->async_tx.callback_param; in xilinx_dma_chan_handle_cyclic()
833 dmaengine_desc_get_callback(&desc->async_tx, &cb); in xilinx_dma_chan_desc_cleanup()
841 dma_run_dependencies(&desc->async_tx); in xilinx_dma_chan_desc_cleanup()
1097 desc->async_tx.phys); in xilinx_vdma_start_transfer()
1232 head_desc->async_tx.phys); in xilinx_cdma_start_transfer()
1298 head_desc->async_tx.phys); in xilinx_dma_start_transfer()
1303 head_desc->async_tx.phys); in xilinx_dma_start_transfer()
[all …]
zynqmp_dma.c:150 async_tx)
187 struct dma_async_tx_descriptor async_tx; member
486 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); in zynqmp_dma_alloc_chan_resources()
487 desc->async_tx.tx_submit = zynqmp_dma_tx_submit; in zynqmp_dma_alloc_chan_resources()
609 callback = desc->async_tx.callback; in zynqmp_dma_chan_desc_cleanup()
610 callback_param = desc->async_tx.callback_param; in zynqmp_dma_chan_desc_cleanup()
635 dma_cookie_complete(&desc->async_tx); in zynqmp_dma_complete_descriptor()
840 async_tx_ack(&first->async_tx); in zynqmp_dma_prep_memcpy()
841 first->async_tx.flags = flags; in zynqmp_dma_prep_memcpy()
842 return &first->async_tx; in zynqmp_dma_prep_memcpy()
/Linux-v4.19/Documentation/crypto/
async-tx-api.txt:26 The async_tx API provides methods for describing a chain of asynchronous
87 async_tx call will implicitly set the acknowledged state.
163 See include/linux/async_tx.h for more information on the flags. See the
171 accommodate assumptions made by applications using the async_tx API:
222 include/linux/async_tx.h: core header file for the async_tx api
223 crypto/async_tx/async_tx.c: async_tx interface to dmaengine and common code
224 crypto/async_tx/async_memcpy.c: copy offload
225 crypto/async_tx/async_xor.c: xor and xor zero sum offload
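Note: async-tx-api.txt is the API's primary documentation; the matches above touch its core points (implicit acknowledgement around line 87, flag handling around 163). For orientation, a minimal client-side chain using the crypto/async_tx entry points; copy_then_xor() and my_done() are hypothetical, and only the final operation carries ASYNC_TX_ACK since the intermediate copy is acked implicitly when the XOR attaches to it:

    #include <linux/async_tx.h>

    static void my_done(void *param)
    {
            /* Runs after the whole copy-then-xor chain completes. */
    }

    static void copy_then_xor(struct page *dest, struct page *src,
                              struct page **xor_srcs, int src_cnt)
    {
            struct dma_async_tx_descriptor *tx;
            struct async_submit_ctl submit;

            /* Stage 1: memcpy offload (or synchronous fallback). No
             * ASYNC_TX_ACK here, since the XOR below depends on it. */
            init_async_submit(&submit, 0, NULL, NULL, NULL, NULL);
            tx = async_memcpy(dest, src, 0, 0, PAGE_SIZE, &submit);

            /* Stage 2: ordered after the copy via depend_tx = tx. */
            init_async_submit(&submit, ASYNC_TX_ACK | ASYNC_TX_XOR_DROP_DST,
                              tx, my_done, NULL, NULL);
            tx = async_xor(dest, xor_srcs, 0, src_cnt, PAGE_SIZE, &submit);
    }
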
/Linux-v4.19/drivers/dma/ppc4xx/
adma.c:1480 BUG_ON(desc->async_tx.cookie < 0); in ppc440spe_adma_run_tx_complete_actions()
1481 if (desc->async_tx.cookie > 0) { in ppc440spe_adma_run_tx_complete_actions()
1482 cookie = desc->async_tx.cookie; in ppc440spe_adma_run_tx_complete_actions()
1483 desc->async_tx.cookie = 0; in ppc440spe_adma_run_tx_complete_actions()
1485 dma_descriptor_unmap(&desc->async_tx); in ppc440spe_adma_run_tx_complete_actions()
1489 dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL); in ppc440spe_adma_run_tx_complete_actions()
1493 dma_run_dependencies(&desc->async_tx); in ppc440spe_adma_run_tx_complete_actions()
1507 if (!async_tx_test_ack(&desc->async_tx)) in ppc440spe_adma_clean_slot()
1573 iter->async_tx.cookie, iter->idx, busy, iter->phys, in __ppc440spe_adma_slot_cleanup()
1575 async_tx_test_ack(&iter->async_tx)); in __ppc440spe_adma_slot_cleanup()
[all …]
adma.h:23 container_of(tx, struct ppc440spe_adma_desc_slot, async_tx)
150 struct dma_async_tx_descriptor async_tx; member
/Linux-v4.19/crypto/async_tx/
Makefile:2 obj-$(CONFIG_ASYNC_CORE) += async_tx.o
/Linux-v4.19/arch/arm/include/asm/hardware/
iop_adma.h:101 struct dma_async_tx_descriptor async_tx; member
iop3xx-adma.h:601 (u32) (desc->async_tx.phys + (i << 5)); in iop_desc_init_zero_sum()
/Linux-v4.19/include/linux/
shdma-base.h:51 struct dma_async_tx_descriptor async_tx; member
/Linux-v4.19/crypto/
Makefile:156 obj-$(CONFIG_ASYNC_CORE) += async_tx/
/Linux-v4.19/Documentation/driver-api/dmaengine/
client.rst:7 .. note:: For DMA Engine usage in async_tx please see:
135 Although the async_tx API specifies that completion callback
