| /Linux-v5.4/drivers/dma/ | 
| D | mmp_pdma.c | 85 	struct dma_async_tx_descriptor async_tx;  member
 133 	container_of(tx, struct mmp_pdma_desc_sw, async_tx)
 334 	set_desc(chan->phy, desc->async_tx.phys);  in start_pending_queue()
 352 		cookie = dma_cookie_assign(&child->async_tx);  in mmp_pdma_tx_submit()
 376 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);  in mmp_pdma_alloc_descriptor()
 378 	desc->async_tx.tx_submit = mmp_pdma_tx_submit;  in mmp_pdma_alloc_descriptor()
 379 	desc->async_tx.phys = pdesc;  in mmp_pdma_alloc_descriptor()
 422 		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);  in mmp_pdma_free_desc_list()
 487 			prev->desc.ddadr = new->async_tx.phys;  in mmp_pdma_prep_memcpy()
 489 		new->async_tx.cookie = 0;  in mmp_pdma_prep_memcpy()
 [all …]
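
The idiom visible here repeats in every driver in this listing: a driver-private software descriptor embeds a struct dma_async_tx_descriptor member named async_tx, and container_of() recovers the private descriptor from the generic pointer the dmaengine core hands back. Below is a minimal sketch of that idiom; the foo_* names are hypothetical stand-ins for the per-driver types (mmp_pdma_desc_sw and friends), while the dmaengine calls are the real ones these drivers use; note that dma_cookie_assign() comes from the driver-internal header drivers/dma/dmaengine.h, not the public one.

#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include "dmaengine.h"	/* driver-internal: dma_cookie_assign() */

struct foo_hw_desc {			/* hardware descriptor (hypothetical) */
	u32 ddadr;			/* next-descriptor address */
};

struct foo_chan {			/* channel wrapper (hypothetical) */
	struct dma_chan chan;
	struct dma_pool *desc_pool;
};

struct foo_desc_sw {			/* software descriptor, cf. mmp_pdma_desc_sw */
	struct foo_hw_desc desc;	/* hardware view */
	struct list_head node;
	struct dma_async_tx_descriptor async_tx;	/* generic view */
};

#define tx_to_foo_desc(tx) container_of(tx, struct foo_desc_sw, async_tx)

static dma_cookie_t foo_tx_submit(struct dma_async_tx_descriptor *tx)
{
	/* recover the private descriptor, queue it, hand out a cookie */
	struct foo_desc_sw *desc = tx_to_foo_desc(tx);

	(void)desc;	/* a real driver would queue desc on its pending list */
	return dma_cookie_assign(tx);
}

static struct foo_desc_sw *foo_alloc_descriptor(struct foo_chan *chan)
{
	struct foo_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
	desc->async_tx.tx_submit = foo_tx_submit;
	desc->async_tx.phys = pdesc;	/* bus address of the hw descriptor */
	return desc;
}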
 
 | 
| D | mv_xor_v2.c | 179 	struct dma_async_tx_descriptor async_tx;  member
 304 		container_of(tx, struct mv_xor_v2_sw_desc, async_tx);  in mv_xor_v2_tx_submit()
 310 		__func__, sw_desc, &sw_desc->async_tx);  in mv_xor_v2_tx_submit()
 351 		if (async_tx_test_ack(&sw_desc->async_tx)) {  in mv_xor_v2_prep_sw_desc()
 391 	sw_desc->async_tx.flags = flags;  in mv_xor_v2_prep_dma_memcpy()
 420 	return &sw_desc->async_tx;  in mv_xor_v2_prep_dma_memcpy()
 447 	sw_desc->async_tx.flags = flags;  in mv_xor_v2_prep_dma_xor()
 479 	return &sw_desc->async_tx;  in mv_xor_v2_prep_dma_xor()
 509 	return &sw_desc->async_tx;  in mv_xor_v2_prep_dma_interrupt()
 573 		if (next_pending_sw_desc->async_tx.cookie > 0) {  in mv_xor_v2_tasklet()
 [all …]
 
 | 
| D | iop-adma.c | 32 	container_of(tx, struct iop_adma_desc_slot, async_tx)
 55 	struct dma_async_tx_descriptor *tx = &desc->async_tx;  in iop_adma_run_tx_complete_actions()
 85 	if (!async_tx_test_ack(&desc->async_tx))  in iop_adma_clean_slot()
 120 			iter->async_tx.cookie, iter->idx, busy,  in __iop_adma_slot_cleanup()
 121 			&iter->async_tx.phys, (u64)iop_desc_get_next_desc(iter),  in __iop_adma_slot_cleanup()
 122 			async_tx_test_ack(&iter->async_tx));  in __iop_adma_slot_cleanup()
 124 		prefetch(&_iter->async_tx);  in __iop_adma_slot_cleanup()
 137 		if (iter->async_tx.phys == current_desc) {  in __iop_adma_slot_cleanup()
 216 		if (iter->xor_check_result && iter->async_tx.cookie)  in __iop_adma_slot_cleanup()
 279 		prefetch(&_iter->async_tx);  in iop_adma_alloc_slots()
 [all …]
 
 | 
| D | mv_xor.c | 43 	container_of(tx, struct mv_xor_desc_slot, async_tx)
 185 	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);  in mv_chan_start_new_chain()
 196 	BUG_ON(desc->async_tx.cookie < 0);  in mv_desc_run_tx_complete_actions()
 198 	if (desc->async_tx.cookie > 0) {  in mv_desc_run_tx_complete_actions()
 199 		cookie = desc->async_tx.cookie;  in mv_desc_run_tx_complete_actions()
 201 		dma_descriptor_unmap(&desc->async_tx);  in mv_desc_run_tx_complete_actions()
 205 		dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);  in mv_desc_run_tx_complete_actions()
 209 	dma_run_dependencies(&desc->async_tx);  in mv_desc_run_tx_complete_actions()
 223 		if (async_tx_test_ack(&iter->async_tx)) {  in mv_chan_clean_completed_slots()
 239 		__func__, __LINE__, desc, desc->async_tx.flags);  in mv_desc_clean_slot()
 [all …]
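
mv_xor's cleanup shows the canonical completion sequence for these drivers: harvest the cookie, unmap the descriptor, invoke the client callback, then release dependent transactions. A hedged sketch of that sequence, reusing the hypothetical foo_desc_sw from the sketch above:

static dma_cookie_t foo_run_tx_complete_actions(struct foo_desc_sw *desc,
						dma_cookie_t cookie)
{
	struct dma_async_tx_descriptor *tx = &desc->async_tx;

	BUG_ON(tx->cookie < 0);		/* negative cookies are errors */

	if (tx->cookie > 0) {
		cookie = tx->cookie;
		tx->cookie = 0;		/* mark the descriptor processed */

		dma_descriptor_unmap(tx);
		/* invoke the client callback, if one was set */
		dmaengine_desc_get_callback_invoke(tx, NULL);
	}

	/* start any transactions that were waiting on this one */
	dma_run_dependencies(tx);

	return cookie;
}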
 
 | 
| D | fsldma.c | 396 	set_desc_next(chan, &tail->hw, desc->async_tx.phys);  in append_ld_queue()
 428 		cookie = dma_cookie_assign(&child->async_tx);  in fsl_dma_tx_submit()
 449 	dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);  in fsl_dma_free_descriptor()
 470 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);  in fsl_dma_alloc_descriptor()
 471 	desc->async_tx.tx_submit = fsl_dma_tx_submit;  in fsl_dma_alloc_descriptor()
 472 	desc->async_tx.phys = pdesc;  in fsl_dma_alloc_descriptor()
 493 		if (async_tx_test_ack(&desc->async_tx))  in fsldma_clean_completed_descriptor()
 509 	struct dma_async_tx_descriptor *txd = &desc->async_tx;  in fsldma_run_tx_complete_actions()
 547 	if (!async_tx_test_ack(&desc->async_tx)) {  in fsldma_clean_running_descriptor()
 556 	dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);  in fsldma_clean_running_descriptor()
 [all …]
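
fsldma, like mmp_pdma above, assigns a cookie to every link of a prepared descriptor chain at submit time and returns the last one, so polling the final cookie covers the whole chain. A sketch under the same hypothetical foo_* naming, assuming the software descriptor carries a tx_list of its chained children and that the caller holds the channel lock:

static dma_cookie_t foo_tx_submit_chain(struct dma_async_tx_descriptor *tx)
{
	struct foo_desc_sw *desc = tx_to_foo_desc(tx);
	struct foo_desc_sw *child;
	dma_cookie_t cookie = -EINVAL;

	/* every chained descriptor gets its own cookie; return the last
	 * one, since it is the last to complete */
	list_for_each_entry(child, &desc->tx_list, node)
		cookie = dma_cookie_assign(&child->async_tx);

	/* a real driver would splice the chain onto its pending queue */
	return cookie;
}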
 
 | 
| D | altera-msgdma.c | 159 	struct dma_async_tx_descriptor async_tx;  member
 200 #define tx_to_desc(tx)	container_of(tx, struct msgdma_sw_desc, async_tx)
 368 	async_tx_ack(&first->async_tx);  in msgdma_prep_memcpy()
 369 	first->async_tx.flags = flags;  in msgdma_prep_memcpy()
 371 	return &first->async_tx;  in msgdma_prep_memcpy()
 453 	first->async_tx.flags = flags;  in msgdma_prep_slave_sg()
 455 	return &first->async_tx;  in msgdma_prep_slave_sg()
 590 		callback = desc->async_tx.callback;  in msgdma_chan_desc_cleanup()
 591 		callback_param = desc->async_tx.callback_param;  in msgdma_chan_desc_cleanup()
 616 	dma_cookie_complete(&desc->async_tx);  in msgdma_complete_descriptor()
 [all …]
 
 | 
| D | fsl_raid.c | 83 #define to_fsl_re_dma_desc(tx) container_of(tx, struct fsl_re_desc, async_tx)
 137 	dma_cookie_complete(&desc->async_tx);  in fsl_re_desc_done()
 138 	dma_descriptor_unmap(&desc->async_tx);  in fsl_re_desc_done()
 139 	dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);  in fsl_re_desc_done()
 149 		if (async_tx_test_ack(&desc->async_tx))  in fsl_re_cleanup_descs()
 256 	desc->async_tx.tx_submit = fsl_re_tx_submit;  in fsl_re_init_desc()
 257 	dma_async_tx_descriptor_init(&desc->async_tx, &re_chan->chan);  in fsl_re_init_desc()
 289 		desc->async_tx.flags = flags;  in fsl_re_chan_alloc_desc()
 306 		desc->async_tx.flags = flags;  in fsl_re_chan_alloc_desc()
 384 	return &desc->async_tx;  in fsl_re_prep_dma_genq()
 [all …]
 
 | 
| D | nbpfaxi.c | 151 	struct dma_async_tx_descriptor async_tx;  member
 630 		running = chan->running ? chan->running->async_tx.cookie : -EINVAL;  in nbpf_tx_status()
 641 				if (desc->async_tx.cookie == cookie) {  in nbpf_tx_status()
 648 					if (desc->async_tx.cookie == cookie) {  in nbpf_tx_status()
 668 	struct nbpf_desc *desc = container_of(tx, struct nbpf_desc, async_tx);  in nbpf_tx_submit()
 716 		dma_async_tx_descriptor_init(&desc->async_tx, dchan);  in nbpf_desc_page_alloc()
 717 		desc->async_tx.tx_submit = nbpf_tx_submit;  in nbpf_desc_page_alloc()
 758 		if (async_tx_test_ack(&desc->async_tx) && desc->user_wait) {  in nbpf_scan_acked()
 851 			__func__, desc, desc->async_tx.cookie);  in nbpf_chan_idle()
 949 	desc->async_tx.flags = flags;  in nbpf_prep_sg()
 [all …]
 
 | 
| D | fsldma.h | 104 	struct dma_async_tx_descriptor async_tx;  member
 192 #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
 
 | 
| D | mv_xor.h | 147 	struct dma_async_tx_descriptor	async_tx;  member
 | 
| D | fsl_raid.h | 294 	struct dma_async_tx_descriptor async_tx;  member
 | 
| D | iop-adma.h | 583 				(u32) (desc->async_tx.phys + (i << 5));  in iop_desc_init_zero_sum()
 | 
| /Linux-v5.4/drivers/dma/sh/ | 
| D | shdma-base.c | 73 		container_of(tx, struct shdma_desc, async_tx);  in shdma_tx_submit()
 92 				      chunk->async_tx.cookie > 0 ||  in shdma_tx_submit()
 93 				      chunk->async_tx.cookie == -EBUSY ||  in shdma_tx_submit()
 98 			chunk->async_tx.callback = callback;  in shdma_tx_submit()
 99 			chunk->async_tx.callback_param = tx->callback_param;  in shdma_tx_submit()
 102 			chunk->async_tx.callback = NULL;  in shdma_tx_submit()
 108 			tx->cookie, &chunk->async_tx, schan->id);  in shdma_tx_submit()
 237 		dma_async_tx_descriptor_init(&desc->async_tx,  in shdma_alloc_chan_resources()
 239 		desc->async_tx.tx_submit = shdma_tx_submit;  in shdma_alloc_chan_resources()
 337 		struct dma_async_tx_descriptor *tx = &desc->async_tx;  in __ld_cleanup()
 [all …]
 
 | 
| D | rcar-dmac.c | 73 	struct dma_async_tx_descriptor async_tx;  member
 94 #define to_rcar_dmac_desc(d)	container_of(d, struct rcar_dmac_desc, async_tx)
 402 		else if (desc->async_tx.callback)  in rcar_dmac_chan_start_xfer()
 506 		dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);  in rcar_dmac_desc_alloc()
 507 		desc->async_tx.tx_submit = rcar_dmac_tx_submit;  in rcar_dmac_desc_alloc()
 561 		if (async_tx_test_ack(&desc->async_tx)) {  in rcar_dmac_desc_recycle_acked()
 916 	desc->async_tx.flags = dma_flags;  in rcar_dmac_chan_prep_sg()
 917 	desc->async_tx.cookie = -EBUSY;  in rcar_dmac_chan_prep_sg()
 1013 	return &desc->async_tx;  in rcar_dmac_chan_prep_sg()
 1317 	if (cookie != desc->async_tx.cookie) {  in rcar_dmac_chan_get_residue()
 [all …]
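
rcar-dmac only recycles a descriptor once the client has acknowledged it, the same DMA_CTRL_ACK discipline fsldma and fsl_raid test above with async_tx_test_ack(). A small sketch of that gate; the list names are hypothetical:

static void foo_recycle_acked(struct list_head *done, struct list_head *free)
{
	struct foo_desc_sw *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, done, node) {
		/* DMA_CTRL_ACK is set by the client, by async_tx_ack(), or
		 * implicitly when another transaction depends on this one;
		 * until then the descriptor must not be reused */
		if (!async_tx_test_ack(&desc->async_tx))
			continue;
		list_move_tail(&desc->node, free);
	}
}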
 
 | 
| D | shdma.h | 57 #define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)
 | 
| /Linux-v5.4/drivers/dma/xilinx/ | 
| D | xilinx_dma.c | 308 	struct dma_async_tx_descriptor async_tx;  member
 455 	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
 805 	callback = desc->async_tx.callback;  in xilinx_dma_chan_handle_cyclic()
 806 	callback_param = desc->async_tx.callback_param;  in xilinx_dma_chan_handle_cyclic()
 837 		dmaengine_desc_get_callback(&desc->async_tx, &cb);  in xilinx_dma_chan_desc_cleanup()
 845 		dma_run_dependencies(&desc->async_tx);  in xilinx_dma_chan_desc_cleanup()
 1238 			     head_desc->async_tx.phys);  in xilinx_cdma_start_transfer()
 1306 			     head_desc->async_tx.phys);  in xilinx_dma_start_transfer()
 1311 				       head_desc->async_tx.phys);  in xilinx_dma_start_transfer()
 1315 				       head_desc->async_tx.phys);  in xilinx_dma_start_transfer()
 [all …]
 
 | 
| D | zynqmp_dma.c | 146 					     async_tx)
 183 	struct dma_async_tx_descriptor async_tx;  member
 484 		dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);  in zynqmp_dma_alloc_chan_resources()
 485 		desc->async_tx.tx_submit = zynqmp_dma_tx_submit;  in zynqmp_dma_alloc_chan_resources()
 607 		callback = desc->async_tx.callback;  in zynqmp_dma_chan_desc_cleanup()
 608 		callback_param = desc->async_tx.callback_param;  in zynqmp_dma_chan_desc_cleanup()
 633 	dma_cookie_complete(&desc->async_tx);  in zynqmp_dma_complete_descriptor()
 843 	async_tx_ack(&first->async_tx);  in zynqmp_dma_prep_memcpy()
 844 	first->async_tx.flags = flags;  in zynqmp_dma_prep_memcpy()
 845 	return &first->async_tx;  in zynqmp_dma_prep_memcpy()
 
 | 
| /Linux-v5.4/Documentation/crypto/ | 
| D | async-tx-api.txt | 26 The async_tx API provides methods for describing a chain of asynchronous
 87    async_tx call will implicitly set the acknowledged state.
 163 See include/linux/async_tx.h for more information on the flags.  See the
 171 accommodate assumptions made by applications using the async_tx API:
 222 include/linux/async_tx.h: core header file for the async_tx api
 223 crypto/async_tx/async_tx.c: async_tx interface to dmaengine and common code
 224 crypto/async_tx/async_memcpy.c: copy offload
 225 crypto/async_tx/async_xor.c: xor and xor zero sum offload
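
On the client side that async-tx-api.txt documents, each async_* call returns a descriptor that later calls can depend on via the depend_tx argument of init_async_submit(), which also implicitly acknowledges the dependency (the behaviour the doc's line 87 above refers to); ASYNC_TX_ACK is set only on a descriptor nothing else will chain from. A minimal sketch using the v5.4 entry points; the demo() wrapper and its arguments are hypothetical:

#include <linux/async_tx.h>

/* copy a page, then xor src_cnt pages (src_cnt >= 2) into the copy,
 * with the xor ordered after the copy */
static void demo(struct page *dest, struct page *src,
		 struct page **xor_srcs, int src_cnt, size_t len)
{
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;

	/* no ACK here: the xor below depends on this descriptor, and
	 * passing it as depend_tx acknowledges it implicitly */
	init_async_submit(&submit, 0, NULL, NULL, NULL, NULL);
	tx = async_memcpy(dest, src, 0, 0, len, &submit);

	/* order the xor after the copy; ACK it since nothing depends on
	 * it (dest contents are included as an xor source unless
	 * ASYNC_TX_XOR_ZERO_DST is set) */
	init_async_submit(&submit, ASYNC_TX_ACK, tx, NULL, NULL, NULL);
	tx = async_xor(dest, xor_srcs, 0, src_cnt, len, &submit);

	/* flush pending operations on every channel */
	async_tx_issue_pending_all();
}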
 
 | 
| /Linux-v5.4/drivers/dma/ppc4xx/ | 
| D | adma.c | 1468 	BUG_ON(desc->async_tx.cookie < 0);  in ppc440spe_adma_run_tx_complete_actions()
 1469 	if (desc->async_tx.cookie > 0) {  in ppc440spe_adma_run_tx_complete_actions()
 1470 		cookie = desc->async_tx.cookie;  in ppc440spe_adma_run_tx_complete_actions()
 1471 		desc->async_tx.cookie = 0;  in ppc440spe_adma_run_tx_complete_actions()
 1473 		dma_descriptor_unmap(&desc->async_tx);  in ppc440spe_adma_run_tx_complete_actions()
 1477 		dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);  in ppc440spe_adma_run_tx_complete_actions()
 1481 	dma_run_dependencies(&desc->async_tx);  in ppc440spe_adma_run_tx_complete_actions()
 1495 	if (!async_tx_test_ack(&desc->async_tx))  in ppc440spe_adma_clean_slot()
 1561 		    iter->async_tx.cookie, iter->idx, busy, iter->phys,  in __ppc440spe_adma_slot_cleanup()
 1563 		    async_tx_test_ack(&iter->async_tx));  in __ppc440spe_adma_slot_cleanup()
 [all …]
 
 | 
| D | adma.h | 23 		container_of(tx, struct ppc440spe_adma_desc_slot, async_tx)
 150 	struct dma_async_tx_descriptor async_tx;  member
 
 | 
| /Linux-v5.4/crypto/async_tx/ | 
| D | Makefile | 2 obj-$(CONFIG_ASYNC_CORE) += async_tx.o
 | 
| /Linux-v5.4/include/linux/platform_data/ | 
| D | dma-iop32x.h | 92 	struct dma_async_tx_descriptor async_tx;  member
 | 
| /Linux-v5.4/include/linux/ | 
| D | shdma-base.h | 48 	struct dma_async_tx_descriptor async_tx;  member
 | 
| /Linux-v5.4/Documentation/driver-api/dmaengine/ | 
| D | client.rst | 7 .. note:: For DMA Engine usage in async_tx please see:
 135      Although the async_tx API specifies that completion callback
 
 | 
| /Linux-v5.4/crypto/ | 
| D | Makefile | 186 obj-$(CONFIG_ASYNC_CORE) += async_tx/
 |