Lines Matching +full:dma +full:- +full:channel +full:- +full:mask
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
3 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
18 * typedef dma_cookie_t - an opaque DMA cookie
20 * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
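For context, a minimal sketch of how a client usually applies this convention; 'desc' is assumed to be a previously prepared descriptor:

	/* A negative cookie is an error code, a positive one identifies the request. */
	dma_cookie_t cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return cookie;		/* propagate the negative error code */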
31 * enum dma_status - DMA transaction status
46 * enum dma_transaction_type - DMA transaction types/indexes
49 * automatically set as dma devices are registered.
68 /* last transaction type for creation of the capabilities mask */
73 * enum dma_transfer_direction - dma transfer mode and direction indicator
89 * ----------------------------
91 * The gap (in bytes) between two chunks is called the inter-chunk gap (ICG).
96 * it is to be repeated and other per-transfer attributes.
103 * | Frame-1 | Frame-2 | ~ | Frame-'numf' |
111 * struct data_chunk - Element of scatter-gather list that makes a frame.
133 * struct dma_interleaved_template - Template to convey the transfer pattern to the DMAC
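To make the frame/chunk/ICG terminology concrete, here is a hedged sketch of filling a two-chunk template; 'chan', 'src_dma_addr' and 'dst_dma_addr' are assumed to exist in the client driver and all sizes are illustrative.

	/* Sketch: one frame of two 64-byte chunks, repeated 16 times. */
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *desc;

	xt = kzalloc(struct_size(xt, sgl, 2), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	xt->src_start  = src_dma_addr;	/* assumed, already DMA-mapped */
	xt->dst_start  = dst_dma_addr;
	xt->dir        = DMA_MEM_TO_DEV;
	xt->src_inc    = true;		/* source walks through memory */
	xt->dst_inc    = false;		/* destination behaves like a FIFO */
	xt->src_sgl    = true;		/* honour per-chunk source ICG */
	xt->dst_sgl    = false;
	xt->numf       = 16;		/* 16 frames */
	xt->frame_size = 2;		/* 2 chunks per frame */
	xt->sgl[0].size = 64;
	xt->sgl[0].icg  = 192;		/* skip 192 bytes before the next chunk */
	xt->sgl[1].size = 64;
	xt->sgl[1].icg  = 0;

	desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);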
164 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
166 * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
168 * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
171 * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
172 * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
173 * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
176 * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
180 * @DMA_PREP_CMD: tell the driver that the data passed to DMA API is command
184 * repeated when it ends until a transaction is issued on the same channel
208 * enum sum_check_bits - bit position of pq_check_flags
216 * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations
217 * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
218 * @SUM_CHECK_Q_RESULT - 1 if Reed-Solomon zero sum error, 0 otherwise
227 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
233 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
239 * enum dma_desc_metadata_mode - per descriptor metadata mode types supported
240 * @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the
245 * - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
251 * - DMA_DEV_TO_MEM:
259 * @DESC_METADATA_ENGINE - the metadata buffer is allocated/managed by the DMA
270 * - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
275 * 4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the amount
278 * - DMA_DEV_TO_MEM:
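A hedged sketch of the DESC_METADATA_CLIENT flow for the MEM_TO_DEV case described above; 'chan', 'buf', 'len', 'my_meta' and 'my_meta_len' are hypothetical client-side names.

	/* Sketch: client-owned metadata attached before submitting the descriptor. */
	if (!dmaengine_is_metadata_mode_supported(chan, DESC_METADATA_CLIENT))
		return -EINVAL;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		return -EINVAL;

	ret = dmaengine_desc_attach_metadata(desc, my_meta, my_meta_len);
	if (ret)
		return ret;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);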
301 * struct dma_router - DMA router structure
302 * @dev: pointer to the DMA router device
311 * struct dma_chan - devices supply DMA channels, clients use them
312 * @device: ptr to the dma device that supplies this channel, always !%NULL
313 * @slave: ptr to the device using this channel
315 * @completed_cookie: last completed cookie for this channel
316 * @chan_id: channel ID for sysfs
320 * dev_name(requester's dev):channel name, for example: "2b00000.mcasp:tx"
322 * @local: per-cpu pointer to a struct dma_chan_percpu
323 * @client_count: how many clients are using this channel
324 * @table_count: number of appearances in the mem-to-mem allocation table
325 * @router: pointer to the DMA router structure
326 * @route_data: channel specific data for the router
327 * @private: private data for certain client-channel associations
348 /* DMA router */
356 * struct dma_chan_dev - relate sysfs device node to backing channel device
357 * @chan: driver channel device
368 * enum dma_slave_buswidth - defines bus width of the DMA slave
384 * struct dma_slave_config - dma slave channel runtime config
386 * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are
390 * @src_addr: this is the physical address where DMA slave data
393 * @dst_addr: this is the physical address where DMA slave data
397 * register where DMA data shall be read. If the source
411 * an area instead of a single register to receive the data. Typically the DMA
418 * @slave_id: Slave requester id. Only valid for slave channels. The dma
419 * slave peripheral will have a unique id as dma requester which needs to be
422 * This struct is passed in as configuration data to a DMA engine
423 * in order to set up a certain channel for DMA transport at runtime.
424 * The DMA device/engine has to provide support for an additional
429 * follows: if it is likely that more than one DMA slave controllers in
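A hedged sketch of the runtime configuration step for a memory-to-device channel; the FIFO address, bus width and burst size are placeholders a real client would take from its hardware description.

	/* Sketch: configure the slave side of a TX channel before preparing transfers. */
	struct dma_slave_config cfg = {
		.direction      = DMA_MEM_TO_DEV,
		.dst_addr       = fifo_dma_addr,	/* device FIFO/data register */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst   = 8,
	};
	int ret = dmaengine_slave_config(chan, &cfg);

	if (ret)
		return ret;		/* channel rejected or cannot store the config */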
449 * enum dma_residue_granularity - Granularity of the reported transfer residue
451 * DMA channel is only able to tell whether a descriptor has been completed or
452 * not, which means residue reporting is not supported by this channel. The
459 * the hardware supports scatter-gather and the segment descriptor has a field
475 * struct dma_slave_caps - expose capabilities of a slave channel only
476 * @src_addr_widths: bit mask of src addr widths the channel supports.
477 * Width is specified in bytes, e.g. for a channel supporting
478 * a width of 4 the mask should have BIT(4) set.
479 * @dst_addr_widths: bit mask of dst addr widths the channel supports
480 * @directions: bit mask of slave directions the channel supports.
482 * each type, the dma controller should set BIT(<TYPE>) and same
484 * @min_burst: min burst capability per-transfer
485 * @max_burst: max burst capability per-transfer
487 * DMA transaction with no software intervention for reinitialization.
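These capabilities can be queried before use with dma_get_slave_caps(); a hedged sketch, assuming 'chan' has already been requested:

	/* Sketch: verify the channel can do DEV_TO_MEM before relying on it. */
	struct dma_slave_caps caps;
	int ret = dma_get_slave_caps(chan, &caps);

	if (ret)
		return ret;
	if (!(caps.directions & BIT(DMA_DEV_TO_MEM)))
		return -EINVAL;
	/* caps.max_burst and caps.src_addr_widths can further clamp the config. */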
513 return dev_name(&chan->dev->device); in dma_chan_name()
519 * typedef dma_filter_fn - callback filter for dma_request_channel
520 * @chan: channel to be reviewed
524 * suitable channel is passed to this routine for further dispositioning before
525 * being returned, where 'suitable' means a non-busy channel that
526 * satisfies the given capability mask. It returns 'true' to indicate that the
527 * channel is suitable.
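A hedged sketch of a filter callback and its use through the dma_request_channel() macro; matching on a specific struct device is purely illustrative and 'wanted_dev' is a placeholder.

	/* Sketch: accept only channels belonging to one particular DMA controller. */
	static bool my_dma_filter(struct dma_chan *chan, void *param)
	{
		struct device *wanted = param;

		return chan->device->dev == wanted;
	}

	/* caller side */
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, my_dma_filter, wanted_dev);
	if (!chan)
		return -ENODEV;		/* no suitable, non-busy channel found */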
535 DMA_TRANS_READ_FAILED, /* Source DMA read failed */
536 DMA_TRANS_WRITE_FAILED, /* Destination DMA write failed */
576 * struct dma_async_tx_descriptor - async transaction descriptor
577 * ---dma generic offload fields---
578 * @cookie: tracking cookie for this transaction, set to -EBUSY if
583 * @chan: target channel for this operation
591 * @metadata_ops: DMA driver provided metadata mode ops, need to be set by the
592 * DMA driver if metadata mode is supported with the descriptor
593 * ---async_tx api specific fields---
622 kref_get(&unmap->kref); in dma_set_unmap()
623 tx->unmap = unmap; in dma_set_unmap()
646 if (!tx->unmap) in dma_descriptor_unmap()
649 dmaengine_unmap_put(tx->unmap); in dma_descriptor_unmap()
650 tx->unmap = NULL; in dma_descriptor_unmap()
682 spin_lock_bh(&txd->lock); in txd_lock()
686 spin_unlock_bh(&txd->lock); in txd_unlock()
690 txd->next = next; in txd_chain()
691 next->parent = txd; in txd_chain()
695 txd->parent = NULL; in txd_clear_parent()
699 txd->next = NULL; in txd_clear_next()
703 return txd->parent; in txd_parent()
707 return txd->next; in txd_next()
712 * struct dma_tx_state - filled in to report the status of
714 * @last: last completed DMA cookie
715 * @used: last issued DMA cookie (i.e. the one in progress)
719 * @in_flight_bytes: amount of data in bytes cached by the DMA engine.
729 * enum dmaengine_alignment - defines alignment of the DMA async tx
743 * struct dma_slave_map - associates slave device and it's slave channel with
746 * @slave: slave channel name
756 * struct dma_filter - information for slave device/channel to filter_fn/param
759 * @mapcnt: number of slave device/channel in the map
760 * @map: array of channel to filter mapping data
769 * struct dma_device - info on the entity supplying DMA services
770 * @chancnt: how many DMA channels are supported
771 * @privatecnt: how many DMA channels are requested by dma_request_channel
776 * @desc_metadata_modes: metadata modes supported by the DMA device
778 * @max_pq: maximum number of PQ sources and PQ-continue capability
784 * @dev: struct device reference for dma mapping api
786 * @src_addr_widths: bit mask of src addr widths the device supports
788 * a width of 4 the mask should have BIT(4) set.
789 * @dst_addr_widths: bit mask of dst addr widths the device supports
790 * @directions: bit mask of slave directions the device supports.
792 * each type, the dma controller should set BIT(<TYPE>) and same
794 * @min_burst: min burst capability per-transfer
795 * @max_burst: max burst capability per-transfer
797 * DMA transaction with no software intervention for reinitialization.
803 * @device_free_chan_resources: release DMA channel's resources
812 * @device_prep_slave_sg: prepares a slave dma operation
813 * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
817 * @device_prep_dma_imm_data: DMAs 8 bytes of immediate data to the dst address
818 * @device_caps: May be used to override the generic DMA slave capabilities
819 * with per-channel specific ones
820 * @device_config: Pushes a new configuration to a channel, return 0 or an error
822 * @device_pause: Pauses any transfer happening on a channel. Returns
824 * @device_resume: Resumes any transfer on a channel previously
826 * @device_terminate_all: Aborts all transfers on a channel. Returns 0
944 if (chan->device->device_config) in dmaengine_slave_config()
945 return chan->device->device_config(chan, config); in dmaengine_slave_config()
947 return -ENOSYS; in dmaengine_slave_config()
964 if (!chan || !chan->device || !chan->device->device_prep_slave_sg) in dmaengine_prep_slave_single()
967 return chan->device->device_prep_slave_sg(chan, &sg, 1, in dmaengine_prep_slave_single()
975 if (!chan || !chan->device || !chan->device->device_prep_slave_sg) in dmaengine_prep_slave_sg()
978 return chan->device->device_prep_slave_sg(chan, sgl, sg_len, in dmaengine_prep_slave_sg()
989 if (!chan || !chan->device || !chan->device->device_prep_slave_sg) in dmaengine_prep_rio_sg()
992 return chan->device->device_prep_slave_sg(chan, sgl, sg_len, in dmaengine_prep_rio_sg()
1002 if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic) in dmaengine_prep_dma_cyclic()
1005 return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len, in dmaengine_prep_dma_cyclic()
1013 if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma) in dmaengine_prep_interleaved_dma()
1016 !test_bit(DMA_REPEAT, chan->device->cap_mask.bits)) in dmaengine_prep_interleaved_dma()
1019 return chan->device->device_prep_interleaved_dma(chan, xt, flags); in dmaengine_prep_interleaved_dma()
1026 if (!chan || !chan->device || !chan->device->device_prep_dma_memset) in dmaengine_prep_dma_memset()
1029 return chan->device->device_prep_dma_memset(chan, dest, value, in dmaengine_prep_dma_memset()
1037 if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy) in dmaengine_prep_dma_memcpy()
1040 return chan->device->device_prep_dma_memcpy(chan, dest, src, in dmaengine_prep_dma_memcpy()
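Taken together, the prep helpers above are normally used in a prepare/submit/issue sequence; a hedged sketch, where 'dma_buf', 'len' and the completion handler are placeholders:

	/* Sketch: one MEM_TO_DEV transfer from prepare to hardware kick-off. */
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_single(chan, dma_buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	desc->callback = my_dma_complete;	/* hypothetical completion callback */
	desc->callback_param = my_data;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return cookie;

	dma_async_issue_pending(chan);		/* nothing moves until this call */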
1050 return !!(chan->device->desc_metadata_modes & mode); in dmaengine_is_metadata_mode_supported()
1064 return -EINVAL; in dmaengine_desc_attach_metadata()
1075 return -EINVAL; in dmaengine_desc_set_metadata_len()
1080 * dmaengine_terminate_all() - Terminate all active DMA transfers
1081 * @chan: The channel for which to terminate the transfers
1088 if (chan->device->device_terminate_all) in dmaengine_terminate_all()
1089 return chan->device->device_terminate_all(chan); in dmaengine_terminate_all()
1091 return -ENOSYS; in dmaengine_terminate_all()
1095 * dmaengine_terminate_async() - Terminate all active DMA transfers
1096 * @chan: The channel for which to terminate the transfers
1099 * that have previously been submitted to the channel. It is not guaranteed
1110 * complete callback of a descriptor submitted on the same channel.
1117 if (chan->device->device_terminate_all) in dmaengine_terminate_async()
1118 return chan->device->device_terminate_all(chan); in dmaengine_terminate_async()
1120 return -EINVAL; in dmaengine_terminate_async()
1124 * dmaengine_synchronize() - Synchronize DMA channel termination
1125 * @chan: The channel to synchronize
1127 * Synchronizes the DMA channel termination to the current context. When this
1137 * This function must only be called from non-atomic context and must not be
1139 * channel.
1145 if (chan->device->device_synchronize) in dmaengine_synchronize()
1146 chan->device->device_synchronize(chan); in dmaengine_synchronize()
1150 * dmaengine_terminate_sync() - Terminate all active DMA transfers
1151 * @chan: The channel for which to terminate the transfers
1154 * that have previously been submitted to the channel. It is similar to
1155 * dmaengine_terminate_async() but guarantees that the DMA transfer has actually
1159 * This function must only be called from non-atomic context and must not be
1161 * channel.
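A hedged sketch of the two-step teardown these helpers describe; dmaengine_terminate_sync() combines both steps when the caller is allowed to sleep.

	/* Sketch: terminate from (possibly) atomic context ... */
	static void my_stop_from_irq(struct dma_chan *chan)
	{
		dmaengine_terminate_async(chan);	/* does not wait for callbacks */
	}

	/* ... then synchronize from process context before freeing buffers. */
	static void my_teardown(struct dma_chan *chan)
	{
		dmaengine_synchronize(chan);		/* may sleep; no spinlocks held */
	}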
1178 if (chan->device->device_pause) in dmaengine_pause()
1179 return chan->device->device_pause(chan); in dmaengine_pause()
1181 return -ENOSYS; in dmaengine_pause()
1186 if (chan->device->device_resume) in dmaengine_resume()
1187 return chan->device->device_resume(chan); in dmaengine_resume()
1189 return -ENOSYS; in dmaengine_resume()
1195 return chan->device->device_tx_status(chan, cookie, state); in dmaengine_tx_status()
1200 return desc->tx_submit(desc); in dmaengine_submit()
1206 return !(((1 << align) - 1) & (off1 | off2 | len)); in dmaengine_check_align()
1212 return dmaengine_check_align(dev->copy_align, off1, off2, len); in is_dma_copy_aligned()
1218 return dmaengine_check_align(dev->xor_align, off1, off2, len); in is_dma_xor_aligned()
1224 return dmaengine_check_align(dev->pq_align, off1, off2, len); in is_dma_pq_aligned()
1230 return dmaengine_check_align(dev->fill_align, off1, off2, len); in is_dma_fill_aligned()
1234 dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue) in dma_set_maxpq() argument
1236 dma->max_pq = maxpq; in dma_set_maxpq()
1238 dma->max_pq |= DMA_HAS_PQ_CONTINUE; in dma_set_maxpq()
1248 enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P; in dmaf_p_disabled_continue() local
1250 return (flags & mask) == mask; in dmaf_p_disabled_continue()
1253 static inline bool dma_dev_has_pq_continue(struct dma_device *dma) in dma_dev_has_pq_continue() argument
1255 return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE; in dma_dev_has_pq_continue()
1258 static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma) in dma_dev_to_maxpq() argument
1260 return dma->max_pq & ~DMA_HAS_PQ_CONTINUE; in dma_dev_to_maxpq()
1263 /* dma_maxpq - reduce maxpq in the face of continued operations
1264 * @dma - dma device with PQ capability
1265 * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
1276 static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags) in dma_maxpq() argument
1278 if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags)) in dma_maxpq()
1279 return dma_dev_to_maxpq(dma); in dma_maxpq()
1281 return dma_dev_to_maxpq(dma) - 1; in dma_maxpq()
1283 return dma_dev_to_maxpq(dma) - 3; in dma_maxpq()
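As an illustrative worked example (numbers invented): for a device advertising max_pq = 8 without native PQ continuation, dma_maxpq() returns 8 for a non-continued operation, 7 when continuing with P generation disabled (one source slot is reserved to feed the previous Q back in), and 5 for a full P+Q continuation (three slots are reserved for the previous P and Q terms).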
1303 return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl, in dmaengine_get_dst_icg()
1304 chunk->icg, chunk->dst_icg); in dmaengine_get_dst_icg()
1310 return dmaengine_get_icg(xt->src_inc, xt->src_sgl, in dmaengine_get_src_icg()
1311 chunk->icg, chunk->src_icg); in dmaengine_get_src_icg()
1314 /* --- public DMA engine API --- */
1354 tx->flags |= DMA_CTRL_ACK; in async_tx_ack()
1359 tx->flags &= ~DMA_CTRL_ACK; in async_tx_clear_ack()
1364 return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK; in async_tx_test_ack()
1367 #define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask)) argument
1371 set_bit(tx_type, dstp->bits); in __dma_cap_set()
1374 #define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask)) argument
1378 clear_bit(tx_type, dstp->bits); in __dma_cap_clear()
1381 #define dma_cap_zero(mask) __dma_cap_zero(&(mask)) argument
1384 bitmap_zero(dstp->bits, DMA_TX_TYPE_END); in __dma_cap_zero()
1387 #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask)) argument
1391 return test_bit(tx_type, srcp->bits); in __dma_has_cap()
1394 #define for_each_dma_cap_mask(cap, mask) \ argument
1395 for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
1398 * dma_async_issue_pending - flush pending transactions to HW
1399 * @chan: target DMA channel
1406 chan->device->device_issue_pending(chan); in dma_async_issue_pending()
1410 * dma_async_is_tx_complete - poll for transaction completion
1411 * @chan: DMA channel
1418 * the status of multiple cookies without re-checking hardware state.
1426 status = chan->device->device_tx_status(chan, cookie, &state); in dma_async_is_tx_complete()
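A hedged polling sketch built on the helper above; 'chan' and 'cookie' come from an earlier submit, and busy-waiting is only for illustration.

	/* Sketch: poll the transfer identified by 'cookie' until it finishes. */
	enum dma_status status;
	dma_cookie_t last, used;

	do {
		status = dma_async_is_tx_complete(chan, cookie, &last, &used);
		/* 'last'/'used' could feed dma_async_is_complete() to test
		 * further cookies without touching the hardware again.
		 */
		cpu_relax();
	} while (status == DMA_IN_PROGRESS);

	if (status != DMA_COMPLETE)
		pr_err("DMA transfer did not complete cleanly\n");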
1435 * dma_async_is_complete - test a cookie against chan state
1462 st->last = last; in dma_set_tx_state()
1463 st->used = used; in dma_set_tx_state()
1464 st->residue = residue; in dma_set_tx_state()
1472 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
1477 struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
1497 static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, in __dma_request_channel() argument
1507 return ERR_PTR(-ENODEV); in dma_request_chan()
1510 const dma_cap_mask_t *mask) in dma_request_chan_by_mask() argument
1512 return ERR_PTR(-ENODEV); in dma_request_chan_by_mask()
1520 return -ENXIO; in dma_get_slave_caps()
1529 ret = dma_get_slave_caps(tx->chan, &caps); in dmaengine_desc_set_reuse()
1534 return -EPERM; in dmaengine_desc_set_reuse()
1536 tx->flags |= DMA_CTRL_REUSE; in dmaengine_desc_set_reuse()
1542 tx->flags &= ~DMA_CTRL_REUSE; in dmaengine_desc_clear_reuse()
1547 return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE; in dmaengine_desc_test_reuse()
1554 return -EPERM; in dmaengine_desc_free()
1556 return desc->desc_free(desc); in dmaengine_desc_free()
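A hedged sketch of the reuse flow these helpers enable: mark a prepared descriptor reusable, resubmit it several times, then free it explicitly; 'dst', 'src' and 'len' are placeholders.

	/* Sketch: reuse one memcpy descriptor for repeated identical transfers. */
	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc || dmaengine_desc_set_reuse(desc))
		return -EINVAL;			/* channel may not support reuse */

	for (i = 0; i < 4; i++) {
		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
		/* wait for completion before the next round (omitted here) */
	}

	dmaengine_desc_free(desc);		/* reusable descriptors need an explicit free */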
1559 /* --- DMA device --- */
1569 #define dma_request_channel(mask, x, y) \ argument
1570 __dma_request_channel(&(mask), x, y, NULL)
1582 *dma_request_slave_channel_compat(const dma_cap_mask_t mask, in dma_request_slave_channel_compat() argument
1595 return __dma_request_channel(&mask, fn, fn_param, NULL); in dma_request_slave_channel_compat()
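Finally, a hedged sketch of the two usual ways to obtain a channel: by name with dma_request_chan() (preferred for new code) or by capability mask; 'pdev' is a placeholder platform device.

	/* Sketch: request a named slave channel described in DT/ACPI. */
	struct dma_chan *chan = dma_request_chan(&pdev->dev, "tx");

	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Sketch: request any MEMCPY-capable channel by capability mask. */
	dma_cap_mask_t mask;
	struct dma_chan *memcpy_chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	memcpy_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(memcpy_chan))
		return PTR_ERR(memcpy_chan);

	/* Both are released with dma_release_channel() when no longer needed. */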