Lines matching "sci-rm-range-tchan"
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
10 #include <linux/dma-mapping.h>
26 #include <linux/soc/ti/k3-ringacc.h>
29 #include <linux/dma/ti-cppi5.h>
31 #include "../virt-dma.h"
32 #include "k3-udma.h"
33 #include "k3-psil-priv.h"
221 struct udma_tchan *tchan; member
292 if (!uc->tchan) in udma_tchanrt_read()
294 return udma_read(uc->tchan->reg_rt, reg); in udma_tchanrt_read()
299 if (!uc->tchan) in udma_tchanrt_write()
301 udma_write(uc->tchan->reg_rt, reg, val); in udma_tchanrt_write()
307 if (!uc->tchan) in udma_tchanrt_update_bits()
309 udma_update_bits(uc->tchan->reg_rt, reg, mask, val); in udma_tchanrt_update_bits()
315 if (!uc->rchan) in udma_rchanrt_read()
317 return udma_read(uc->rchan->reg_rt, reg); in udma_rchanrt_read()
322 if (!uc->rchan) in udma_rchanrt_write()
324 udma_write(uc->rchan->reg_rt, reg, val); in udma_rchanrt_write()
330 if (!uc->rchan) in udma_rchanrt_update_bits()
332 udma_update_bits(uc->rchan->reg_rt, reg, mask, val); in udma_rchanrt_update_bits()
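All of the *_rt_* helpers above funnel into three low-level MMIO accessors. A minimal sketch of what udma_read(), udma_write() and udma_update_bits() are assumed to look like (thin readl()/writel() wrappers relying on the driver's existing <linux/io.h> include); their bodies are not part of the matched lines, so treat this as an assumption rather than the exact source:

static inline u32 udma_read(void __iomem *base, int reg)
{
        return readl(base + reg);
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
        writel(val, base + reg);
}

static inline void udma_update_bits(void __iomem *base, int reg,
                                    u32 mask, u32 val)
{
        u32 tmp, orig;

        orig = readl(base + reg);
        tmp = orig & ~mask;
        tmp |= (val & mask);

        /* Skip the register write when nothing actually changes */
        if (tmp != orig)
                writel(tmp, base + reg);
}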
337 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in navss_psil_pair()
340 return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci, in navss_psil_pair()
341 tisci_rm->tisci_navss_dev_id, in navss_psil_pair()
348 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in navss_psil_unpair()
351 return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci, in navss_psil_unpair()
352 tisci_rm->tisci_navss_dev_id, in navss_psil_unpair()
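navss_psil_pair() and navss_psil_unpair() above are the TI-SCI resource-manager calls that pair and unpair a PSI-L source thread with a destination thread. A hedged reconstruction of the pairing helper, assuming the destination thread id still needs the K3_PSIL_DST_THREAD_ID_OFFSET flag set before the call (as the channel-allocation paths later in the listing suggest):

static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
{
        struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

        /* Destination threads carry the PSI-L destination flag (assumption) */
        dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
        return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
                                              tisci_rm->tisci_navss_dev_id,
                                              src_thread, dst_thread);
}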
358 memset(&uc->config, 0, sizeof(uc->config)); in udma_reset_uchan()
359 uc->config.remote_thread_id = -1; in udma_reset_uchan()
360 uc->state = UDMA_CHAN_IS_IDLE; in udma_reset_uchan()
365 struct device *dev = uc->ud->dev; in udma_dump_chan_stdata()
369 if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) { in udma_dump_chan_stdata()
370 dev_dbg(dev, "TCHAN State data:\n"); in udma_dump_chan_stdata()
378 if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) { in udma_dump_chan_stdata()
391 return d->hwdesc[idx].cppi5_desc_paddr; in udma_curr_cppi5_desc_paddr()
396 return d->hwdesc[idx].cppi5_desc_vaddr; in udma_curr_cppi5_desc_vaddr()
402 struct udma_desc *d = uc->terminated_desc; in udma_udma_desc_from_paddr()
406 d->desc_idx); in udma_udma_desc_from_paddr()
413 d = uc->desc; in udma_udma_desc_from_paddr()
416 d->desc_idx); in udma_udma_desc_from_paddr()
428 if (uc->use_dma_pool) { in udma_free_hwdesc()
431 for (i = 0; i < d->hwdesc_count; i++) { in udma_free_hwdesc()
432 if (!d->hwdesc[i].cppi5_desc_vaddr) in udma_free_hwdesc()
435 dma_pool_free(uc->hdesc_pool, in udma_free_hwdesc()
436 d->hwdesc[i].cppi5_desc_vaddr, in udma_free_hwdesc()
437 d->hwdesc[i].cppi5_desc_paddr); in udma_free_hwdesc()
439 d->hwdesc[i].cppi5_desc_vaddr = NULL; in udma_free_hwdesc()
441 } else if (d->hwdesc[0].cppi5_desc_vaddr) { in udma_free_hwdesc()
442 struct udma_dev *ud = uc->ud; in udma_free_hwdesc()
444 dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size, in udma_free_hwdesc()
445 d->hwdesc[0].cppi5_desc_vaddr, in udma_free_hwdesc()
446 d->hwdesc[0].cppi5_desc_paddr); in udma_free_hwdesc()
448 d->hwdesc[0].cppi5_desc_vaddr = NULL; in udma_free_hwdesc()
459 spin_lock_irqsave(&ud->lock, flags); in udma_purge_desc_work()
460 list_splice_tail_init(&ud->desc_to_purge, &head); in udma_purge_desc_work()
461 spin_unlock_irqrestore(&ud->lock, flags); in udma_purge_desc_work()
464 struct udma_chan *uc = to_udma_chan(vd->tx.chan); in udma_purge_desc_work()
465 struct udma_desc *d = to_udma_desc(&vd->tx); in udma_purge_desc_work()
468 list_del(&vd->node); in udma_purge_desc_work()
473 if (!list_empty(&ud->desc_to_purge)) in udma_purge_desc_work()
474 schedule_work(&ud->purge_work); in udma_purge_desc_work()
479 struct udma_dev *ud = to_udma_dev(vd->tx.chan->device); in udma_desc_free()
480 struct udma_chan *uc = to_udma_chan(vd->tx.chan); in udma_desc_free()
481 struct udma_desc *d = to_udma_desc(&vd->tx); in udma_desc_free()
484 if (uc->terminated_desc == d) in udma_desc_free()
485 uc->terminated_desc = NULL; in udma_desc_free()
487 if (uc->use_dma_pool) { in udma_desc_free()
493 spin_lock_irqsave(&ud->lock, flags); in udma_desc_free()
494 list_add_tail(&vd->node, &ud->desc_to_purge); in udma_desc_free()
495 spin_unlock_irqrestore(&ud->lock, flags); in udma_desc_free()
497 schedule_work(&ud->purge_work); in udma_desc_free()
505 if (uc->tchan) in udma_is_chan_running()
507 if (uc->rchan) in udma_is_chan_running()
520 switch (uc->config.dir) { in udma_is_chan_paused()
545 return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr; in udma_get_rx_flush_hwdesc_paddr()
550 struct udma_desc *d = uc->desc; in udma_push_to_ring()
554 switch (uc->config.dir) { in udma_push_to_ring()
556 ring = uc->rflow->fd_ring; in udma_push_to_ring()
560 ring = uc->tchan->t_ring; in udma_push_to_ring()
563 return -EINVAL; in udma_push_to_ring()
566 /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */ in udma_push_to_ring()
567 if (idx == -1) { in udma_push_to_ring()
580 if (uc->config.dir != DMA_DEV_TO_MEM) in udma_desc_is_rx_flush()
594 switch (uc->config.dir) { in udma_pop_from_ring()
596 ring = uc->rflow->r_ring; in udma_pop_from_ring()
600 ring = uc->tchan->tc_ring; in udma_pop_from_ring()
603 return -ENOENT; in udma_pop_from_ring()
618 return -ENOENT; in udma_pop_from_ring()
628 switch (uc->config.dir) { in udma_reset_rings()
630 if (uc->rchan) { in udma_reset_rings()
631 ring1 = uc->rflow->fd_ring; in udma_reset_rings()
632 ring2 = uc->rflow->r_ring; in udma_reset_rings()
637 if (uc->tchan) { in udma_reset_rings()
638 ring1 = uc->tchan->t_ring; in udma_reset_rings()
639 ring2 = uc->tchan->tc_ring; in udma_reset_rings()
653 if (uc->terminated_desc) { in udma_reset_rings()
654 udma_desc_free(&uc->terminated_desc->vd); in udma_reset_rings()
655 uc->terminated_desc = NULL; in udma_reset_rings()
663 if (uc->tchan) { in udma_reset_counters()
677 if (uc->rchan) { in udma_reset_counters()
691 uc->bcnt = 0; in udma_reset_counters()
696 switch (uc->config.dir) { in udma_reset_chan()
710 return -EINVAL; in udma_reset_chan()
716 /* Hard reset: fully re-initialize the channel by freeing and re-allocating it */ in udma_reset_chan()
721 memcpy(&ucc_backup, &uc->config, sizeof(uc->config)); in udma_reset_chan()
722 uc->ud->ddev.device_free_chan_resources(&uc->vc.chan); in udma_reset_chan()
725 memcpy(&uc->config, &ucc_backup, sizeof(uc->config)); in udma_reset_chan()
726 ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan); in udma_reset_chan()
734 if (uc->config.dir == DMA_DEV_TO_MEM) in udma_reset_chan()
740 uc->state = UDMA_CHAN_IS_IDLE; in udma_reset_chan()
747 struct udma_chan_config *ucc = &uc->config; in udma_start_desc()
749 if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) { in udma_start_desc()
753 for (i = 0; i < uc->desc->sglen; i++) in udma_start_desc()
763 if (uc->config.ep_type == PSIL_EP_NATIVE) in udma_chan_needs_reconfiguration()
767 if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr))) in udma_chan_needs_reconfiguration()
775 struct virt_dma_desc *vd = vchan_next_desc(&uc->vc); in udma_start()
778 uc->desc = NULL; in udma_start()
779 return -ENOENT; in udma_start()
782 list_del(&vd->node); in udma_start()
784 uc->desc = to_udma_desc(&vd->tx); in udma_start()
798 switch (uc->desc->dir) { in udma_start()
801 if (uc->config.ep_type == PSIL_EP_PDMA_XY) { in udma_start()
802 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | in udma_start()
803 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); in udma_start()
805 uc->ud->match_data; in udma_start()
807 if (uc->config.enable_acc32) in udma_start()
809 if (uc->config.enable_burst) in udma_start()
818 PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt, in udma_start()
819 match_data->statictr_z_mask)); in udma_start()
822 memcpy(&uc->static_tr, &uc->desc->static_tr, in udma_start()
823 sizeof(uc->static_tr)); in udma_start()
836 if (uc->config.ep_type == PSIL_EP_PDMA_XY) { in udma_start()
837 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | in udma_start()
838 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); in udma_start()
840 if (uc->config.enable_acc32) in udma_start()
842 if (uc->config.enable_burst) in udma_start()
850 memcpy(&uc->static_tr, &uc->desc->static_tr, in udma_start()
851 sizeof(uc->static_tr)); in udma_start()
870 return -EINVAL; in udma_start()
873 uc->state = UDMA_CHAN_IS_ACTIVE; in udma_start()
881 enum udma_chan_state old_state = uc->state; in udma_stop()
883 uc->state = UDMA_CHAN_IS_TERMINATING; in udma_stop()
884 reinit_completion(&uc->teardown_completed); in udma_stop()
886 switch (uc->config.dir) { in udma_stop()
888 if (!uc->cyclic && !uc->desc) in udma_stop()
889 udma_push_to_ring(uc, -1); in udma_stop()
909 uc->state = old_state; in udma_stop()
910 complete_all(&uc->teardown_completed); in udma_stop()
911 return -EINVAL; in udma_stop()
919 struct udma_desc *d = uc->desc; in udma_cyclic_packet_elapsed()
922 h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr; in udma_cyclic_packet_elapsed()
924 udma_push_to_ring(uc, d->desc_idx); in udma_cyclic_packet_elapsed()
925 d->desc_idx = (d->desc_idx + 1) % d->sglen; in udma_cyclic_packet_elapsed()
930 struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr; in udma_fetch_epib()
932 memcpy(d->metadata, h_desc->epib, d->metadata_size); in udma_fetch_epib()
940 if (uc->config.ep_type == PSIL_EP_NATIVE || in udma_is_desc_really_done()
941 uc->config.dir != DMA_MEM_TO_DEV) in udma_is_desc_really_done()
949 uc->tx_drain.residue = bcnt - peer_bcnt; in udma_is_desc_really_done()
950 uc->tx_drain.tstamp = ktime_get(); in udma_is_desc_really_done()
967 if (uc->desc) { in udma_check_tx_completion()
969 residue_diff = uc->tx_drain.residue; in udma_check_tx_completion()
970 time_diff = uc->tx_drain.tstamp; in udma_check_tx_completion()
975 desc_done = udma_is_desc_really_done(uc, uc->desc); in udma_check_tx_completion()
983 time_diff = ktime_sub(uc->tx_drain.tstamp, in udma_check_tx_completion()
985 residue_diff -= uc->tx_drain.residue; in udma_check_tx_completion()
994 uc->tx_drain.residue; in udma_check_tx_completion()
997 schedule_delayed_work(&uc->tx_drain.work, HZ); in udma_check_tx_completion()
1006 if (uc->desc) { in udma_check_tx_completion()
1007 struct udma_desc *d = uc->desc; in udma_check_tx_completion()
1009 uc->bcnt += d->residue; in udma_check_tx_completion()
1011 vchan_cookie_complete(&d->vd); in udma_check_tx_completion()
1029 spin_lock_irqsave(&uc->vc.lock, flags); in udma_ring_irq_handler()
1033 complete_all(&uc->teardown_completed); in udma_ring_irq_handler()
1035 if (uc->terminated_desc) { in udma_ring_irq_handler()
1036 udma_desc_free(&uc->terminated_desc->vd); in udma_ring_irq_handler()
1037 uc->terminated_desc = NULL; in udma_ring_irq_handler()
1040 if (!uc->desc) in udma_ring_irq_handler()
1050 d->desc_idx); in udma_ring_irq_handler()
1052 dev_err(uc->ud->dev, "not matching descriptors!\n"); in udma_ring_irq_handler()
1056 if (d == uc->desc) { in udma_ring_irq_handler()
1058 if (uc->cyclic) { in udma_ring_irq_handler()
1060 vchan_cyclic_callback(&d->vd); in udma_ring_irq_handler()
1063 uc->bcnt += d->residue; in udma_ring_irq_handler()
1065 vchan_cookie_complete(&d->vd); in udma_ring_irq_handler()
1067 schedule_delayed_work(&uc->tx_drain.work, in udma_ring_irq_handler()
1076 dma_cookie_complete(&d->vd.tx); in udma_ring_irq_handler()
1080 spin_unlock_irqrestore(&uc->vc.lock, flags); in udma_ring_irq_handler()
1091 spin_lock_irqsave(&uc->vc.lock, flags); in udma_udma_irq_handler()
1092 d = uc->desc; in udma_udma_irq_handler()
1094 d->tr_idx = (d->tr_idx + 1) % d->sglen; in udma_udma_irq_handler()
1096 if (uc->cyclic) { in udma_udma_irq_handler()
1097 vchan_cyclic_callback(&d->vd); in udma_udma_irq_handler()
1100 uc->bcnt += d->residue; in udma_udma_irq_handler()
1102 vchan_cookie_complete(&d->vd); in udma_udma_irq_handler()
1106 spin_unlock_irqrestore(&uc->vc.lock, flags); in udma_udma_irq_handler()
1112 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
1117 * Allocate a range of RX flow ids for future use; these flows can be requested
1118 * only by explicit flow id number. If @from is set to -1 it will try to find
1119 * the first free range. If @from is a positive value it will force allocation only
1120 * of the specified range of flows.
1122 * Returns -ENOMEM if a free range can't be found.
1123 * -EEXIST if requested range is busy.
1124 * -EINVAL if wrong input values passed.
1134 tmp_from = ud->rchan_cnt; in __udma_alloc_gp_rflow_range()
1136 if (tmp_from < ud->rchan_cnt) in __udma_alloc_gp_rflow_range()
1137 return -EINVAL; in __udma_alloc_gp_rflow_range()
1139 if (tmp_from + cnt > ud->rflow_cnt) in __udma_alloc_gp_rflow_range()
1140 return -EINVAL; in __udma_alloc_gp_rflow_range()
1142 bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated, in __udma_alloc_gp_rflow_range()
1143 ud->rflow_cnt); in __udma_alloc_gp_rflow_range()
1146 ud->rflow_cnt, in __udma_alloc_gp_rflow_range()
1148 if (start >= ud->rflow_cnt) in __udma_alloc_gp_rflow_range()
1149 return -ENOMEM; in __udma_alloc_gp_rflow_range()
1152 return -EEXIST; in __udma_alloc_gp_rflow_range()
1154 bitmap_set(ud->rflow_gp_map_allocated, start, cnt); in __udma_alloc_gp_rflow_range()
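Read together with the kernel-doc above, the allocator treats a set bit in either rflow_gp_map or rflow_gp_map_allocated as "not allocatable here" and searches the OR of the two bitmaps for a free window. A hedged sketch of the function body, reconstructed from the visible fragments (the K3_UDMA_MAX_RFLOWS bound for the scratch bitmap and the final start/from comparison are assumptions):

        DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
        int start, tmp_from;

        tmp_from = from;
        if (tmp_from < 0)
                tmp_from = ud->rchan_cnt;       /* skip the default per-rchan flows */
        if (tmp_from < ud->rchan_cnt)
                return -EINVAL;
        if (tmp_from + cnt > ud->rflow_cnt)
                return -EINVAL;

        /* A set bit in either map means the flow cannot be allocated as GP here */
        bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
                  ud->rflow_cnt);

        start = bitmap_find_next_zero_area(tmp, ud->rflow_cnt, tmp_from, cnt, 0);
        if (start >= ud->rflow_cnt)
                return -ENOMEM;

        if (from >= 0 && start != from)
                return -EEXIST;         /* explicitly requested range is busy */

        bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
        return start;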
1160 if (from < ud->rchan_cnt) in __udma_free_gp_rflow_range()
1161 return -EINVAL; in __udma_free_gp_rflow_range()
1162 if (from + cnt > ud->rflow_cnt) in __udma_free_gp_rflow_range()
1163 return -EINVAL; in __udma_free_gp_rflow_range()
1165 bitmap_clear(ud->rflow_gp_map_allocated, from, cnt); in __udma_free_gp_rflow_range()
1174 * TI-SCI FW will perform additional permission check anyway, it's in __udma_get_rflow()
1178 if (id < 0 || id >= ud->rflow_cnt) in __udma_get_rflow()
1179 return ERR_PTR(-ENOENT); in __udma_get_rflow()
1181 if (test_bit(id, ud->rflow_in_use)) in __udma_get_rflow()
1182 return ERR_PTR(-ENOENT); in __udma_get_rflow()
1185 if (!test_bit(id, ud->rflow_gp_map) && in __udma_get_rflow()
1186 !test_bit(id, ud->rflow_gp_map_allocated)) in __udma_get_rflow()
1187 return ERR_PTR(-EINVAL); in __udma_get_rflow()
1189 dev_dbg(ud->dev, "get rflow%d\n", id); in __udma_get_rflow()
1190 set_bit(id, ud->rflow_in_use); in __udma_get_rflow()
1191 return &ud->rflows[id]; in __udma_get_rflow()
1196 if (!test_bit(rflow->id, ud->rflow_in_use)) { in __udma_put_rflow()
1197 dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id); in __udma_put_rflow()
1201 dev_dbg(ud->dev, "put rflow%d\n", rflow->id); in __udma_put_rflow()
1202 clear_bit(rflow->id, ud->rflow_in_use); in __udma_put_rflow()
1211 if (test_bit(id, ud->res##_map)) { \
1212 dev_err(ud->dev, "%s%d is in use\n", #res, id); \
1213 return ERR_PTR(-ENOENT); \
1218 if (tpl >= ud->tpl_levels) \
1219 tpl = ud->tpl_levels - 1; \
1221 start = ud->tpl_start_idx[tpl]; \
1223 id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
1225 if (id == ud->res##_cnt) { \
1226 return ERR_PTR(-ENOENT); \
1230 set_bit(id, ud->res##_map); \
1231 return &ud->res##s[id]; \
1234 UDMA_RESERVE_RESOURCE(tchan);
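The UDMA_RESERVE_RESOURCE() fragments above stamp out one reservation helper per resource type. As a hedged illustration (the branch for an explicitly requested id is reconstructed, since the matched lines do not show it), UDMA_RESERVE_RESOURCE(tchan) expands to roughly the following; an rchan counterpart is generated the same way:

static struct udma_tchan *__udma_reserve_tchan(struct udma_dev *ud,
                                               enum udma_tp_level tpl, int id)
{
        if (id >= 0) {
                /* Explicit request: fail if the channel is already taken */
                if (test_bit(id, ud->tchan_map)) {
                        dev_err(ud->dev, "tchan%d is in use\n", id);
                        return ERR_PTR(-ENOENT);
                }
        } else {
                int start;

                /* Clamp to the highest throughput level the device supports */
                if (tpl >= ud->tpl_levels)
                        tpl = ud->tpl_levels - 1;

                start = ud->tpl_start_idx[tpl];

                id = find_next_zero_bit(ud->tchan_map, ud->tchan_cnt, start);
                if (id == ud->tchan_cnt)
                        return ERR_PTR(-ENOENT);
        }

        set_bit(id, ud->tchan_map);
        return &ud->tchans[id];
}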
1239 struct udma_dev *ud = uc->ud; in udma_get_tchan()
1241 if (uc->tchan) { in udma_get_tchan()
1242 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n", in udma_get_tchan()
1243 uc->id, uc->tchan->id); in udma_get_tchan()
1247 uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1); in udma_get_tchan()
1249 return PTR_ERR_OR_ZERO(uc->tchan); in udma_get_tchan()
1254 struct udma_dev *ud = uc->ud; in udma_get_rchan()
1256 if (uc->rchan) { in udma_get_rchan()
1257 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n", in udma_get_rchan()
1258 uc->id, uc->rchan->id); in udma_get_rchan()
1262 uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1); in udma_get_rchan()
1264 return PTR_ERR_OR_ZERO(uc->rchan); in udma_get_rchan()
1269 struct udma_dev *ud = uc->ud; in udma_get_chan_pair()
1272 if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) { in udma_get_chan_pair()
1273 dev_info(ud->dev, "chan%d: already have %d pair allocated\n", in udma_get_chan_pair()
1274 uc->id, uc->tchan->id); in udma_get_chan_pair()
1278 if (uc->tchan) { in udma_get_chan_pair()
1279 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n", in udma_get_chan_pair()
1280 uc->id, uc->tchan->id); in udma_get_chan_pair()
1281 return -EBUSY; in udma_get_chan_pair()
1282 } else if (uc->rchan) { in udma_get_chan_pair()
1283 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n", in udma_get_chan_pair()
1284 uc->id, uc->rchan->id); in udma_get_chan_pair()
1285 return -EBUSY; in udma_get_chan_pair()
1289 end = min(ud->tchan_cnt, ud->rchan_cnt); in udma_get_chan_pair()
1291 chan_id = ud->tpl_start_idx[ud->tpl_levels - 1]; in udma_get_chan_pair()
1293 if (!test_bit(chan_id, ud->tchan_map) && in udma_get_chan_pair()
1294 !test_bit(chan_id, ud->rchan_map)) in udma_get_chan_pair()
1299 return -ENOENT; in udma_get_chan_pair()
1301 set_bit(chan_id, ud->tchan_map); in udma_get_chan_pair()
1302 set_bit(chan_id, ud->rchan_map); in udma_get_chan_pair()
1303 uc->tchan = &ud->tchans[chan_id]; in udma_get_chan_pair()
1304 uc->rchan = &ud->rchans[chan_id]; in udma_get_chan_pair()
1311 struct udma_dev *ud = uc->ud; in udma_get_rflow()
1313 if (!uc->rchan) { in udma_get_rflow()
1314 dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id); in udma_get_rflow()
1315 return -EINVAL; in udma_get_rflow()
1318 if (uc->rflow) { in udma_get_rflow()
1319 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n", in udma_get_rflow()
1320 uc->id, uc->rflow->id); in udma_get_rflow()
1324 uc->rflow = __udma_get_rflow(ud, flow_id); in udma_get_rflow()
1326 return PTR_ERR_OR_ZERO(uc->rflow); in udma_get_rflow()
1331 struct udma_dev *ud = uc->ud; in udma_put_rchan()
1333 if (uc->rchan) { in udma_put_rchan()
1334 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id, in udma_put_rchan()
1335 uc->rchan->id); in udma_put_rchan()
1336 clear_bit(uc->rchan->id, ud->rchan_map); in udma_put_rchan()
1337 uc->rchan = NULL; in udma_put_rchan()
1343 struct udma_dev *ud = uc->ud; in udma_put_tchan()
1345 if (uc->tchan) { in udma_put_tchan()
1346 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id, in udma_put_tchan()
1347 uc->tchan->id); in udma_put_tchan()
1348 clear_bit(uc->tchan->id, ud->tchan_map); in udma_put_tchan()
1349 uc->tchan = NULL; in udma_put_tchan()
1355 struct udma_dev *ud = uc->ud; in udma_put_rflow()
1357 if (uc->rflow) { in udma_put_rflow()
1358 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id, in udma_put_rflow()
1359 uc->rflow->id); in udma_put_rflow()
1360 __udma_put_rflow(ud, uc->rflow); in udma_put_rflow()
1361 uc->rflow = NULL; in udma_put_rflow()
1367 if (!uc->tchan) in udma_free_tx_resources()
1370 k3_ringacc_ring_free(uc->tchan->t_ring); in udma_free_tx_resources()
1371 k3_ringacc_ring_free(uc->tchan->tc_ring); in udma_free_tx_resources()
1372 uc->tchan->t_ring = NULL; in udma_free_tx_resources()
1373 uc->tchan->tc_ring = NULL; in udma_free_tx_resources()
1381 struct udma_dev *ud = uc->ud; in udma_alloc_tx_resources()
1388 ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->tchan->id, -1, in udma_alloc_tx_resources()
1389 &uc->tchan->t_ring, in udma_alloc_tx_resources()
1390 &uc->tchan->tc_ring); in udma_alloc_tx_resources()
1392 ret = -EBUSY; in udma_alloc_tx_resources()
1401 ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg); in udma_alloc_tx_resources()
1402 ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg); in udma_alloc_tx_resources()
1410 k3_ringacc_ring_free(uc->tchan->tc_ring); in udma_alloc_tx_resources()
1411 uc->tchan->tc_ring = NULL; in udma_alloc_tx_resources()
1412 k3_ringacc_ring_free(uc->tchan->t_ring); in udma_alloc_tx_resources()
1413 uc->tchan->t_ring = NULL; in udma_alloc_tx_resources()
1422 if (!uc->rchan) in udma_free_rx_resources()
1425 if (uc->rflow) { in udma_free_rx_resources()
1426 struct udma_rflow *rflow = uc->rflow; in udma_free_rx_resources()
1428 k3_ringacc_ring_free(rflow->fd_ring); in udma_free_rx_resources()
1429 k3_ringacc_ring_free(rflow->r_ring); in udma_free_rx_resources()
1430 rflow->fd_ring = NULL; in udma_free_rx_resources()
1431 rflow->r_ring = NULL; in udma_free_rx_resources()
1441 struct udma_dev *ud = uc->ud; in udma_alloc_rx_resources()
1452 if (uc->config.dir == DMA_MEM_TO_MEM) in udma_alloc_rx_resources()
1455 ret = udma_get_rflow(uc, uc->rchan->id); in udma_alloc_rx_resources()
1457 ret = -EBUSY; in udma_alloc_rx_resources()
1461 rflow = uc->rflow; in udma_alloc_rx_resources()
1462 fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id; in udma_alloc_rx_resources()
1463 ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1, in udma_alloc_rx_resources()
1464 &rflow->fd_ring, &rflow->r_ring); in udma_alloc_rx_resources()
1466 ret = -EBUSY; in udma_alloc_rx_resources()
1472 if (uc->config.pkt_mode) in udma_alloc_rx_resources()
1480 ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg); in udma_alloc_rx_resources()
1482 ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg); in udma_alloc_rx_resources()
1490 k3_ringacc_ring_free(rflow->r_ring); in udma_alloc_rx_resources()
1491 rflow->r_ring = NULL; in udma_alloc_rx_resources()
1492 k3_ringacc_ring_free(rflow->fd_ring); in udma_alloc_rx_resources()
1493 rflow->fd_ring = NULL; in udma_alloc_rx_resources()
1525 struct udma_dev *ud = uc->ud; in udma_tisci_m2m_channel_config()
1526 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in udma_tisci_m2m_channel_config()
1527 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; in udma_tisci_m2m_channel_config()
1528 struct udma_tchan *tchan = uc->tchan; in udma_tisci_m2m_channel_config() local
1529 struct udma_rchan *rchan = uc->rchan; in udma_tisci_m2m_channel_config()
1532 /* Non-synchronized - mem-to-mem type of transfer */ in udma_tisci_m2m_channel_config()
1533 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); in udma_tisci_m2m_channel_config()
1538 req_tx.nav_id = tisci_rm->tisci_dev_id; in udma_tisci_m2m_channel_config()
1539 req_tx.index = tchan->id; in udma_tisci_m2m_channel_config()
1543 req_tx.tx_atype = ud->atype; in udma_tisci_m2m_channel_config()
1545 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); in udma_tisci_m2m_channel_config()
1547 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); in udma_tisci_m2m_channel_config()
1552 req_rx.nav_id = tisci_rm->tisci_dev_id; in udma_tisci_m2m_channel_config()
1553 req_rx.index = rchan->id; in udma_tisci_m2m_channel_config()
1557 req_rx.rx_atype = ud->atype; in udma_tisci_m2m_channel_config()
1559 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); in udma_tisci_m2m_channel_config()
1561 dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret); in udma_tisci_m2m_channel_config()
1568 struct udma_dev *ud = uc->ud; in udma_tisci_tx_channel_config()
1569 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in udma_tisci_tx_channel_config()
1570 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; in udma_tisci_tx_channel_config()
1571 struct udma_tchan *tchan = uc->tchan; in udma_tisci_tx_channel_config() local
1572 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); in udma_tisci_tx_channel_config()
1577 if (uc->config.pkt_mode) { in udma_tisci_tx_channel_config()
1579 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, in udma_tisci_tx_channel_config()
1580 uc->config.psd_size, 0); in udma_tisci_tx_channel_config()
1587 req_tx.nav_id = tisci_rm->tisci_dev_id; in udma_tisci_tx_channel_config()
1588 req_tx.index = tchan->id; in udma_tisci_tx_channel_config()
1590 req_tx.tx_supr_tdpkt = uc->config.notdpkt; in udma_tisci_tx_channel_config()
1593 req_tx.tx_atype = uc->config.atype; in udma_tisci_tx_channel_config()
1595 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); in udma_tisci_tx_channel_config()
1597 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); in udma_tisci_tx_channel_config()
1604 struct udma_dev *ud = uc->ud; in udma_tisci_rx_channel_config()
1605 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in udma_tisci_rx_channel_config()
1606 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; in udma_tisci_rx_channel_config()
1607 struct udma_rchan *rchan = uc->rchan; in udma_tisci_rx_channel_config()
1608 int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring); in udma_tisci_rx_channel_config()
1609 int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring); in udma_tisci_rx_channel_config()
1615 if (uc->config.pkt_mode) { in udma_tisci_rx_channel_config()
1617 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, in udma_tisci_rx_channel_config()
1618 uc->config.psd_size, 0); in udma_tisci_rx_channel_config()
1625 req_rx.nav_id = tisci_rm->tisci_dev_id; in udma_tisci_rx_channel_config()
1626 req_rx.index = rchan->id; in udma_tisci_rx_channel_config()
1630 req_rx.rx_atype = uc->config.atype; in udma_tisci_rx_channel_config()
1632 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); in udma_tisci_rx_channel_config()
1634 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret); in udma_tisci_rx_channel_config()
1653 flow_req.nav_id = tisci_rm->tisci_dev_id; in udma_tisci_rx_channel_config()
1654 flow_req.flow_index = rchan->id; in udma_tisci_rx_channel_config()
1656 if (uc->config.needs_epib) in udma_tisci_rx_channel_config()
1660 if (uc->config.psd_size) in udma_tisci_rx_channel_config()
1675 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req); in udma_tisci_rx_channel_config()
1678 dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret); in udma_tisci_rx_channel_config()
1686 struct udma_dev *ud = to_udma_dev(chan->device); in udma_alloc_chan_resources()
1687 const struct udma_soc_data *soc_data = ud->soc_data; in udma_alloc_chan_resources()
1692 if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) { in udma_alloc_chan_resources()
1693 uc->use_dma_pool = true; in udma_alloc_chan_resources()
1695 if (uc->config.dir == DMA_MEM_TO_MEM) { in udma_alloc_chan_resources()
1696 uc->config.hdesc_size = cppi5_trdesc_calc_size( in udma_alloc_chan_resources()
1698 uc->config.pkt_mode = false; in udma_alloc_chan_resources()
1702 if (uc->use_dma_pool) { in udma_alloc_chan_resources()
1703 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev, in udma_alloc_chan_resources()
1704 uc->config.hdesc_size, in udma_alloc_chan_resources()
1705 ud->desc_align, in udma_alloc_chan_resources()
1707 if (!uc->hdesc_pool) { in udma_alloc_chan_resources()
1708 dev_err(ud->ddev.dev, in udma_alloc_chan_resources()
1710 uc->use_dma_pool = false; in udma_alloc_chan_resources()
1711 ret = -ENOMEM; in udma_alloc_chan_resources()
1720 reinit_completion(&uc->teardown_completed); in udma_alloc_chan_resources()
1721 complete_all(&uc->teardown_completed); in udma_alloc_chan_resources()
1722 uc->state = UDMA_CHAN_IS_IDLE; in udma_alloc_chan_resources()
1724 switch (uc->config.dir) { in udma_alloc_chan_resources()
1726 /* Non-synchronized - mem-to-mem type of transfer */ in udma_alloc_chan_resources()
1727 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__, in udma_alloc_chan_resources()
1728 uc->id); in udma_alloc_chan_resources()
1746 uc->config.src_thread = ud->psil_base + uc->tchan->id; in udma_alloc_chan_resources()
1747 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | in udma_alloc_chan_resources()
1750 irq_ring = uc->tchan->tc_ring; in udma_alloc_chan_resources()
1751 irq_udma_idx = uc->tchan->id; in udma_alloc_chan_resources()
1756 /* Slave transfer synchronized - mem to dev (TX) transfer */ in udma_alloc_chan_resources()
1757 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__, in udma_alloc_chan_resources()
1758 uc->id); in udma_alloc_chan_resources()
1764 uc->config.src_thread = ud->psil_base + uc->tchan->id; in udma_alloc_chan_resources()
1765 uc->config.dst_thread = uc->config.remote_thread_id; in udma_alloc_chan_resources()
1766 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; in udma_alloc_chan_resources()
1768 irq_ring = uc->tchan->tc_ring; in udma_alloc_chan_resources()
1769 irq_udma_idx = uc->tchan->id; in udma_alloc_chan_resources()
1774 /* Slave transfer synchronized - dev to mem (RX) transfer */ in udma_alloc_chan_resources()
1775 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__, in udma_alloc_chan_resources()
1776 uc->id); in udma_alloc_chan_resources()
1782 uc->config.src_thread = uc->config.remote_thread_id; in udma_alloc_chan_resources()
1783 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | in udma_alloc_chan_resources()
1786 irq_ring = uc->rflow->r_ring; in udma_alloc_chan_resources()
1787 irq_udma_idx = soc_data->rchan_oes_offset + uc->rchan->id; in udma_alloc_chan_resources()
1793 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", in udma_alloc_chan_resources()
1794 __func__, uc->id, uc->config.dir); in udma_alloc_chan_resources()
1795 ret = -EINVAL; in udma_alloc_chan_resources()
1805 dev_warn(ud->dev, "chan%d: is running!\n", uc->id); in udma_alloc_chan_resources()
1808 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); in udma_alloc_chan_resources()
1809 ret = -EBUSY; in udma_alloc_chan_resources()
1814 /* PSI-L pairing */ in udma_alloc_chan_resources()
1815 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread); in udma_alloc_chan_resources()
1817 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n", in udma_alloc_chan_resources()
1818 uc->config.src_thread, uc->config.dst_thread); in udma_alloc_chan_resources()
1822 uc->psil_paired = true; in udma_alloc_chan_resources()
1824 uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring); in udma_alloc_chan_resources()
1825 if (uc->irq_num_ring <= 0) { in udma_alloc_chan_resources()
1826 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", in udma_alloc_chan_resources()
1828 ret = -EINVAL; in udma_alloc_chan_resources()
1832 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, in udma_alloc_chan_resources()
1833 IRQF_TRIGGER_HIGH, uc->name, uc); in udma_alloc_chan_resources()
1835 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); in udma_alloc_chan_resources()
1840 if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) { in udma_alloc_chan_resources()
1841 uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev, in udma_alloc_chan_resources()
1843 if (uc->irq_num_udma <= 0) { in udma_alloc_chan_resources()
1844 dev_err(ud->dev, "Failed to get udma irq (index: %u)\n", in udma_alloc_chan_resources()
1846 free_irq(uc->irq_num_ring, uc); in udma_alloc_chan_resources()
1847 ret = -EINVAL; in udma_alloc_chan_resources()
1851 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0, in udma_alloc_chan_resources()
1852 uc->name, uc); in udma_alloc_chan_resources()
1854 dev_err(ud->dev, "chan%d: UDMA irq request failed\n", in udma_alloc_chan_resources()
1855 uc->id); in udma_alloc_chan_resources()
1856 free_irq(uc->irq_num_ring, uc); in udma_alloc_chan_resources()
1860 uc->irq_num_udma = 0; in udma_alloc_chan_resources()
1868 uc->irq_num_ring = 0; in udma_alloc_chan_resources()
1869 uc->irq_num_udma = 0; in udma_alloc_chan_resources()
1871 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread); in udma_alloc_chan_resources()
1872 uc->psil_paired = false; in udma_alloc_chan_resources()
1879 if (uc->use_dma_pool) { in udma_alloc_chan_resources()
1880 dma_pool_destroy(uc->hdesc_pool); in udma_alloc_chan_resources()
1881 uc->use_dma_pool = false; in udma_alloc_chan_resources()
1892 memcpy(&uc->cfg, cfg, sizeof(uc->cfg)); in udma_slave_config()
1914 dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size); in udma_alloc_tr_desc()
1919 d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT); in udma_alloc_tr_desc()
1923 d->sglen = tr_count; in udma_alloc_tr_desc()
1925 d->hwdesc_count = 1; in udma_alloc_tr_desc()
1926 hwdesc = &d->hwdesc[0]; in udma_alloc_tr_desc()
1929 if (uc->use_dma_pool) { in udma_alloc_tr_desc()
1930 hwdesc->cppi5_desc_size = uc->config.hdesc_size; in udma_alloc_tr_desc()
1931 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, in udma_alloc_tr_desc()
1933 &hwdesc->cppi5_desc_paddr); in udma_alloc_tr_desc()
1935 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, in udma_alloc_tr_desc()
1937 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size, in udma_alloc_tr_desc()
1938 uc->ud->desc_align); in udma_alloc_tr_desc()
1939 hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev, in udma_alloc_tr_desc()
1940 hwdesc->cppi5_desc_size, in udma_alloc_tr_desc()
1941 &hwdesc->cppi5_desc_paddr, in udma_alloc_tr_desc()
1945 if (!hwdesc->cppi5_desc_vaddr) { in udma_alloc_tr_desc()
1951 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size; in udma_alloc_tr_desc()
1953 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count; in udma_alloc_tr_desc()
1955 tr_desc = hwdesc->cppi5_desc_vaddr; in udma_alloc_tr_desc()
1957 if (uc->cyclic) in udma_alloc_tr_desc()
1961 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); in udma_alloc_tr_desc()
1963 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); in udma_alloc_tr_desc()
1966 cppi5_desc_set_pktids(tr_desc, uc->id, in udma_alloc_tr_desc()
1974 * udma_get_tr_counters - calculate TR counters for a given length
1983 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
1987 * -EINVAL if the length cannot be supported
2003 *tr0_cnt0 = SZ_64K - BIT(align_to); in udma_get_tr_counters()
2006 align_to--; in udma_get_tr_counters()
2009 return -EINVAL; in udma_get_tr_counters()
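The kernel-doc above describes splitting a transfer length into at most two TRs: TR0 moves tr0_cnt1 blocks of tr0_cnt0 bytes each (tr0_cnt0 is chosen as SZ_64K minus one alignment granule so the counter fields stay below 64K), and TR1 moves the remaining tr1_cnt0 bytes. A hedged sketch of that arithmetic, reconstructed from the visible fragments (the sub-64K fast path and the exact realign loop are assumptions):

static int udma_get_tr_counters(size_t len, unsigned long align_to,
                                u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
{
        if (len < SZ_64K) {
                *tr0_cnt0 = len;        /* a single TR is enough */
                *tr0_cnt1 = 1;
                return 1;
        }

        if (align_to > 3)
                align_to = 3;

realign:
        *tr0_cnt0 = SZ_64K - BIT(align_to);
        if (len / *tr0_cnt0 >= SZ_64K) {
                if (align_to) {
                        align_to--;     /* relax alignment and retry */
                        goto realign;
                }
                return -EINVAL;         /* length cannot be covered by two TRs */
        }

        *tr0_cnt1 = len / *tr0_cnt0;    /* full blocks handled by TR0 */
        *tr1_cnt0 = len % *tr0_cnt0;    /* remainder handled by TR1 */

        return 2;
}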
2046 d->sglen = sglen; in udma_prep_slave_sg_tr()
2048 tr_req = d->hwdesc[0].tr_req_base; in udma_prep_slave_sg_tr()
2055 dev_err(uc->ud->dev, "size %u is not supported\n", in udma_prep_slave_sg_tr()
2086 d->residue += sg_dma_len(sgent); in udma_prep_slave_sg_tr()
2089 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, in udma_prep_slave_sg_tr()
2099 if (uc->config.ep_type != PSIL_EP_PDMA_XY) in udma_configure_statictr()
2105 d->static_tr.elsize = 0; in udma_configure_statictr()
2108 d->static_tr.elsize = 1; in udma_configure_statictr()
2111 d->static_tr.elsize = 2; in udma_configure_statictr()
2114 d->static_tr.elsize = 3; in udma_configure_statictr()
2117 d->static_tr.elsize = 4; in udma_configure_statictr()
2120 return -EINVAL; in udma_configure_statictr()
2123 d->static_tr.elcnt = elcnt; in udma_configure_statictr()
2131 if (uc->config.pkt_mode || !uc->cyclic) { in udma_configure_statictr()
2134 if (uc->cyclic) in udma_configure_statictr()
2135 d->static_tr.bstcnt = d->residue / d->sglen / div; in udma_configure_statictr()
2137 d->static_tr.bstcnt = d->residue / div; in udma_configure_statictr()
2139 if (uc->config.dir == DMA_DEV_TO_MEM && in udma_configure_statictr()
2140 d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask) in udma_configure_statictr()
2141 return -EINVAL; in udma_configure_statictr()
2143 d->static_tr.bstcnt = 0; in udma_configure_statictr()
2164 d->sglen = sglen; in udma_prep_slave_sg_pkt()
2165 d->hwdesc_count = sglen; in udma_prep_slave_sg_pkt()
2168 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); in udma_prep_slave_sg_pkt()
2170 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); in udma_prep_slave_sg_pkt()
2173 struct udma_hwdesc *hwdesc = &d->hwdesc[i]; in udma_prep_slave_sg_pkt()
2178 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, in udma_prep_slave_sg_pkt()
2180 &hwdesc->cppi5_desc_paddr); in udma_prep_slave_sg_pkt()
2181 if (!hwdesc->cppi5_desc_vaddr) { in udma_prep_slave_sg_pkt()
2182 dev_err(uc->ud->dev, in udma_prep_slave_sg_pkt()
2190 d->residue += sg_len; in udma_prep_slave_sg_pkt()
2191 hwdesc->cppi5_desc_size = uc->config.hdesc_size; in udma_prep_slave_sg_pkt()
2192 desc = hwdesc->cppi5_desc_vaddr; in udma_prep_slave_sg_pkt()
2197 cppi5_desc_set_pktids(&desc->hdr, uc->id, in udma_prep_slave_sg_pkt()
2199 cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id); in udma_prep_slave_sg_pkt()
2202 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff); in udma_prep_slave_sg_pkt()
2211 hwdesc->cppi5_desc_paddr); in udma_prep_slave_sg_pkt()
2217 if (d->residue >= SZ_4M) { in udma_prep_slave_sg_pkt()
2218 dev_err(uc->ud->dev, in udma_prep_slave_sg_pkt()
2219 "%s: Transfer size %u is over the supported 4M range\n", in udma_prep_slave_sg_pkt()
2220 __func__, d->residue); in udma_prep_slave_sg_pkt()
2226 h_desc = d->hwdesc[0].cppi5_desc_vaddr; in udma_prep_slave_sg_pkt()
2227 cppi5_hdesc_set_pktlen(h_desc, d->residue); in udma_prep_slave_sg_pkt()
2236 struct udma_chan *uc = to_udma_chan(desc->chan); in udma_attach_metadata()
2241 if (!uc->config.pkt_mode || !uc->config.metadata_size) in udma_attach_metadata()
2242 return -ENOTSUPP; in udma_attach_metadata()
2244 if (!data || len > uc->config.metadata_size) in udma_attach_metadata()
2245 return -EINVAL; in udma_attach_metadata()
2247 if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE) in udma_attach_metadata()
2248 return -EINVAL; in udma_attach_metadata()
2250 h_desc = d->hwdesc[0].cppi5_desc_vaddr; in udma_attach_metadata()
2251 if (d->dir == DMA_MEM_TO_DEV) in udma_attach_metadata()
2252 memcpy(h_desc->epib, data, len); in udma_attach_metadata()
2254 if (uc->config.needs_epib) in udma_attach_metadata()
2255 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; in udma_attach_metadata()
2257 d->metadata = data; in udma_attach_metadata()
2258 d->metadata_size = len; in udma_attach_metadata()
2259 if (uc->config.needs_epib) in udma_attach_metadata()
2272 struct udma_chan *uc = to_udma_chan(desc->chan); in udma_get_metadata_ptr()
2275 if (!uc->config.pkt_mode || !uc->config.metadata_size) in udma_get_metadata_ptr()
2276 return ERR_PTR(-ENOTSUPP); in udma_get_metadata_ptr()
2278 h_desc = d->hwdesc[0].cppi5_desc_vaddr; in udma_get_metadata_ptr()
2280 *max_len = uc->config.metadata_size; in udma_get_metadata_ptr()
2282 *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ? in udma_get_metadata_ptr()
2286 return h_desc->epib; in udma_get_metadata_ptr()
2293 struct udma_chan *uc = to_udma_chan(desc->chan); in udma_set_metadata_len()
2298 if (!uc->config.pkt_mode || !uc->config.metadata_size) in udma_set_metadata_len()
2299 return -ENOTSUPP; in udma_set_metadata_len()
2301 if (payload_len > uc->config.metadata_size) in udma_set_metadata_len()
2302 return -EINVAL; in udma_set_metadata_len()
2304 if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE) in udma_set_metadata_len()
2305 return -EINVAL; in udma_set_metadata_len()
2307 h_desc = d->hwdesc[0].cppi5_desc_vaddr; in udma_set_metadata_len()
2309 if (uc->config.needs_epib) { in udma_set_metadata_len()
2310 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; in udma_set_metadata_len()
2336 if (dir != uc->config.dir) { in udma_prep_slave_sg()
2337 dev_err(chan->device->dev, in udma_prep_slave_sg()
2339 __func__, uc->id, in udma_prep_slave_sg()
2340 dmaengine_get_direction_text(uc->config.dir), in udma_prep_slave_sg()
2346 dev_width = uc->cfg.src_addr_width; in udma_prep_slave_sg()
2347 burst = uc->cfg.src_maxburst; in udma_prep_slave_sg()
2349 dev_width = uc->cfg.dst_addr_width; in udma_prep_slave_sg()
2350 burst = uc->cfg.dst_maxburst; in udma_prep_slave_sg()
2352 dev_err(chan->device->dev, "%s: bad direction?\n", __func__); in udma_prep_slave_sg()
2359 if (uc->config.pkt_mode) in udma_prep_slave_sg()
2369 d->dir = dir; in udma_prep_slave_sg()
2370 d->desc_idx = 0; in udma_prep_slave_sg()
2371 d->tr_idx = 0; in udma_prep_slave_sg()
2375 dev_err(uc->ud->dev, in udma_prep_slave_sg()
2377 __func__, d->static_tr.bstcnt); in udma_prep_slave_sg()
2384 if (uc->config.metadata_size) in udma_prep_slave_sg()
2385 d->vd.tx.metadata_ops = &metadata_ops; in udma_prep_slave_sg()
2387 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); in udma_prep_slave_sg()
2406 dev_err(uc->ud->dev, "size %zu is not supported\n", in udma_prep_dma_cyclic_tr()
2417 tr_req = d->hwdesc[0].tr_req_base; in udma_prep_dma_cyclic_tr()
2465 if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1)) in udma_prep_dma_cyclic_pkt()
2475 d->hwdesc_count = periods; in udma_prep_dma_cyclic_pkt()
2477 /* TODO: re-check this... */ in udma_prep_dma_cyclic_pkt()
2479 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); in udma_prep_dma_cyclic_pkt()
2481 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); in udma_prep_dma_cyclic_pkt()
2484 struct udma_hwdesc *hwdesc = &d->hwdesc[i]; in udma_prep_dma_cyclic_pkt()
2488 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, in udma_prep_dma_cyclic_pkt()
2490 &hwdesc->cppi5_desc_paddr); in udma_prep_dma_cyclic_pkt()
2491 if (!hwdesc->cppi5_desc_vaddr) { in udma_prep_dma_cyclic_pkt()
2492 dev_err(uc->ud->dev, in udma_prep_dma_cyclic_pkt()
2500 hwdesc->cppi5_desc_size = uc->config.hdesc_size; in udma_prep_dma_cyclic_pkt()
2501 h_desc = hwdesc->cppi5_desc_vaddr; in udma_prep_dma_cyclic_pkt()
2507 cppi5_desc_set_pktids(&h_desc->hdr, uc->id, in udma_prep_dma_cyclic_pkt()
2509 cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id); in udma_prep_dma_cyclic_pkt()
2530 if (dir != uc->config.dir) { in udma_prep_dma_cyclic()
2531 dev_err(chan->device->dev, in udma_prep_dma_cyclic()
2533 __func__, uc->id, in udma_prep_dma_cyclic()
2534 dmaengine_get_direction_text(uc->config.dir), in udma_prep_dma_cyclic()
2539 uc->cyclic = true; in udma_prep_dma_cyclic()
2542 dev_width = uc->cfg.src_addr_width; in udma_prep_dma_cyclic()
2543 burst = uc->cfg.src_maxburst; in udma_prep_dma_cyclic()
2545 dev_width = uc->cfg.dst_addr_width; in udma_prep_dma_cyclic()
2546 burst = uc->cfg.dst_maxburst; in udma_prep_dma_cyclic()
2548 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); in udma_prep_dma_cyclic()
2555 if (uc->config.pkt_mode) in udma_prep_dma_cyclic()
2565 d->sglen = buf_len / period_len; in udma_prep_dma_cyclic()
2567 d->dir = dir; in udma_prep_dma_cyclic()
2568 d->residue = buf_len; in udma_prep_dma_cyclic()
2572 dev_err(uc->ud->dev, in udma_prep_dma_cyclic()
2574 __func__, d->static_tr.bstcnt); in udma_prep_dma_cyclic()
2581 if (uc->config.metadata_size) in udma_prep_dma_cyclic()
2582 d->vd.tx.metadata_ops = &metadata_ops; in udma_prep_dma_cyclic()
2584 return vchan_tx_prep(&uc->vc, &d->vd, flags); in udma_prep_dma_cyclic()
2598 if (uc->config.dir != DMA_MEM_TO_MEM) { in udma_prep_dma_memcpy()
2599 dev_err(chan->device->dev, in udma_prep_dma_memcpy()
2601 __func__, uc->id, in udma_prep_dma_memcpy()
2602 dmaengine_get_direction_text(uc->config.dir), in udma_prep_dma_memcpy()
2610 dev_err(uc->ud->dev, "size %zu is not supported\n", in udma_prep_dma_memcpy()
2619 d->dir = DMA_MEM_TO_MEM; in udma_prep_dma_memcpy()
2620 d->desc_idx = 0; in udma_prep_dma_memcpy()
2621 d->tr_idx = 0; in udma_prep_dma_memcpy()
2622 d->residue = len; in udma_prep_dma_memcpy()
2624 tr_req = d->hwdesc[0].tr_req_base; in udma_prep_dma_memcpy()
2662 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, in udma_prep_dma_memcpy()
2665 if (uc->config.metadata_size) in udma_prep_dma_memcpy()
2666 d->vd.tx.metadata_ops = &metadata_ops; in udma_prep_dma_memcpy()
2668 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); in udma_prep_dma_memcpy()
2676 spin_lock_irqsave(&uc->vc.lock, flags); in udma_issue_pending()
2679 if (vchan_issue_pending(&uc->vc) && !uc->desc) { in udma_issue_pending()
2685 if (!(uc->state == UDMA_CHAN_IS_TERMINATING && in udma_issue_pending()
2690 spin_unlock_irqrestore(&uc->vc.lock, flags); in udma_issue_pending()
2701 spin_lock_irqsave(&uc->vc.lock, flags); in udma_tx_status()
2714 if (uc->desc && uc->desc->vd.tx.cookie == cookie) { in udma_tx_status()
2717 u32 residue = uc->desc->residue; in udma_tx_status()
2720 if (uc->desc->dir == DMA_MEM_TO_DEV) { in udma_tx_status()
2723 if (uc->config.ep_type != PSIL_EP_NATIVE) { in udma_tx_status()
2728 delay = bcnt - peer_bcnt; in udma_tx_status()
2730 } else if (uc->desc->dir == DMA_DEV_TO_MEM) { in udma_tx_status()
2733 if (uc->config.ep_type != PSIL_EP_NATIVE) { in udma_tx_status()
2738 delay = peer_bcnt - bcnt; in udma_tx_status()
2744 bcnt -= uc->bcnt; in udma_tx_status()
2745 if (bcnt && !(bcnt % uc->desc->residue)) in udma_tx_status()
2748 residue -= bcnt % uc->desc->residue; in udma_tx_status()
2750 if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) { in udma_tx_status()
2763 spin_unlock_irqrestore(&uc->vc.lock, flags); in udma_tx_status()
2772 switch (uc->config.dir) { in udma_pause()
2789 return -EINVAL; in udma_pause()
2800 switch (uc->config.dir) { in udma_resume()
2815 return -EINVAL; in udma_resume()
2827 spin_lock_irqsave(&uc->vc.lock, flags); in udma_terminate_all()
2832 if (uc->desc) { in udma_terminate_all()
2833 uc->terminated_desc = uc->desc; in udma_terminate_all()
2834 uc->desc = NULL; in udma_terminate_all()
2835 uc->terminated_desc->terminated = true; in udma_terminate_all()
2836 cancel_delayed_work(&uc->tx_drain.work); in udma_terminate_all()
2839 uc->paused = false; in udma_terminate_all()
2841 vchan_get_all_descriptors(&uc->vc, &head); in udma_terminate_all()
2842 spin_unlock_irqrestore(&uc->vc.lock, flags); in udma_terminate_all()
2843 vchan_dma_desc_free_list(&uc->vc, &head); in udma_terminate_all()
2853 vchan_synchronize(&uc->vc); in udma_synchronize()
2855 if (uc->state == UDMA_CHAN_IS_TERMINATING) { in udma_synchronize()
2856 timeout = wait_for_completion_timeout(&uc->teardown_completed, in udma_synchronize()
2859 dev_warn(uc->ud->dev, "chan%d teardown timeout!\n", in udma_synchronize()
2860 uc->id); in udma_synchronize()
2868 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id); in udma_synchronize()
2870 cancel_delayed_work_sync(&uc->tx_drain.work); in udma_synchronize()
2878 struct udma_chan *uc = to_udma_chan(&vc->chan); in udma_desc_pre_callback()
2884 d = to_udma_desc(&vd->tx); in udma_desc_pre_callback()
2886 if (d->metadata_size) in udma_desc_pre_callback()
2891 void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx); in udma_desc_pre_callback()
2895 result->residue = d->residue - in udma_desc_pre_callback()
2897 if (result->residue) in udma_desc_pre_callback()
2898 result->result = DMA_TRANS_ABORTED; in udma_desc_pre_callback()
2900 result->result = DMA_TRANS_NOERROR; in udma_desc_pre_callback()
2902 result->residue = 0; in udma_desc_pre_callback()
2903 result->result = DMA_TRANS_NOERROR; in udma_desc_pre_callback()
2919 spin_lock_irq(&vc->lock); in udma_vchan_complete()
2920 list_splice_tail_init(&vc->desc_completed, &head); in udma_vchan_complete()
2921 vd = vc->cyclic; in udma_vchan_complete()
2923 vc->cyclic = NULL; in udma_vchan_complete()
2924 dmaengine_desc_get_callback(&vd->tx, &cb); in udma_vchan_complete()
2928 spin_unlock_irq(&vc->lock); in udma_vchan_complete()
2936 dmaengine_desc_get_callback(&vd->tx, &cb); in udma_vchan_complete()
2938 list_del(&vd->node); in udma_vchan_complete()
2950 struct udma_dev *ud = to_udma_dev(chan->device); in udma_free_chan_resources()
2953 if (uc->terminated_desc) { in udma_free_chan_resources()
2958 cancel_delayed_work_sync(&uc->tx_drain.work); in udma_free_chan_resources()
2960 if (uc->irq_num_ring > 0) { in udma_free_chan_resources()
2961 free_irq(uc->irq_num_ring, uc); in udma_free_chan_resources()
2963 uc->irq_num_ring = 0; in udma_free_chan_resources()
2965 if (uc->irq_num_udma > 0) { in udma_free_chan_resources()
2966 free_irq(uc->irq_num_udma, uc); in udma_free_chan_resources()
2968 uc->irq_num_udma = 0; in udma_free_chan_resources()
2971 /* Release PSI-L pairing */ in udma_free_chan_resources()
2972 if (uc->psil_paired) { in udma_free_chan_resources()
2973 navss_psil_unpair(ud, uc->config.src_thread, in udma_free_chan_resources()
2974 uc->config.dst_thread); in udma_free_chan_resources()
2975 uc->psil_paired = false; in udma_free_chan_resources()
2978 vchan_free_chan_resources(&uc->vc); in udma_free_chan_resources()
2979 tasklet_kill(&uc->vc.task); in udma_free_chan_resources()
2985 if (uc->use_dma_pool) { in udma_free_chan_resources()
2986 dma_pool_destroy(uc->hdesc_pool); in udma_free_chan_resources()
2987 uc->use_dma_pool = false; in udma_free_chan_resources()
3006 if (chan->device->dev->driver != &udma_driver.driver) in udma_dma_filter_fn()
3010 ucc = &uc->config; in udma_dma_filter_fn()
3011 ud = uc->ud; in udma_dma_filter_fn()
3014 if (filter_param->atype > 2) { in udma_dma_filter_fn()
3015 dev_err(ud->dev, "Invalid channel atype: %u\n", in udma_dma_filter_fn()
3016 filter_param->atype); in udma_dma_filter_fn()
3020 ucc->remote_thread_id = filter_param->remote_thread_id; in udma_dma_filter_fn()
3021 ucc->atype = filter_param->atype; in udma_dma_filter_fn()
3023 if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) in udma_dma_filter_fn()
3024 ucc->dir = DMA_MEM_TO_DEV; in udma_dma_filter_fn()
3026 ucc->dir = DMA_DEV_TO_MEM; in udma_dma_filter_fn()
3028 ep_config = psil_get_ep_config(ucc->remote_thread_id); in udma_dma_filter_fn()
3030 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n", in udma_dma_filter_fn()
3031 ucc->remote_thread_id); in udma_dma_filter_fn()
3032 ucc->dir = DMA_MEM_TO_MEM; in udma_dma_filter_fn()
3033 ucc->remote_thread_id = -1; in udma_dma_filter_fn()
3034 ucc->atype = 0; in udma_dma_filter_fn()
3038 ucc->pkt_mode = ep_config->pkt_mode; in udma_dma_filter_fn()
3039 ucc->channel_tpl = ep_config->channel_tpl; in udma_dma_filter_fn()
3040 ucc->notdpkt = ep_config->notdpkt; in udma_dma_filter_fn()
3041 ucc->ep_type = ep_config->ep_type; in udma_dma_filter_fn()
3043 if (ucc->ep_type != PSIL_EP_NATIVE) { in udma_dma_filter_fn()
3044 const struct udma_match_data *match_data = ud->match_data; in udma_dma_filter_fn()
3046 if (match_data->flags & UDMA_FLAG_PDMA_ACC32) in udma_dma_filter_fn()
3047 ucc->enable_acc32 = ep_config->pdma_acc32; in udma_dma_filter_fn()
3048 if (match_data->flags & UDMA_FLAG_PDMA_BURST) in udma_dma_filter_fn()
3049 ucc->enable_burst = ep_config->pdma_burst; in udma_dma_filter_fn()
3052 ucc->needs_epib = ep_config->needs_epib; in udma_dma_filter_fn()
3053 ucc->psd_size = ep_config->psd_size; in udma_dma_filter_fn()
3054 ucc->metadata_size = in udma_dma_filter_fn()
3055 (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) + in udma_dma_filter_fn()
3056 ucc->psd_size; in udma_dma_filter_fn()
3058 if (ucc->pkt_mode) in udma_dma_filter_fn()
3059 ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) + in udma_dma_filter_fn()
3060 ucc->metadata_size, ud->desc_align); in udma_dma_filter_fn()
3062 dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id, in udma_dma_filter_fn()
3063 ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir)); in udma_dma_filter_fn()
3071 struct udma_dev *ud = ofdma->of_dma_data; in udma_of_xlate()
3072 dma_cap_mask_t mask = ud->ddev.cap_mask; in udma_of_xlate()
3076 if (dma_spec->args_count != 1 && dma_spec->args_count != 2) in udma_of_xlate()
3079 filter_param.remote_thread_id = dma_spec->args[0]; in udma_of_xlate()
3080 if (dma_spec->args_count == 2) in udma_of_xlate()
3081 filter_param.atype = dma_spec->args[1]; in udma_of_xlate()
3086 ofdma->of_node); in udma_of_xlate()
3088 dev_err(ud->dev, "failed to get channel in %s\n", __func__); in udma_of_xlate()
3089 return ERR_PTR(-EINVAL); in udma_of_xlate()
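Pieced together, the udma_of_xlate() fragments implement a one- or two-cell DMA specifier: cell 0 is the remote PSI-L thread id and the optional cell 1 is the ATYPE (checked against a maximum of 2 in udma_dma_filter_fn() above). A hedged reconstruction of the tail of the function, using the mask and ud variables from the fragments above; the fallback to atype = 0 for single-cell specifiers is an assumption:

        struct udma_filter_param filter_param;
        struct dma_chan *chan;

        if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
                return NULL;

        filter_param.remote_thread_id = dma_spec->args[0];      /* PSI-L thread id */
        filter_param.atype = dma_spec->args_count == 2 ? dma_spec->args[1] : 0;

        /* The core runs udma_dma_filter_fn() against every channel in the mask */
        chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
                                     ofdma->of_node);
        if (!chan) {
                dev_err(ud->dev, "failed to get channel in %s\n", __func__);
                return ERR_PTR(-EINVAL);
        }

        return chan;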
3123 .compatible = "ti,am654-navss-main-udmap",
3127 .compatible = "ti,am654-navss-mcu-udmap",
3130 .compatible = "ti,j721e-navss-main-udmap",
3133 .compatible = "ti,j721e-navss-mcu-udmap",
3163 ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]); in udma_get_mmrs()
3164 if (IS_ERR(ud->mmrs[i])) in udma_get_mmrs()
3165 return PTR_ERR(ud->mmrs[i]); in udma_get_mmrs()
3173 struct device *dev = ud->dev; in udma_setup_resources()
3178 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in udma_setup_resources()
3179 static const char * const range_names[] = { "ti,sci-rm-range-tchan", in udma_setup_resources()
3180 "ti,sci-rm-range-rchan", in udma_setup_resources()
3181 "ti,sci-rm-range-rflow" }; in udma_setup_resources()
3183 cap2 = udma_read(ud->mmrs[MMR_GCFG], UDMA_CAP_REG(2)); in udma_setup_resources()
3184 cap3 = udma_read(ud->mmrs[MMR_GCFG], UDMA_CAP_REG(3)); in udma_setup_resources()
3186 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3); in udma_setup_resources()
3187 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2); in udma_setup_resources()
3188 ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2); in udma_setup_resources()
3189 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2); in udma_setup_resources()
3190 ch_count = ud->tchan_cnt + ud->rchan_cnt; in udma_setup_resources()
3193 if (of_device_is_compatible(dev->of_node, in udma_setup_resources()
3194 "ti,am654-navss-main-udmap")) { in udma_setup_resources()
3195 ud->tpl_levels = 2; in udma_setup_resources()
3196 ud->tpl_start_idx[0] = 8; in udma_setup_resources()
3197 } else if (of_device_is_compatible(dev->of_node, in udma_setup_resources()
3198 "ti,am654-navss-mcu-udmap")) { in udma_setup_resources()
3199 ud->tpl_levels = 2; in udma_setup_resources()
3200 ud->tpl_start_idx[0] = 2; in udma_setup_resources()
3202 ud->tpl_levels = 3; in udma_setup_resources()
3203 ud->tpl_start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3); in udma_setup_resources()
3204 ud->tpl_start_idx[0] = ud->tpl_start_idx[1] + in udma_setup_resources()
3207 ud->tpl_levels = 2; in udma_setup_resources()
3208 ud->tpl_start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); in udma_setup_resources()
3210 ud->tpl_levels = 1; in udma_setup_resources()
3213 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), in udma_setup_resources()
3215 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), in udma_setup_resources()
3217 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), in udma_setup_resources()
3219 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), in udma_setup_resources()
3221 ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt), in udma_setup_resources()
3224 ud->rflow_gp_map_allocated = devm_kcalloc(dev, in udma_setup_resources()
3225 BITS_TO_LONGS(ud->rflow_cnt), in udma_setup_resources()
3228 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt), in udma_setup_resources()
3231 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows), in udma_setup_resources()
3234 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map || in udma_setup_resources()
3235 !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans || in udma_setup_resources()
3236 !ud->rflows || !ud->rflow_in_use) in udma_setup_resources()
3237 return -ENOMEM; in udma_setup_resources()
3244 bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt); in udma_setup_resources()
3247 bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt); in udma_setup_resources()
3251 tisci_rm->rm_ranges[i] = in udma_setup_resources()
3252 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, in udma_setup_resources()
3253 tisci_rm->tisci_dev_id, in udma_setup_resources()
3256 /* tchan ranges */ in udma_setup_resources()
3257 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; in udma_setup_resources()
3259 bitmap_zero(ud->tchan_map, ud->tchan_cnt); in udma_setup_resources()
3261 bitmap_fill(ud->tchan_map, ud->tchan_cnt); in udma_setup_resources()
3262 for (i = 0; i < rm_res->sets; i++) { in udma_setup_resources()
3263 rm_desc = &rm_res->desc[i]; in udma_setup_resources()
3264 bitmap_clear(ud->tchan_map, rm_desc->start, in udma_setup_resources()
3265 rm_desc->num); in udma_setup_resources()
3266 dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n", in udma_setup_resources()
3267 rm_desc->start, rm_desc->num); in udma_setup_resources()
3270 irq_res.sets = rm_res->sets; in udma_setup_resources()
3273 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; in udma_setup_resources()
3275 bitmap_zero(ud->rchan_map, ud->rchan_cnt); in udma_setup_resources()
3277 bitmap_fill(ud->rchan_map, ud->rchan_cnt); in udma_setup_resources()
3278 for (i = 0; i < rm_res->sets; i++) { in udma_setup_resources()
3279 rm_desc = &rm_res->desc[i]; in udma_setup_resources()
3280 bitmap_clear(ud->rchan_map, rm_desc->start, in udma_setup_resources()
3281 rm_desc->num); in udma_setup_resources()
3282 dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n", in udma_setup_resources()
3283 rm_desc->start, rm_desc->num); in udma_setup_resources()
3287 irq_res.sets += rm_res->sets; in udma_setup_resources()
3289 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; in udma_setup_resources()
3290 for (i = 0; i < rm_res->sets; i++) { in udma_setup_resources()
3291 irq_res.desc[i].start = rm_res->desc[i].start; in udma_setup_resources()
3292 irq_res.desc[i].num = rm_res->desc[i].num; in udma_setup_resources()
3294 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; in udma_setup_resources()
3295 for (j = 0; j < rm_res->sets; j++, i++) { in udma_setup_resources()
3296 irq_res.desc[i].start = rm_res->desc[j].start + in udma_setup_resources()
3297 ud->soc_data->rchan_oes_offset; in udma_setup_resources()
3298 irq_res.desc[i].num = rm_res->desc[j].num; in udma_setup_resources()
3300 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); in udma_setup_resources()
3303 dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); in udma_setup_resources()
	/* GP rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		/* all gp flows are assigned exclusively to Linux */
		bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
			     ud->rflow_cnt - ud->rchan_cnt);
	} else {
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rflow_gp_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}
	ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
	ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
	if (!ch_count)
		return -ENODEV;

	ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
				    GFP_KERNEL);
	if (!ud->channels)
		return -ENOMEM;

	dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
		 ch_count,
		 ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
		 ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt),
		 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
					       ud->rflow_cnt));

	return ch_count;
}
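/*
 * udma_setup_rx_flush() prepares the resources used to flush an RX channel
 * that is torn down before the remote peer has stopped sending: a dummy 1K
 * buffer plus one TR-mode and one packet-mode descriptor that simply drop
 * incoming data into that buffer.
 */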
static int udma_setup_rx_flush(struct udma_dev *ud)
{
	struct udma_rx_flush *rx_flush = &ud->rx_flush;
	struct cppi5_desc_hdr_t *tr_desc;
	struct cppi5_tr_type1_t *tr_req;
	struct cppi5_host_desc_t *desc;
	struct device *dev = ud->dev;
	struct udma_hwdesc *hwdesc;
	size_t tr_size;

	/* Allocate 1K buffer for discarded data on RX channel teardown */
	rx_flush->buffer_size = SZ_1K;
	rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
					      GFP_KERNEL);
	if (!rx_flush->buffer_vaddr)
		return -ENOMEM;

	rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
						rx_flush->buffer_size,
						DMA_TO_DEVICE);
	if (dma_mapping_error(dev, rx_flush->buffer_paddr))
		return -ENOMEM;

	/* Set up descriptor to be used for TR mode flush */
	hwdesc = &rx_flush->hwdescs[0];
	tr_size = sizeof(struct cppi5_tr_type1_t);
	hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
	hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
					ud->desc_align);

	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
						GFP_KERNEL);
	if (!hwdesc->cppi5_desc_vaddr)
		return -ENOMEM;

	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
						  hwdesc->cppi5_desc_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
		return -ENOMEM;
	/* Start of the TR req records */
	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
	/* Start address of the TR response array */
	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;

	tr_desc = hwdesc->cppi5_desc_vaddr;
	cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);

	tr_req = hwdesc->tr_req_base;
	cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
	cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req->addr = rx_flush->buffer_paddr;
	tr_req->icnt0 = rx_flush->buffer_size;
	tr_req->icnt1 = 1;

	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
	/* Set up descriptor to be used for packet mode flush */
	hwdesc = &rx_flush->hwdescs[1];
	hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
					CPPI5_INFO0_HDESC_EPIB_SIZE +
					CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
					ud->desc_align);

	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
						GFP_KERNEL);
	if (!hwdesc->cppi5_desc_vaddr)
		return -ENOMEM;

	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
						  hwdesc->cppi5_desc_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
		return -ENOMEM;

	desc = hwdesc->cppi5_desc_vaddr;
	cppi5_hdesc_init(desc, 0, 0);
	cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);

	cppi5_hdesc_attach_buf(desc,
			       rx_flush->buffer_paddr, rx_flush->buffer_size,
			       rx_flush->buffer_paddr, rx_flush->buffer_size);

	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);

	return 0;
}
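/*
 * The dbg_summary_show callbacks below back the dmaengine debugfs summary:
 * for every channel that has a client they print the channel/thread pairing,
 * the endpoint type and the configured transfer mode.
 */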
static void udma_dbg_summary_show_chan(struct seq_file *s,
				       struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_chan_config *ucc = &uc->config;

	seq_printf(s, " %-13s| %s", dma_chan_name(chan),
		   chan->dbg_client_name ?: "in-use");
	seq_printf(s, " (%s, ", dmaengine_get_direction_text(uc->config.dir));

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
			   ucc->src_thread, ucc->dst_thread);
		break;
	case DMA_DEV_TO_MEM:
		seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
			   ucc->src_thread, ucc->dst_thread);
		break;
	case DMA_MEM_TO_DEV:
		seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
			   ucc->src_thread, ucc->dst_thread);
		break;
	default:
		seq_printf(s, ")\n");
		return;
	}

	if (ucc->ep_type == PSIL_EP_NATIVE) {
		seq_printf(s, "PSI-L Native");
		if (ucc->metadata_size) {
			seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
			if (ucc->psd_size)
				seq_printf(s, " PSDsize:%u", ucc->psd_size);
			seq_printf(s, " ]");
		}
	} else {
		seq_printf(s, "PDMA");
		if (ucc->enable_acc32 || ucc->enable_burst)
			seq_printf(s, "[%s%s ]",
				   ucc->enable_acc32 ? " ACC32" : "",
				   ucc->enable_burst ? " BURST" : "");
	}

	seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
}

static void udma_dbg_summary_show(struct seq_file *s,
				  struct dma_device *dma_dev)
{
	struct dma_chan *chan;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (chan->client_count)
			udma_dbg_summary_show_chan(s, chan);
	}
}
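/*
 * udma_probe() ties the pieces together: it looks up the TI-SCI handle,
 * ring accelerator and MSI domain from the device tree, describes the
 * dmaengine capabilities, sets up the channel resources and registers the
 * controller.
 */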
static int udma_probe(struct platform_device *pdev)
{
	struct device_node *navss_node = pdev->dev.parent->of_node;
	const struct soc_device_attribute *soc;
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	struct udma_dev *ud;
	int ch_count, i, ret;

	ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
	if (!ud)
		return -ENOMEM;

	ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
	if (IS_ERR(ud->tisci_rm.tisci))
		return PTR_ERR(ud->tisci_rm.tisci);

	ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
				   &ud->tisci_rm.tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}
	pdev->id = ud->tisci_rm.tisci_dev_id;

	ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
				   &ud->tisci_rm.tisci_navss_dev_id);
	if (ret) {
		dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}

	ret = of_property_read_u32(dev->of_node, "ti,udma-atype", &ud->atype);
	if (!ret && ud->atype > 2) {
		dev_err(dev, "Invalid atype: %u\n", ud->atype);
		return -EINVAL;
	}
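	/*
	 * Look up the remaining firmware and SoC integration pieces: the
	 * TI-SCI RM ops, the ring accelerator, the INTA MSI domain and the
	 * per-compatible match/SoC data.
	 */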
	ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
	ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;

	ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
	if (IS_ERR(ud->ringacc))
		return PTR_ERR(ud->ringacc);

	dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
					    DOMAIN_BUS_TI_SCI_INTA_MSI);
	if (!dev->msi_domain) {
		dev_err(dev, "Failed to get MSI domain\n");
		return -EPROBE_DEFER;
	}

	match = of_match_node(udma_of_match, dev->of_node);
	if (!match) {
		dev_err(dev, "No compatible match found\n");
		return -ENODEV;
	}
	ud->match_data = match->data;

	soc = soc_device_match(k3_soc_devices);
	if (!soc) {
		dev_err(dev, "No compatible SoC found\n");
		return -ENODEV;
	}
	ud->soc_data = soc->data;
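	/*
	 * Advertise the dmaengine capabilities: slave and cyclic transfers
	 * are always supported, memcpy only when the match data enables it.
	 */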
	dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);

	ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources;
	ud->ddev.device_config = udma_slave_config;
	ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
	ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
	ud->ddev.device_issue_pending = udma_issue_pending;
	ud->ddev.device_tx_status = udma_tx_status;
	ud->ddev.device_pause = udma_pause;
	ud->ddev.device_resume = udma_resume;
	ud->ddev.device_terminate_all = udma_terminate_all;
	ud->ddev.device_synchronize = udma_synchronize;
#ifdef CONFIG_DEBUG_FS
	ud->ddev.dbg_summary_show = udma_dbg_summary_show;
#endif

	ud->ddev.device_free_chan_resources = udma_free_chan_resources;
	ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
	ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
				       DESC_METADATA_ENGINE;
	if (ud->match_data->enable_memcpy_support) {
		dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
		ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
		ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
	}

	ud->ddev.dev = dev;
	ud->dev = dev;
	ud->psil_base = ud->match_data->psil_base;

	INIT_LIST_HEAD(&ud->ddev.channels);
	INIT_LIST_HEAD(&ud->desc_to_purge);

	ch_count = udma_setup_resources(ud);
	if (ch_count <= 0)
		return ch_count;

	spin_lock_init(&ud->lock);
	INIT_WORK(&ud->purge_work, udma_purge_desc_work);

	ud->desc_align = 64;
	if (ud->desc_align < dma_get_cache_alignment())
		ud->desc_align = dma_get_cache_alignment();

	ret = udma_setup_rx_flush(ud);
	if (ret)
		return ret;
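	/*
	 * Set up the static per-resource state: IDs and real-time register
	 * bases for the tchans/rchans, IDs for the rflows, and the virt-dma
	 * bookkeeping for every usable channel.
	 */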
	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
	}

	for (i = 0; i < ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->vc.desc_free = udma_desc_free;
		uc->id = i;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->config.remote_thread_id = -1;
		uc->config.dir = DMA_MEM_TO_MEM;
		uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
					  dev_name(dev), i);

		vchan_init(&uc->vc, &ud->ddev);
		/* Use custom vchan completion handling */
		tasklet_setup(&uc->vc.task, udma_vchan_complete);
		init_completion(&uc->teardown_completed);
		INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
	}
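	/*
	 * Everything is initialized; register with the dmaengine core and
	 * expose the controller to device tree clients via of_dma.
	 */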
	ret = dma_async_device_register(&ud->ddev);
	if (ret) {
		dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
		return ret;
	}

	ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
	if (ret) {
		dev_err(dev, "failed to register of_dma controller\n");
		dma_async_device_unregister(&ud->ddev);
	}

	return ret;
}

static struct platform_driver udma_driver = {
	.driver = {
		.name		= "ti-udma",
		.of_match_table	= udma_of_match,
		.suppress_bind_attrs = true,
	},
	.probe		= udma_probe,
};

builtin_platform_driver(udma_driver);

#include "k3-udma-private.c"