Lines Matching +full:tpl +full:- +full:support

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
10 #include <linux/dma-mapping.h>
26 #include <linux/soc/ti/k3-ringacc.h>
29 #include <linux/dma/k3-event-router.h>
30 #include <linux/dma/ti-cppi5.h>
32 #include "../virt-dma.h"
33 #include "k3-udma.h"
34 #include "k3-psil-priv.h"
357 if (!uc->tchan) in udma_tchanrt_read()
359 return udma_read(uc->tchan->reg_rt, reg); in udma_tchanrt_read()
364 if (!uc->tchan) in udma_tchanrt_write()
366 udma_write(uc->tchan->reg_rt, reg, val); in udma_tchanrt_write()
372 if (!uc->tchan) in udma_tchanrt_update_bits()
374 udma_update_bits(uc->tchan->reg_rt, reg, mask, val); in udma_tchanrt_update_bits()
380 if (!uc->rchan) in udma_rchanrt_read()
382 return udma_read(uc->rchan->reg_rt, reg); in udma_rchanrt_read()
387 if (!uc->rchan) in udma_rchanrt_write()
389 udma_write(uc->rchan->reg_rt, reg, val); in udma_rchanrt_write()
395 if (!uc->rchan) in udma_rchanrt_update_bits()
397 udma_update_bits(uc->rchan->reg_rt, reg, mask, val); in udma_rchanrt_update_bits()
402 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in navss_psil_pair()
405 return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci, in navss_psil_pair()
406 tisci_rm->tisci_navss_dev_id, in navss_psil_pair()
413 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in navss_psil_unpair()
416 return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci, in navss_psil_unpair()
417 tisci_rm->tisci_navss_dev_id, in navss_psil_unpair()
423 struct device *chan_dev = &chan->dev->device; in k3_configure_chan_coherency()
427 chan->dev->chan_dma_dev = false; in k3_configure_chan_coherency()
429 chan_dev->dma_coherent = false; in k3_configure_chan_coherency()
430 chan_dev->dma_parms = NULL; in k3_configure_chan_coherency()
432 chan->dev->chan_dma_dev = true; in k3_configure_chan_coherency()
434 chan_dev->dma_coherent = true; in k3_configure_chan_coherency()
436 chan_dev->dma_parms = chan_dev->parent->dma_parms; in k3_configure_chan_coherency()
438 dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel); in k3_configure_chan_coherency()
440 chan_dev->dma_coherent = false; in k3_configure_chan_coherency()
441 chan_dev->dma_parms = NULL; in k3_configure_chan_coherency()
449 for (i = 0; i < tpl_map->levels; i++) { in udma_get_chan_tpl_index()
450 if (chan_id >= tpl_map->start_idx[i]) in udma_get_chan_tpl_index()
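
The two fragments above come from udma_get_chan_tpl_index(), which maps a channel id back to its throughput (TPL) level by returning the first level whose start index the id has reached. A minimal plain-C sketch of that lookup follows; the level layout and numbers are purely illustrative, not the driver's real configuration.

#include <stdio.h>

/*
 * Illustrative TPL layout (not the driver's real numbers): the higher
 * level indices own the lower channel ids, so start_idx[] descends.
 * Here level 2 owns ids 0..3, level 1 owns 4..15, level 0 owns 16+.
 */
static const int start_idx[] = { 16, 4, 0 };
static const int levels = 3;

static int chan_tpl_index(int chan_id)
{
	int i;

	for (i = 0; i < levels; i++)
		if (chan_id >= start_idx[i])
			return i;	/* first level whose start index is reached */
	return 0;
}

int main(void)
{
	printf("chan 2  -> level %d\n", chan_tpl_index(2));	/* 2 */
	printf("chan 20 -> level %d\n", chan_tpl_index(20));	/* 0 */
	return 0;
}
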
459 memset(&uc->config, 0, sizeof(uc->config)); in udma_reset_uchan()
460 uc->config.remote_thread_id = -1; in udma_reset_uchan()
461 uc->config.mapped_channel_id = -1; in udma_reset_uchan()
462 uc->config.default_flow_id = -1; in udma_reset_uchan()
463 uc->state = UDMA_CHAN_IS_IDLE; in udma_reset_uchan()
468 struct device *dev = uc->ud->dev; in udma_dump_chan_stdata()
472 if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) { in udma_dump_chan_stdata()
481 if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) { in udma_dump_chan_stdata()
494 return d->hwdesc[idx].cppi5_desc_paddr; in udma_curr_cppi5_desc_paddr()
499 return d->hwdesc[idx].cppi5_desc_vaddr; in udma_curr_cppi5_desc_vaddr()
505 struct udma_desc *d = uc->terminated_desc; in udma_udma_desc_from_paddr()
509 d->desc_idx); in udma_udma_desc_from_paddr()
516 d = uc->desc; in udma_udma_desc_from_paddr()
519 d->desc_idx); in udma_udma_desc_from_paddr()
531 if (uc->use_dma_pool) { in udma_free_hwdesc()
534 for (i = 0; i < d->hwdesc_count; i++) { in udma_free_hwdesc()
535 if (!d->hwdesc[i].cppi5_desc_vaddr) in udma_free_hwdesc()
538 dma_pool_free(uc->hdesc_pool, in udma_free_hwdesc()
539 d->hwdesc[i].cppi5_desc_vaddr, in udma_free_hwdesc()
540 d->hwdesc[i].cppi5_desc_paddr); in udma_free_hwdesc()
542 d->hwdesc[i].cppi5_desc_vaddr = NULL; in udma_free_hwdesc()
544 } else if (d->hwdesc[0].cppi5_desc_vaddr) { in udma_free_hwdesc()
545 dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size, in udma_free_hwdesc()
546 d->hwdesc[0].cppi5_desc_vaddr, in udma_free_hwdesc()
547 d->hwdesc[0].cppi5_desc_paddr); in udma_free_hwdesc()
549 d->hwdesc[0].cppi5_desc_vaddr = NULL; in udma_free_hwdesc()
560 spin_lock_irqsave(&ud->lock, flags); in udma_purge_desc_work()
561 list_splice_tail_init(&ud->desc_to_purge, &head); in udma_purge_desc_work()
562 spin_unlock_irqrestore(&ud->lock, flags); in udma_purge_desc_work()
565 struct udma_chan *uc = to_udma_chan(vd->tx.chan); in udma_purge_desc_work()
566 struct udma_desc *d = to_udma_desc(&vd->tx); in udma_purge_desc_work()
569 list_del(&vd->node); in udma_purge_desc_work()
574 if (!list_empty(&ud->desc_to_purge)) in udma_purge_desc_work()
575 schedule_work(&ud->purge_work); in udma_purge_desc_work()
580 struct udma_dev *ud = to_udma_dev(vd->tx.chan->device); in udma_desc_free()
581 struct udma_chan *uc = to_udma_chan(vd->tx.chan); in udma_desc_free()
582 struct udma_desc *d = to_udma_desc(&vd->tx); in udma_desc_free()
585 if (uc->terminated_desc == d) in udma_desc_free()
586 uc->terminated_desc = NULL; in udma_desc_free()
588 if (uc->use_dma_pool) { in udma_desc_free()
594 spin_lock_irqsave(&ud->lock, flags); in udma_desc_free()
595 list_add_tail(&vd->node, &ud->desc_to_purge); in udma_desc_free()
596 spin_unlock_irqrestore(&ud->lock, flags); in udma_desc_free()
598 schedule_work(&ud->purge_work); in udma_desc_free()
606 if (uc->tchan) in udma_is_chan_running()
608 if (uc->rchan) in udma_is_chan_running()
621 switch (uc->config.dir) { in udma_is_chan_paused()
646 return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr; in udma_get_rx_flush_hwdesc_paddr()
651 struct udma_desc *d = uc->desc; in udma_push_to_ring()
655 switch (uc->config.dir) { in udma_push_to_ring()
657 ring = uc->rflow->fd_ring; in udma_push_to_ring()
661 ring = uc->tchan->t_ring; in udma_push_to_ring()
664 return -EINVAL; in udma_push_to_ring()
667 /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */ in udma_push_to_ring()
668 if (idx == -1) { in udma_push_to_ring()
681 if (uc->config.dir != DMA_DEV_TO_MEM) in udma_desc_is_rx_flush()
695 switch (uc->config.dir) { in udma_pop_from_ring()
697 ring = uc->rflow->r_ring; in udma_pop_from_ring()
701 ring = uc->tchan->tc_ring; in udma_pop_from_ring()
704 return -ENOENT; in udma_pop_from_ring()
719 return -ENOENT; in udma_pop_from_ring()
729 switch (uc->config.dir) { in udma_reset_rings()
731 if (uc->rchan) { in udma_reset_rings()
732 ring1 = uc->rflow->fd_ring; in udma_reset_rings()
733 ring2 = uc->rflow->r_ring; in udma_reset_rings()
738 if (uc->tchan) { in udma_reset_rings()
739 ring1 = uc->tchan->t_ring; in udma_reset_rings()
740 ring2 = uc->tchan->tc_ring; in udma_reset_rings()
754 if (uc->terminated_desc) { in udma_reset_rings()
755 udma_desc_free(&uc->terminated_desc->vd); in udma_reset_rings()
756 uc->terminated_desc = NULL; in udma_reset_rings()
764 if (uc->tchan) { in udma_reset_counters()
774 if (!uc->bchan) { in udma_reset_counters()
780 if (uc->rchan) { in udma_reset_counters()
794 uc->bcnt = 0; in udma_reset_counters()
799 switch (uc->config.dir) { in udma_reset_chan()
813 return -EINVAL; in udma_reset_chan()
819 /* Hard reset: re-initialize the channel to reset */ in udma_reset_chan()
824 memcpy(&ucc_backup, &uc->config, sizeof(uc->config)); in udma_reset_chan()
825 uc->ud->ddev.device_free_chan_resources(&uc->vc.chan); in udma_reset_chan()
828 memcpy(&uc->config, &ucc_backup, sizeof(uc->config)); in udma_reset_chan()
829 ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan); in udma_reset_chan()
837 if (uc->config.dir == DMA_DEV_TO_MEM) in udma_reset_chan()
843 uc->state = UDMA_CHAN_IS_IDLE; in udma_reset_chan()
850 struct udma_chan_config *ucc = &uc->config; in udma_start_desc()
852 if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode && in udma_start_desc()
853 (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) { in udma_start_desc()
859 * PKTDMA supports pre-linked descriptor and cyclic is not in udma_start_desc()
862 for (i = 0; i < uc->desc->sglen; i++) in udma_start_desc()
872 if (uc->config.ep_type == PSIL_EP_NATIVE) in udma_chan_needs_reconfiguration()
876 if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr))) in udma_chan_needs_reconfiguration()
884 struct virt_dma_desc *vd = vchan_next_desc(&uc->vc); in udma_start()
887 uc->desc = NULL; in udma_start()
888 return -ENOENT; in udma_start()
891 list_del(&vd->node); in udma_start()
893 uc->desc = to_udma_desc(&vd->tx); in udma_start()
907 switch (uc->desc->dir) { in udma_start()
910 if (uc->config.ep_type == PSIL_EP_PDMA_XY) { in udma_start()
911 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | in udma_start()
912 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); in udma_start()
914 uc->ud->match_data; in udma_start()
916 if (uc->config.enable_acc32) in udma_start()
918 if (uc->config.enable_burst) in udma_start()
927 PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt, in udma_start()
928 match_data->statictr_z_mask)); in udma_start()
931 memcpy(&uc->static_tr, &uc->desc->static_tr, in udma_start()
932 sizeof(uc->static_tr)); in udma_start()
945 if (uc->config.ep_type == PSIL_EP_PDMA_XY) { in udma_start()
946 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | in udma_start()
947 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); in udma_start()
949 if (uc->config.enable_acc32) in udma_start()
951 if (uc->config.enable_burst) in udma_start()
959 memcpy(&uc->static_tr, &uc->desc->static_tr, in udma_start()
960 sizeof(uc->static_tr)); in udma_start()
979 return -EINVAL; in udma_start()
982 uc->state = UDMA_CHAN_IS_ACTIVE; in udma_start()
990 enum udma_chan_state old_state = uc->state; in udma_stop()
992 uc->state = UDMA_CHAN_IS_TERMINATING; in udma_stop()
993 reinit_completion(&uc->teardown_completed); in udma_stop()
995 switch (uc->config.dir) { in udma_stop()
997 if (!uc->cyclic && !uc->desc) in udma_stop()
998 udma_push_to_ring(uc, -1); in udma_stop()
1018 uc->state = old_state; in udma_stop()
1019 complete_all(&uc->teardown_completed); in udma_stop()
1020 return -EINVAL; in udma_stop()
1028 struct udma_desc *d = uc->desc; in udma_cyclic_packet_elapsed()
1031 h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr; in udma_cyclic_packet_elapsed()
1033 udma_push_to_ring(uc, d->desc_idx); in udma_cyclic_packet_elapsed()
1034 d->desc_idx = (d->desc_idx + 1) % d->sglen; in udma_cyclic_packet_elapsed()
1039 struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr; in udma_fetch_epib()
1041 memcpy(d->metadata, h_desc->epib, d->metadata_size); in udma_fetch_epib()
1049 if (uc->config.ep_type == PSIL_EP_NATIVE || in udma_is_desc_really_done()
1050 uc->config.dir != DMA_MEM_TO_DEV) in udma_is_desc_really_done()
1058 uc->tx_drain.residue = bcnt - peer_bcnt; in udma_is_desc_really_done()
1059 uc->tx_drain.tstamp = ktime_get(); in udma_is_desc_really_done()
1076 if (uc->desc) { in udma_check_tx_completion()
1078 residue_diff = uc->tx_drain.residue; in udma_check_tx_completion()
1079 time_diff = uc->tx_drain.tstamp; in udma_check_tx_completion()
1084 desc_done = udma_is_desc_really_done(uc, uc->desc); in udma_check_tx_completion()
1092 time_diff = ktime_sub(uc->tx_drain.tstamp, in udma_check_tx_completion()
1094 residue_diff -= uc->tx_drain.residue; in udma_check_tx_completion()
1103 uc->tx_drain.residue; in udma_check_tx_completion()
1106 schedule_delayed_work(&uc->tx_drain.work, HZ); in udma_check_tx_completion()
1115 if (uc->desc) { in udma_check_tx_completion()
1116 struct udma_desc *d = uc->desc; in udma_check_tx_completion()
1118 uc->bcnt += d->residue; in udma_check_tx_completion()
1120 vchan_cookie_complete(&d->vd); in udma_check_tx_completion()
1137 spin_lock(&uc->vc.lock); in udma_ring_irq_handler()
1141 complete_all(&uc->teardown_completed); in udma_ring_irq_handler()
1143 if (uc->terminated_desc) { in udma_ring_irq_handler()
1144 udma_desc_free(&uc->terminated_desc->vd); in udma_ring_irq_handler()
1145 uc->terminated_desc = NULL; in udma_ring_irq_handler()
1148 if (!uc->desc) in udma_ring_irq_handler()
1158 d->desc_idx); in udma_ring_irq_handler()
1160 dev_err(uc->ud->dev, "not matching descriptors!\n"); in udma_ring_irq_handler()
1164 if (d == uc->desc) { in udma_ring_irq_handler()
1166 if (uc->cyclic) { in udma_ring_irq_handler()
1168 vchan_cyclic_callback(&d->vd); in udma_ring_irq_handler()
1171 uc->bcnt += d->residue; in udma_ring_irq_handler()
1173 vchan_cookie_complete(&d->vd); in udma_ring_irq_handler()
1175 schedule_delayed_work(&uc->tx_drain.work, in udma_ring_irq_handler()
1184 dma_cookie_complete(&d->vd.tx); in udma_ring_irq_handler()
1188 spin_unlock(&uc->vc.lock); in udma_ring_irq_handler()
1198 spin_lock(&uc->vc.lock); in udma_udma_irq_handler()
1199 d = uc->desc; in udma_udma_irq_handler()
1201 d->tr_idx = (d->tr_idx + 1) % d->sglen; in udma_udma_irq_handler()
1203 if (uc->cyclic) { in udma_udma_irq_handler()
1204 vchan_cyclic_callback(&d->vd); in udma_udma_irq_handler()
1207 uc->bcnt += d->residue; in udma_udma_irq_handler()
1209 vchan_cookie_complete(&d->vd); in udma_udma_irq_handler()
1213 spin_unlock(&uc->vc.lock); in udma_udma_irq_handler()
1219 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
1225 only using explicit flow id number. If @from is set to -1 it will try to find
1229 * Returns -ENOMEM if can't find free range.
1230 * -EEXIST if requested range is busy.
1231 * -EINVAL if wrong input values passed.
1241 tmp_from = ud->rchan_cnt; in __udma_alloc_gp_rflow_range()
1243 if (tmp_from < ud->rchan_cnt) in __udma_alloc_gp_rflow_range()
1244 return -EINVAL; in __udma_alloc_gp_rflow_range()
1246 if (tmp_from + cnt > ud->rflow_cnt) in __udma_alloc_gp_rflow_range()
1247 return -EINVAL; in __udma_alloc_gp_rflow_range()
1249 bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated, in __udma_alloc_gp_rflow_range()
1250 ud->rflow_cnt); in __udma_alloc_gp_rflow_range()
1253 ud->rflow_cnt, in __udma_alloc_gp_rflow_range()
1255 if (start >= ud->rflow_cnt) in __udma_alloc_gp_rflow_range()
1256 return -ENOMEM; in __udma_alloc_gp_rflow_range()
1259 return -EEXIST; in __udma_alloc_gp_rflow_range()
1261 bitmap_set(ud->rflow_gp_map_allocated, start, cnt); in __udma_alloc_gp_rflow_range()
1267 if (from < ud->rchan_cnt) in __udma_free_gp_rflow_range()
1268 return -EINVAL; in __udma_free_gp_rflow_range()
1269 if (from + cnt > ud->rflow_cnt) in __udma_free_gp_rflow_range()
1270 return -EINVAL; in __udma_free_gp_rflow_range()
1272 bitmap_clear(ud->rflow_gp_map_allocated, from, cnt); in __udma_free_gp_rflow_range()
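
The kerneldoc fragments above spell out the contract of the GP rflow range helpers: reserve a run of @cnt consecutive flow ids, either anywhere (@from == -1) or exactly at the requested index, returning -EEXIST when an explicitly requested range is busy and -ENOMEM when nothing is free. Below is a minimal, self-contained sketch of that reservation pattern; the array-based bookkeeping and the names (gp_range_alloc, MAX_FLOWS) are illustrative stand-ins for the driver's bitmaps, not its actual code.

#include <stdbool.h>
#include <stdio.h>

#define MAX_FLOWS 96 /* illustrative size, not the driver's value */

/* Stands in for rflow_gp_map | rflow_gp_map_allocated. */
static bool flow_busy[MAX_FLOWS];

/* Reserve @cnt consecutive flow ids starting at @from (or search when @from < 0). */
static int gp_range_alloc(int from, int cnt)
{
	int start, i;

	for (start = (from < 0) ? 0 : from; start + cnt <= MAX_FLOWS; start++) {
		for (i = 0; i < cnt; i++)
			if (flow_busy[start + i])
				break;
		if (i == cnt) {
			for (i = 0; i < cnt; i++)
				flow_busy[start + i] = true;
			return start;	/* first id of the reserved range */
		}
		if (from >= 0)
			return -2;	/* explicit range requested but busy (-EEXIST) */
	}
	return -1;			/* no free range found (-ENOMEM) */
}

int main(void)
{
	printf("range at %d\n", gp_range_alloc(-1, 4)); /* finds ids 0..3 */
	printf("range at %d\n", gp_range_alloc(2, 2));  /* ids 2..3 now busy -> -2 */
	return 0;
}
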
1281 * TI-SCI FW will perform additional permission check anyway, it's in __udma_get_rflow()
1285 if (id < 0 || id >= ud->rflow_cnt) in __udma_get_rflow()
1286 return ERR_PTR(-ENOENT); in __udma_get_rflow()
1288 if (test_bit(id, ud->rflow_in_use)) in __udma_get_rflow()
1289 return ERR_PTR(-ENOENT); in __udma_get_rflow()
1291 if (ud->rflow_gp_map) { in __udma_get_rflow()
1293 if (!test_bit(id, ud->rflow_gp_map) && in __udma_get_rflow()
1294 !test_bit(id, ud->rflow_gp_map_allocated)) in __udma_get_rflow()
1295 return ERR_PTR(-EINVAL); in __udma_get_rflow()
1298 dev_dbg(ud->dev, "get rflow%d\n", id); in __udma_get_rflow()
1299 set_bit(id, ud->rflow_in_use); in __udma_get_rflow()
1300 return &ud->rflows[id]; in __udma_get_rflow()
1305 if (!test_bit(rflow->id, ud->rflow_in_use)) { in __udma_put_rflow()
1306 dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id); in __udma_put_rflow()
1310 dev_dbg(ud->dev, "put rflow%d\n", rflow->id); in __udma_put_rflow()
1311 clear_bit(rflow->id, ud->rflow_in_use); in __udma_put_rflow()
1316 enum udma_tp_level tpl, \
1320 if (test_bit(id, ud->res##_map)) { \
1321 dev_err(ud->dev, "res##%d is in use\n", id); \
1322 return ERR_PTR(-ENOENT); \
1327 if (tpl >= ud->res##_tpl.levels) \
1328 tpl = ud->res##_tpl.levels - 1; \
1330 start = ud->res##_tpl.start_idx[tpl]; \
1332 id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
1334 if (id == ud->res##_cnt) { \
1335 return ERR_PTR(-ENOENT); \
1339 set_bit(id, ud->res##_map); \
1340 return &ud->res##s[id]; \
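
The UDMA_RESERVE_RESOURCE() fragments above outline the shared reservation pattern behind the __udma_reserve_bchan/tchan/rchan() helpers: an explicitly requested id is checked against the in-use bitmap, otherwise the search for a free id starts at the start index of the requested throughput level, clamped to the levels that actually exist. The plain-C sketch below mirrors that flow; the channel count and level layout are invented for illustration only.

#include <stdbool.h>
#include <stdio.h>

#define CHAN_CNT 32			/* illustrative channel count */

static bool chan_in_use[CHAN_CNT];
/* Start index of each throughput level; values are illustrative only. */
static const int tpl_start_idx[] = { 16, 4, 0 };
static const int tpl_levels = 3;

/* Reserve channel @id, or the first free one at/after the start of level @tpl when @id < 0. */
static int reserve_chan(int tpl, int id)
{
	if (id >= 0) {
		if (chan_in_use[id])
			return -1;	/* explicit id already in use (-ENOENT) */
	} else {
		if (tpl >= tpl_levels)
			tpl = tpl_levels - 1;	/* requested level not present: clamp */
		for (id = tpl_start_idx[tpl]; id < CHAN_CNT; id++)
			if (!chan_in_use[id])
				break;
		if (id == CHAN_CNT)
			return -1;	/* nothing free (-ENOENT) */
	}

	chan_in_use[id] = true;
	return id;
}

int main(void)
{
	printf("got chan %d\n", reserve_chan(0, -1));	/* first free id at/after 16 */
	printf("got chan %d\n", reserve_chan(2, -1));	/* first free id at/after 0 */
	return 0;
}
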
1349 struct udma_dev *ud = uc->ud; in bcdma_get_bchan()
1350 enum udma_tp_level tpl; in bcdma_get_bchan() local
1352 if (uc->bchan) { in bcdma_get_bchan()
1353 dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n", in bcdma_get_bchan()
1354 uc->id, uc->bchan->id); in bcdma_get_bchan()
1359 * Use normal channels for peripherals, and highest TPL channel for in bcdma_get_bchan()
1362 if (uc->config.tr_trigger_type) in bcdma_get_bchan()
1363 tpl = 0; in bcdma_get_bchan()
1365 tpl = ud->bchan_tpl.levels - 1; in bcdma_get_bchan()
1367 uc->bchan = __udma_reserve_bchan(ud, tpl, -1); in bcdma_get_bchan()
1368 if (IS_ERR(uc->bchan)) in bcdma_get_bchan()
1369 return PTR_ERR(uc->bchan); in bcdma_get_bchan()
1371 uc->tchan = uc->bchan; in bcdma_get_bchan()
1378 struct udma_dev *ud = uc->ud; in udma_get_tchan()
1380 if (uc->tchan) { in udma_get_tchan()
1381 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n", in udma_get_tchan()
1382 uc->id, uc->tchan->id); in udma_get_tchan()
1387 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels. in udma_get_tchan()
1391 uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, in udma_get_tchan()
1392 uc->config.mapped_channel_id); in udma_get_tchan()
1393 if (IS_ERR(uc->tchan)) in udma_get_tchan()
1394 return PTR_ERR(uc->tchan); in udma_get_tchan()
1396 if (ud->tflow_cnt) { in udma_get_tchan()
1399 /* Only PKTDMA has support for tx flows */ in udma_get_tchan()
1400 if (uc->config.default_flow_id >= 0) in udma_get_tchan()
1401 tflow_id = uc->config.default_flow_id; in udma_get_tchan()
1403 tflow_id = uc->tchan->id; in udma_get_tchan()
1405 if (test_bit(tflow_id, ud->tflow_map)) { in udma_get_tchan()
1406 dev_err(ud->dev, "tflow%d is in use\n", tflow_id); in udma_get_tchan()
1407 clear_bit(uc->tchan->id, ud->tchan_map); in udma_get_tchan()
1408 uc->tchan = NULL; in udma_get_tchan()
1409 return -ENOENT; in udma_get_tchan()
1412 uc->tchan->tflow_id = tflow_id; in udma_get_tchan()
1413 set_bit(tflow_id, ud->tflow_map); in udma_get_tchan()
1415 uc->tchan->tflow_id = -1; in udma_get_tchan()
1423 struct udma_dev *ud = uc->ud; in udma_get_rchan()
1425 if (uc->rchan) { in udma_get_rchan()
1426 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n", in udma_get_rchan()
1427 uc->id, uc->rchan->id); in udma_get_rchan()
1432 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels. in udma_get_rchan()
1436 uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, in udma_get_rchan()
1437 uc->config.mapped_channel_id); in udma_get_rchan()
1439 return PTR_ERR_OR_ZERO(uc->rchan); in udma_get_rchan()
1444 struct udma_dev *ud = uc->ud; in udma_get_chan_pair()
1447 if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) { in udma_get_chan_pair()
1448 dev_info(ud->dev, "chan%d: already have %d pair allocated\n", in udma_get_chan_pair()
1449 uc->id, uc->tchan->id); in udma_get_chan_pair()
1453 if (uc->tchan) { in udma_get_chan_pair()
1454 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n", in udma_get_chan_pair()
1455 uc->id, uc->tchan->id); in udma_get_chan_pair()
1456 return -EBUSY; in udma_get_chan_pair()
1457 } else if (uc->rchan) { in udma_get_chan_pair()
1458 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n", in udma_get_chan_pair()
1459 uc->id, uc->rchan->id); in udma_get_chan_pair()
1460 return -EBUSY; in udma_get_chan_pair()
1464 end = min(ud->tchan_cnt, ud->rchan_cnt); in udma_get_chan_pair()
1466 * Try to use the highest TPL channel pair for MEM_TO_MEM channels in udma_get_chan_pair()
1467 * Note: in UDMAP the channel TPL is symmetric between tchan and rchan in udma_get_chan_pair()
1469 chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1]; in udma_get_chan_pair()
1471 if (!test_bit(chan_id, ud->tchan_map) && in udma_get_chan_pair()
1472 !test_bit(chan_id, ud->rchan_map)) in udma_get_chan_pair()
1477 return -ENOENT; in udma_get_chan_pair()
1479 set_bit(chan_id, ud->tchan_map); in udma_get_chan_pair()
1480 set_bit(chan_id, ud->rchan_map); in udma_get_chan_pair()
1481 uc->tchan = &ud->tchans[chan_id]; in udma_get_chan_pair()
1482 uc->rchan = &ud->rchans[chan_id]; in udma_get_chan_pair()
1485 uc->tchan->tflow_id = -1; in udma_get_chan_pair()
1492 struct udma_dev *ud = uc->ud; in udma_get_rflow()
1494 if (!uc->rchan) { in udma_get_rflow()
1495 dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id); in udma_get_rflow()
1496 return -EINVAL; in udma_get_rflow()
1499 if (uc->rflow) { in udma_get_rflow()
1500 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n", in udma_get_rflow()
1501 uc->id, uc->rflow->id); in udma_get_rflow()
1505 uc->rflow = __udma_get_rflow(ud, flow_id); in udma_get_rflow()
1507 return PTR_ERR_OR_ZERO(uc->rflow); in udma_get_rflow()
1512 struct udma_dev *ud = uc->ud; in bcdma_put_bchan()
1514 if (uc->bchan) { in bcdma_put_bchan()
1515 dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id, in bcdma_put_bchan()
1516 uc->bchan->id); in bcdma_put_bchan()
1517 clear_bit(uc->bchan->id, ud->bchan_map); in bcdma_put_bchan()
1518 uc->bchan = NULL; in bcdma_put_bchan()
1519 uc->tchan = NULL; in bcdma_put_bchan()
1525 struct udma_dev *ud = uc->ud; in udma_put_rchan()
1527 if (uc->rchan) { in udma_put_rchan()
1528 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id, in udma_put_rchan()
1529 uc->rchan->id); in udma_put_rchan()
1530 clear_bit(uc->rchan->id, ud->rchan_map); in udma_put_rchan()
1531 uc->rchan = NULL; in udma_put_rchan()
1537 struct udma_dev *ud = uc->ud; in udma_put_tchan()
1539 if (uc->tchan) { in udma_put_tchan()
1540 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id, in udma_put_tchan()
1541 uc->tchan->id); in udma_put_tchan()
1542 clear_bit(uc->tchan->id, ud->tchan_map); in udma_put_tchan()
1544 if (uc->tchan->tflow_id >= 0) in udma_put_tchan()
1545 clear_bit(uc->tchan->tflow_id, ud->tflow_map); in udma_put_tchan()
1547 uc->tchan = NULL; in udma_put_tchan()
1553 struct udma_dev *ud = uc->ud; in udma_put_rflow()
1555 if (uc->rflow) { in udma_put_rflow()
1556 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id, in udma_put_rflow()
1557 uc->rflow->id); in udma_put_rflow()
1558 __udma_put_rflow(ud, uc->rflow); in udma_put_rflow()
1559 uc->rflow = NULL; in udma_put_rflow()
1565 if (!uc->bchan) in bcdma_free_bchan_resources()
1568 k3_ringacc_ring_free(uc->bchan->tc_ring); in bcdma_free_bchan_resources()
1569 k3_ringacc_ring_free(uc->bchan->t_ring); in bcdma_free_bchan_resources()
1570 uc->bchan->tc_ring = NULL; in bcdma_free_bchan_resources()
1571 uc->bchan->t_ring = NULL; in bcdma_free_bchan_resources()
1572 k3_configure_chan_coherency(&uc->vc.chan, 0); in bcdma_free_bchan_resources()
1580 struct udma_dev *ud = uc->ud; in bcdma_alloc_bchan_resources()
1587 ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1, in bcdma_alloc_bchan_resources()
1588 &uc->bchan->t_ring, in bcdma_alloc_bchan_resources()
1589 &uc->bchan->tc_ring); in bcdma_alloc_bchan_resources()
1591 ret = -EBUSY; in bcdma_alloc_bchan_resources()
1600 k3_configure_chan_coherency(&uc->vc.chan, ud->asel); in bcdma_alloc_bchan_resources()
1601 ring_cfg.asel = ud->asel; in bcdma_alloc_bchan_resources()
1602 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan); in bcdma_alloc_bchan_resources()
1604 ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg); in bcdma_alloc_bchan_resources()
1611 k3_ringacc_ring_free(uc->bchan->tc_ring); in bcdma_alloc_bchan_resources()
1612 uc->bchan->tc_ring = NULL; in bcdma_alloc_bchan_resources()
1613 k3_ringacc_ring_free(uc->bchan->t_ring); in bcdma_alloc_bchan_resources()
1614 uc->bchan->t_ring = NULL; in bcdma_alloc_bchan_resources()
1615 k3_configure_chan_coherency(&uc->vc.chan, 0); in bcdma_alloc_bchan_resources()
1624 if (!uc->tchan) in udma_free_tx_resources()
1627 k3_ringacc_ring_free(uc->tchan->t_ring); in udma_free_tx_resources()
1628 k3_ringacc_ring_free(uc->tchan->tc_ring); in udma_free_tx_resources()
1629 uc->tchan->t_ring = NULL; in udma_free_tx_resources()
1630 uc->tchan->tc_ring = NULL; in udma_free_tx_resources()
1638 struct udma_dev *ud = uc->ud; in udma_alloc_tx_resources()
1646 tchan = uc->tchan; in udma_alloc_tx_resources()
1647 if (tchan->tflow_id >= 0) in udma_alloc_tx_resources()
1648 ring_idx = tchan->tflow_id; in udma_alloc_tx_resources()
1650 ring_idx = ud->bchan_cnt + tchan->id; in udma_alloc_tx_resources()
1652 ret = k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1, in udma_alloc_tx_resources()
1653 &tchan->t_ring, in udma_alloc_tx_resources()
1654 &tchan->tc_ring); in udma_alloc_tx_resources()
1656 ret = -EBUSY; in udma_alloc_tx_resources()
1663 if (ud->match_data->type == DMA_TYPE_UDMA) { in udma_alloc_tx_resources()
1668 k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel); in udma_alloc_tx_resources()
1669 ring_cfg.asel = uc->config.asel; in udma_alloc_tx_resources()
1670 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan); in udma_alloc_tx_resources()
1673 ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg); in udma_alloc_tx_resources()
1674 ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg); in udma_alloc_tx_resources()
1682 k3_ringacc_ring_free(uc->tchan->tc_ring); in udma_alloc_tx_resources()
1683 uc->tchan->tc_ring = NULL; in udma_alloc_tx_resources()
1684 k3_ringacc_ring_free(uc->tchan->t_ring); in udma_alloc_tx_resources()
1685 uc->tchan->t_ring = NULL; in udma_alloc_tx_resources()
1694 if (!uc->rchan) in udma_free_rx_resources()
1697 if (uc->rflow) { in udma_free_rx_resources()
1698 struct udma_rflow *rflow = uc->rflow; in udma_free_rx_resources()
1700 k3_ringacc_ring_free(rflow->fd_ring); in udma_free_rx_resources()
1701 k3_ringacc_ring_free(rflow->r_ring); in udma_free_rx_resources()
1702 rflow->fd_ring = NULL; in udma_free_rx_resources()
1703 rflow->r_ring = NULL; in udma_free_rx_resources()
1713 struct udma_dev *ud = uc->ud; in udma_alloc_rx_resources()
1724 if (uc->config.dir == DMA_MEM_TO_MEM) in udma_alloc_rx_resources()
1727 if (uc->config.default_flow_id >= 0) in udma_alloc_rx_resources()
1728 ret = udma_get_rflow(uc, uc->config.default_flow_id); in udma_alloc_rx_resources()
1730 ret = udma_get_rflow(uc, uc->rchan->id); in udma_alloc_rx_resources()
1733 ret = -EBUSY; in udma_alloc_rx_resources()
1737 rflow = uc->rflow; in udma_alloc_rx_resources()
1738 if (ud->tflow_cnt) in udma_alloc_rx_resources()
1739 fd_ring_id = ud->tflow_cnt + rflow->id; in udma_alloc_rx_resources()
1741 fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt + in udma_alloc_rx_resources()
1742 uc->rchan->id; in udma_alloc_rx_resources()
1744 ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1, in udma_alloc_rx_resources()
1745 &rflow->fd_ring, &rflow->r_ring); in udma_alloc_rx_resources()
1747 ret = -EBUSY; in udma_alloc_rx_resources()
1754 if (ud->match_data->type == DMA_TYPE_UDMA) { in udma_alloc_rx_resources()
1755 if (uc->config.pkt_mode) in udma_alloc_rx_resources()
1765 k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel); in udma_alloc_rx_resources()
1766 ring_cfg.asel = uc->config.asel; in udma_alloc_rx_resources()
1767 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan); in udma_alloc_rx_resources()
1770 ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg); in udma_alloc_rx_resources()
1773 ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg); in udma_alloc_rx_resources()
1781 k3_ringacc_ring_free(rflow->r_ring); in udma_alloc_rx_resources()
1782 rflow->r_ring = NULL; in udma_alloc_rx_resources()
1783 k3_ringacc_ring_free(rflow->fd_ring); in udma_alloc_rx_resources()
1784 rflow->fd_ring = NULL; in udma_alloc_rx_resources()
1827 struct udma_dev *ud = uc->ud; in udma_tisci_m2m_channel_config()
1828 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in udma_tisci_m2m_channel_config()
1829 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; in udma_tisci_m2m_channel_config()
1830 struct udma_tchan *tchan = uc->tchan; in udma_tisci_m2m_channel_config()
1831 struct udma_rchan *rchan = uc->rchan; in udma_tisci_m2m_channel_config()
1834 u8 tpl; in udma_tisci_m2m_channel_config() local
1836 /* Non synchronized - mem to mem type of transfer */ in udma_tisci_m2m_channel_config()
1837 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); in udma_tisci_m2m_channel_config()
1841 if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) { in udma_tisci_m2m_channel_config()
1842 tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id); in udma_tisci_m2m_channel_config()
1844 burst_size = ud->match_data->burst_size[tpl]; in udma_tisci_m2m_channel_config()
1848 req_tx.nav_id = tisci_rm->tisci_dev_id; in udma_tisci_m2m_channel_config()
1849 req_tx.index = tchan->id; in udma_tisci_m2m_channel_config()
1853 req_tx.tx_atype = ud->atype; in udma_tisci_m2m_channel_config()
1859 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); in udma_tisci_m2m_channel_config()
1861 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); in udma_tisci_m2m_channel_config()
1866 req_rx.nav_id = tisci_rm->tisci_dev_id; in udma_tisci_m2m_channel_config()
1867 req_rx.index = rchan->id; in udma_tisci_m2m_channel_config()
1871 req_rx.rx_atype = ud->atype; in udma_tisci_m2m_channel_config()
1877 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); in udma_tisci_m2m_channel_config()
1879 dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret); in udma_tisci_m2m_channel_config()
1886 struct udma_dev *ud = uc->ud; in bcdma_tisci_m2m_channel_config()
1887 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in bcdma_tisci_m2m_channel_config()
1888 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; in bcdma_tisci_m2m_channel_config()
1890 struct udma_bchan *bchan = uc->bchan; in bcdma_tisci_m2m_channel_config()
1893 u8 tpl; in bcdma_tisci_m2m_channel_config() local
1895 if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) { in bcdma_tisci_m2m_channel_config()
1896 tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id); in bcdma_tisci_m2m_channel_config()
1898 burst_size = ud->match_data->burst_size[tpl]; in bcdma_tisci_m2m_channel_config()
1902 req_tx.nav_id = tisci_rm->tisci_dev_id; in bcdma_tisci_m2m_channel_config()
1904 req_tx.index = bchan->id; in bcdma_tisci_m2m_channel_config()
1910 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); in bcdma_tisci_m2m_channel_config()
1912 dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret); in bcdma_tisci_m2m_channel_config()
1919 struct udma_dev *ud = uc->ud; in udma_tisci_tx_channel_config()
1920 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in udma_tisci_tx_channel_config()
1921 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; in udma_tisci_tx_channel_config()
1922 struct udma_tchan *tchan = uc->tchan; in udma_tisci_tx_channel_config()
1923 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); in udma_tisci_tx_channel_config()
1928 if (uc->config.pkt_mode) { in udma_tisci_tx_channel_config()
1930 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, in udma_tisci_tx_channel_config()
1931 uc->config.psd_size, 0); in udma_tisci_tx_channel_config()
1938 req_tx.nav_id = tisci_rm->tisci_dev_id; in udma_tisci_tx_channel_config()
1939 req_tx.index = tchan->id; in udma_tisci_tx_channel_config()
1941 req_tx.tx_supr_tdpkt = uc->config.notdpkt; in udma_tisci_tx_channel_config()
1944 req_tx.tx_atype = uc->config.atype; in udma_tisci_tx_channel_config()
1945 if (uc->config.ep_type == PSIL_EP_PDMA_XY && in udma_tisci_tx_channel_config()
1946 ud->match_data->flags & UDMA_FLAG_TDTYPE) { in udma_tisci_tx_channel_config()
1953 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); in udma_tisci_tx_channel_config()
1955 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); in udma_tisci_tx_channel_config()
1962 struct udma_dev *ud = uc->ud; in bcdma_tisci_tx_channel_config()
1963 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in bcdma_tisci_tx_channel_config()
1964 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; in bcdma_tisci_tx_channel_config()
1965 struct udma_tchan *tchan = uc->tchan; in bcdma_tisci_tx_channel_config()
1970 req_tx.nav_id = tisci_rm->tisci_dev_id; in bcdma_tisci_tx_channel_config()
1971 req_tx.index = tchan->id; in bcdma_tisci_tx_channel_config()
1972 req_tx.tx_supr_tdpkt = uc->config.notdpkt; in bcdma_tisci_tx_channel_config()
1973 if (ud->match_data->flags & UDMA_FLAG_TDTYPE) { in bcdma_tisci_tx_channel_config()
1980 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); in bcdma_tisci_tx_channel_config()
1982 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); in bcdma_tisci_tx_channel_config()
1991 struct udma_dev *ud = uc->ud; in udma_tisci_rx_channel_config()
1992 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in udma_tisci_rx_channel_config()
1993 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; in udma_tisci_rx_channel_config()
1994 struct udma_rchan *rchan = uc->rchan; in udma_tisci_rx_channel_config()
1995 int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring); in udma_tisci_rx_channel_config()
1996 int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring); in udma_tisci_rx_channel_config()
2002 if (uc->config.pkt_mode) { in udma_tisci_rx_channel_config()
2004 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, in udma_tisci_rx_channel_config()
2005 uc->config.psd_size, 0); in udma_tisci_rx_channel_config()
2012 req_rx.nav_id = tisci_rm->tisci_dev_id; in udma_tisci_rx_channel_config()
2013 req_rx.index = rchan->id; in udma_tisci_rx_channel_config()
2017 req_rx.rx_atype = uc->config.atype; in udma_tisci_rx_channel_config()
2019 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); in udma_tisci_rx_channel_config()
2021 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret); in udma_tisci_rx_channel_config()
2040 flow_req.nav_id = tisci_rm->tisci_dev_id; in udma_tisci_rx_channel_config()
2041 flow_req.flow_index = rchan->id; in udma_tisci_rx_channel_config()
2043 if (uc->config.needs_epib) in udma_tisci_rx_channel_config()
2047 if (uc->config.psd_size) in udma_tisci_rx_channel_config()
2062 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req); in udma_tisci_rx_channel_config()
2065 dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret); in udma_tisci_rx_channel_config()
2072 struct udma_dev *ud = uc->ud; in bcdma_tisci_rx_channel_config()
2073 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in bcdma_tisci_rx_channel_config()
2074 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; in bcdma_tisci_rx_channel_config()
2075 struct udma_rchan *rchan = uc->rchan; in bcdma_tisci_rx_channel_config()
2080 req_rx.nav_id = tisci_rm->tisci_dev_id; in bcdma_tisci_rx_channel_config()
2081 req_rx.index = rchan->id; in bcdma_tisci_rx_channel_config()
2083 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); in bcdma_tisci_rx_channel_config()
2085 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret); in bcdma_tisci_rx_channel_config()
2092 struct udma_dev *ud = uc->ud; in pktdma_tisci_rx_channel_config()
2093 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in pktdma_tisci_rx_channel_config()
2094 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; in pktdma_tisci_rx_channel_config()
2100 req_rx.nav_id = tisci_rm->tisci_dev_id; in pktdma_tisci_rx_channel_config()
2101 req_rx.index = uc->rchan->id; in pktdma_tisci_rx_channel_config()
2103 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); in pktdma_tisci_rx_channel_config()
2105 dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret); in pktdma_tisci_rx_channel_config()
2114 flow_req.nav_id = tisci_rm->tisci_dev_id; in pktdma_tisci_rx_channel_config()
2115 flow_req.flow_index = uc->rflow->id; in pktdma_tisci_rx_channel_config()
2117 if (uc->config.needs_epib) in pktdma_tisci_rx_channel_config()
2121 if (uc->config.psd_size) in pktdma_tisci_rx_channel_config()
2127 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req); in pktdma_tisci_rx_channel_config()
2130 dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id, in pktdma_tisci_rx_channel_config()
2139 struct udma_dev *ud = to_udma_dev(chan->device); in udma_alloc_chan_resources()
2140 const struct udma_soc_data *soc_data = ud->soc_data; in udma_alloc_chan_resources()
2145 uc->dma_dev = ud->dev; in udma_alloc_chan_resources()
2147 if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) { in udma_alloc_chan_resources()
2148 uc->use_dma_pool = true; in udma_alloc_chan_resources()
2150 if (uc->config.dir == DMA_MEM_TO_MEM) { in udma_alloc_chan_resources()
2151 uc->config.hdesc_size = cppi5_trdesc_calc_size( in udma_alloc_chan_resources()
2153 uc->config.pkt_mode = false; in udma_alloc_chan_resources()
2157 if (uc->use_dma_pool) { in udma_alloc_chan_resources()
2158 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev, in udma_alloc_chan_resources()
2159 uc->config.hdesc_size, in udma_alloc_chan_resources()
2160 ud->desc_align, in udma_alloc_chan_resources()
2162 if (!uc->hdesc_pool) { in udma_alloc_chan_resources()
2163 dev_err(ud->ddev.dev, in udma_alloc_chan_resources()
2165 uc->use_dma_pool = false; in udma_alloc_chan_resources()
2166 ret = -ENOMEM; in udma_alloc_chan_resources()
2175 reinit_completion(&uc->teardown_completed); in udma_alloc_chan_resources()
2176 complete_all(&uc->teardown_completed); in udma_alloc_chan_resources()
2177 uc->state = UDMA_CHAN_IS_IDLE; in udma_alloc_chan_resources()
2179 switch (uc->config.dir) { in udma_alloc_chan_resources()
2181 /* Non synchronized - mem to mem type of transfer */ in udma_alloc_chan_resources()
2182 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__, in udma_alloc_chan_resources()
2183 uc->id); in udma_alloc_chan_resources()
2201 uc->config.src_thread = ud->psil_base + uc->tchan->id; in udma_alloc_chan_resources()
2202 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | in udma_alloc_chan_resources()
2205 irq_ring = uc->tchan->tc_ring; in udma_alloc_chan_resources()
2206 irq_udma_idx = uc->tchan->id; in udma_alloc_chan_resources()
2211 /* Slave transfer synchronized - mem to dev (TX) transfer */ in udma_alloc_chan_resources()
2212 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__, in udma_alloc_chan_resources()
2213 uc->id); in udma_alloc_chan_resources()
2219 uc->config.src_thread = ud->psil_base + uc->tchan->id; in udma_alloc_chan_resources()
2220 uc->config.dst_thread = uc->config.remote_thread_id; in udma_alloc_chan_resources()
2221 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; in udma_alloc_chan_resources()
2223 irq_ring = uc->tchan->tc_ring; in udma_alloc_chan_resources()
2224 irq_udma_idx = uc->tchan->id; in udma_alloc_chan_resources()
2229 /* Slave transfer synchronized - dev to mem (RX) transfer */ in udma_alloc_chan_resources()
2230 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__, in udma_alloc_chan_resources()
2231 uc->id); in udma_alloc_chan_resources()
2237 uc->config.src_thread = uc->config.remote_thread_id; in udma_alloc_chan_resources()
2238 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | in udma_alloc_chan_resources()
2241 irq_ring = uc->rflow->r_ring; in udma_alloc_chan_resources()
2242 irq_udma_idx = soc_data->oes.udma_rchan + uc->rchan->id; in udma_alloc_chan_resources()
2248 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", in udma_alloc_chan_resources()
2249 __func__, uc->id, uc->config.dir); in udma_alloc_chan_resources()
2250 ret = -EINVAL; in udma_alloc_chan_resources()
2260 dev_warn(ud->dev, "chan%d: is running!\n", uc->id); in udma_alloc_chan_resources()
2263 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); in udma_alloc_chan_resources()
2264 ret = -EBUSY; in udma_alloc_chan_resources()
2269 /* PSI-L pairing */ in udma_alloc_chan_resources()
2270 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread); in udma_alloc_chan_resources()
2272 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n", in udma_alloc_chan_resources()
2273 uc->config.src_thread, uc->config.dst_thread); in udma_alloc_chan_resources()
2277 uc->psil_paired = true; in udma_alloc_chan_resources()
2279 uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring); in udma_alloc_chan_resources()
2280 if (uc->irq_num_ring <= 0) { in udma_alloc_chan_resources()
2281 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", in udma_alloc_chan_resources()
2283 ret = -EINVAL; in udma_alloc_chan_resources()
2287 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, in udma_alloc_chan_resources()
2288 IRQF_TRIGGER_HIGH, uc->name, uc); in udma_alloc_chan_resources()
2290 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); in udma_alloc_chan_resources()
2295 if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) { in udma_alloc_chan_resources()
2296 uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev, in udma_alloc_chan_resources()
2298 if (uc->irq_num_udma <= 0) { in udma_alloc_chan_resources()
2299 dev_err(ud->dev, "Failed to get udma irq (index: %u)\n", in udma_alloc_chan_resources()
2301 free_irq(uc->irq_num_ring, uc); in udma_alloc_chan_resources()
2302 ret = -EINVAL; in udma_alloc_chan_resources()
2306 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0, in udma_alloc_chan_resources()
2307 uc->name, uc); in udma_alloc_chan_resources()
2309 dev_err(ud->dev, "chan%d: UDMA irq request failed\n", in udma_alloc_chan_resources()
2310 uc->id); in udma_alloc_chan_resources()
2311 free_irq(uc->irq_num_ring, uc); in udma_alloc_chan_resources()
2315 uc->irq_num_udma = 0; in udma_alloc_chan_resources()
2323 uc->irq_num_ring = 0; in udma_alloc_chan_resources()
2324 uc->irq_num_udma = 0; in udma_alloc_chan_resources()
2326 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread); in udma_alloc_chan_resources()
2327 uc->psil_paired = false; in udma_alloc_chan_resources()
2334 if (uc->use_dma_pool) { in udma_alloc_chan_resources()
2335 dma_pool_destroy(uc->hdesc_pool); in udma_alloc_chan_resources()
2336 uc->use_dma_pool = false; in udma_alloc_chan_resources()
2345 struct udma_dev *ud = to_udma_dev(chan->device); in bcdma_alloc_chan_resources()
2346 const struct udma_oes_offsets *oes = &ud->soc_data->oes; in bcdma_alloc_chan_resources()
2351 uc->config.pkt_mode = false; in bcdma_alloc_chan_resources()
2357 reinit_completion(&uc->teardown_completed); in bcdma_alloc_chan_resources()
2358 complete_all(&uc->teardown_completed); in bcdma_alloc_chan_resources()
2359 uc->state = UDMA_CHAN_IS_IDLE; in bcdma_alloc_chan_resources()
2361 switch (uc->config.dir) { in bcdma_alloc_chan_resources()
2363 /* Non synchronized - mem to mem type of transfer */ in bcdma_alloc_chan_resources()
2364 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__, in bcdma_alloc_chan_resources()
2365 uc->id); in bcdma_alloc_chan_resources()
2371 irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring; in bcdma_alloc_chan_resources()
2372 irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data; in bcdma_alloc_chan_resources()
2377 /* Slave transfer synchronized - mem to dev (TX) transfer */ in bcdma_alloc_chan_resources()
2378 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__, in bcdma_alloc_chan_resources()
2379 uc->id); in bcdma_alloc_chan_resources()
2383 uc->config.remote_thread_id = -1; in bcdma_alloc_chan_resources()
2387 uc->config.src_thread = ud->psil_base + uc->tchan->id; in bcdma_alloc_chan_resources()
2388 uc->config.dst_thread = uc->config.remote_thread_id; in bcdma_alloc_chan_resources()
2389 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; in bcdma_alloc_chan_resources()
2391 irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring; in bcdma_alloc_chan_resources()
2392 irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data; in bcdma_alloc_chan_resources()
2397 /* Slave transfer synchronized - dev to mem (RX) transfer */ in bcdma_alloc_chan_resources()
2398 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__, in bcdma_alloc_chan_resources()
2399 uc->id); in bcdma_alloc_chan_resources()
2403 uc->config.remote_thread_id = -1; in bcdma_alloc_chan_resources()
2407 uc->config.src_thread = uc->config.remote_thread_id; in bcdma_alloc_chan_resources()
2408 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | in bcdma_alloc_chan_resources()
2411 irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring; in bcdma_alloc_chan_resources()
2412 irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data; in bcdma_alloc_chan_resources()
2418 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", in bcdma_alloc_chan_resources()
2419 __func__, uc->id, uc->config.dir); in bcdma_alloc_chan_resources()
2420 return -EINVAL; in bcdma_alloc_chan_resources()
2428 dev_warn(ud->dev, "chan%d: is running!\n", uc->id); in bcdma_alloc_chan_resources()
2431 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); in bcdma_alloc_chan_resources()
2432 ret = -EBUSY; in bcdma_alloc_chan_resources()
2437 uc->dma_dev = dmaengine_get_dma_device(chan); in bcdma_alloc_chan_resources()
2438 if (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type) { in bcdma_alloc_chan_resources()
2439 uc->config.hdesc_size = cppi5_trdesc_calc_size( in bcdma_alloc_chan_resources()
2442 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev, in bcdma_alloc_chan_resources()
2443 uc->config.hdesc_size, in bcdma_alloc_chan_resources()
2444 ud->desc_align, in bcdma_alloc_chan_resources()
2446 if (!uc->hdesc_pool) { in bcdma_alloc_chan_resources()
2447 dev_err(ud->ddev.dev, in bcdma_alloc_chan_resources()
2449 uc->use_dma_pool = false; in bcdma_alloc_chan_resources()
2450 ret = -ENOMEM; in bcdma_alloc_chan_resources()
2454 uc->use_dma_pool = true; in bcdma_alloc_chan_resources()
2455 } else if (uc->config.dir != DMA_MEM_TO_MEM) { in bcdma_alloc_chan_resources()
2456 /* PSI-L pairing */ in bcdma_alloc_chan_resources()
2457 ret = navss_psil_pair(ud, uc->config.src_thread, in bcdma_alloc_chan_resources()
2458 uc->config.dst_thread); in bcdma_alloc_chan_resources()
2460 dev_err(ud->dev, in bcdma_alloc_chan_resources()
2461 "PSI-L pairing failed: 0x%04x -> 0x%04x\n", in bcdma_alloc_chan_resources()
2462 uc->config.src_thread, uc->config.dst_thread); in bcdma_alloc_chan_resources()
2466 uc->psil_paired = true; in bcdma_alloc_chan_resources()
2469 uc->irq_num_ring = ti_sci_inta_msi_get_virq(ud->dev, irq_ring_idx); in bcdma_alloc_chan_resources()
2470 if (uc->irq_num_ring <= 0) { in bcdma_alloc_chan_resources()
2471 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", in bcdma_alloc_chan_resources()
2473 ret = -EINVAL; in bcdma_alloc_chan_resources()
2477 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, in bcdma_alloc_chan_resources()
2478 IRQF_TRIGGER_HIGH, uc->name, uc); in bcdma_alloc_chan_resources()
2480 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); in bcdma_alloc_chan_resources()
2485 if (is_slave_direction(uc->config.dir)) { in bcdma_alloc_chan_resources()
2486 uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev, in bcdma_alloc_chan_resources()
2488 if (uc->irq_num_udma <= 0) { in bcdma_alloc_chan_resources()
2489 dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n", in bcdma_alloc_chan_resources()
2491 free_irq(uc->irq_num_ring, uc); in bcdma_alloc_chan_resources()
2492 ret = -EINVAL; in bcdma_alloc_chan_resources()
2496 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0, in bcdma_alloc_chan_resources()
2497 uc->name, uc); in bcdma_alloc_chan_resources()
2499 dev_err(ud->dev, "chan%d: BCDMA irq request failed\n", in bcdma_alloc_chan_resources()
2500 uc->id); in bcdma_alloc_chan_resources()
2501 free_irq(uc->irq_num_ring, uc); in bcdma_alloc_chan_resources()
2505 uc->irq_num_udma = 0; in bcdma_alloc_chan_resources()
2510 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work, in bcdma_alloc_chan_resources()
2515 uc->irq_num_ring = 0; in bcdma_alloc_chan_resources()
2516 uc->irq_num_udma = 0; in bcdma_alloc_chan_resources()
2518 if (uc->psil_paired) in bcdma_alloc_chan_resources()
2519 navss_psil_unpair(ud, uc->config.src_thread, in bcdma_alloc_chan_resources()
2520 uc->config.dst_thread); in bcdma_alloc_chan_resources()
2521 uc->psil_paired = false; in bcdma_alloc_chan_resources()
2529 if (uc->use_dma_pool) { in bcdma_alloc_chan_resources()
2530 dma_pool_destroy(uc->hdesc_pool); in bcdma_alloc_chan_resources()
2531 uc->use_dma_pool = false; in bcdma_alloc_chan_resources()
2539 struct k3_event_route_data *router_data = chan->route_data; in bcdma_router_config()
2543 if (!uc->bchan) in bcdma_router_config()
2544 return -EINVAL; in bcdma_router_config()
2546 if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2) in bcdma_router_config()
2547 return -EINVAL; in bcdma_router_config()
2549 trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset; in bcdma_router_config()
2550 trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1; in bcdma_router_config()
2552 return router_data->set_event(router_data->priv, trigger_event); in bcdma_router_config()
2558 struct udma_dev *ud = to_udma_dev(chan->device); in pktdma_alloc_chan_resources()
2559 const struct udma_oes_offsets *oes = &ud->soc_data->oes; in pktdma_alloc_chan_resources()
2567 reinit_completion(&uc->teardown_completed); in pktdma_alloc_chan_resources()
2568 complete_all(&uc->teardown_completed); in pktdma_alloc_chan_resources()
2569 uc->state = UDMA_CHAN_IS_IDLE; in pktdma_alloc_chan_resources()
2571 switch (uc->config.dir) { in pktdma_alloc_chan_resources()
2573 /* Slave transfer synchronized - mem to dev (TX) transfer */ in pktdma_alloc_chan_resources()
2574 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__, in pktdma_alloc_chan_resources()
2575 uc->id); in pktdma_alloc_chan_resources()
2579 uc->config.remote_thread_id = -1; in pktdma_alloc_chan_resources()
2583 uc->config.src_thread = ud->psil_base + uc->tchan->id; in pktdma_alloc_chan_resources()
2584 uc->config.dst_thread = uc->config.remote_thread_id; in pktdma_alloc_chan_resources()
2585 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; in pktdma_alloc_chan_resources()
2587 irq_ring_idx = uc->tchan->tflow_id + oes->pktdma_tchan_flow; in pktdma_alloc_chan_resources()
2592 /* Slave transfer synchronized - dev to mem (RX) transfer */ in pktdma_alloc_chan_resources()
2593 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__, in pktdma_alloc_chan_resources()
2594 uc->id); in pktdma_alloc_chan_resources()
2598 uc->config.remote_thread_id = -1; in pktdma_alloc_chan_resources()
2602 uc->config.src_thread = uc->config.remote_thread_id; in pktdma_alloc_chan_resources()
2603 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | in pktdma_alloc_chan_resources()
2606 irq_ring_idx = uc->rflow->id + oes->pktdma_rchan_flow; in pktdma_alloc_chan_resources()
2612 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", in pktdma_alloc_chan_resources()
2613 __func__, uc->id, uc->config.dir); in pktdma_alloc_chan_resources()
2614 return -EINVAL; in pktdma_alloc_chan_resources()
2622 dev_warn(ud->dev, "chan%d: is running!\n", uc->id); in pktdma_alloc_chan_resources()
2625 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); in pktdma_alloc_chan_resources()
2626 ret = -EBUSY; in pktdma_alloc_chan_resources()
2631 uc->dma_dev = dmaengine_get_dma_device(chan); in pktdma_alloc_chan_resources()
2632 uc->hdesc_pool = dma_pool_create(uc->name, uc->dma_dev, in pktdma_alloc_chan_resources()
2633 uc->config.hdesc_size, ud->desc_align, in pktdma_alloc_chan_resources()
2635 if (!uc->hdesc_pool) { in pktdma_alloc_chan_resources()
2636 dev_err(ud->ddev.dev, in pktdma_alloc_chan_resources()
2638 uc->use_dma_pool = false; in pktdma_alloc_chan_resources()
2639 ret = -ENOMEM; in pktdma_alloc_chan_resources()
2643 uc->use_dma_pool = true; in pktdma_alloc_chan_resources()
2645 /* PSI-L pairing */ in pktdma_alloc_chan_resources()
2646 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread); in pktdma_alloc_chan_resources()
2648 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n", in pktdma_alloc_chan_resources()
2649 uc->config.src_thread, uc->config.dst_thread); in pktdma_alloc_chan_resources()
2653 uc->psil_paired = true; in pktdma_alloc_chan_resources()
2655 uc->irq_num_ring = ti_sci_inta_msi_get_virq(ud->dev, irq_ring_idx); in pktdma_alloc_chan_resources()
2656 if (uc->irq_num_ring <= 0) { in pktdma_alloc_chan_resources()
2657 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", in pktdma_alloc_chan_resources()
2659 ret = -EINVAL; in pktdma_alloc_chan_resources()
2663 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, in pktdma_alloc_chan_resources()
2664 IRQF_TRIGGER_HIGH, uc->name, uc); in pktdma_alloc_chan_resources()
2666 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); in pktdma_alloc_chan_resources()
2670 uc->irq_num_udma = 0; in pktdma_alloc_chan_resources()
2674 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work, in pktdma_alloc_chan_resources()
2677 if (uc->tchan) in pktdma_alloc_chan_resources()
2678 dev_dbg(ud->dev, in pktdma_alloc_chan_resources()
2680 uc->id, uc->tchan->id, uc->tchan->tflow_id, in pktdma_alloc_chan_resources()
2681 uc->config.remote_thread_id); in pktdma_alloc_chan_resources()
2682 else if (uc->rchan) in pktdma_alloc_chan_resources()
2683 dev_dbg(ud->dev, in pktdma_alloc_chan_resources()
2685 uc->id, uc->rchan->id, uc->rflow->id, in pktdma_alloc_chan_resources()
2686 uc->config.remote_thread_id); in pktdma_alloc_chan_resources()
2690 uc->irq_num_ring = 0; in pktdma_alloc_chan_resources()
2692 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread); in pktdma_alloc_chan_resources()
2693 uc->psil_paired = false; in pktdma_alloc_chan_resources()
2700 dma_pool_destroy(uc->hdesc_pool); in pktdma_alloc_chan_resources()
2701 uc->use_dma_pool = false; in pktdma_alloc_chan_resources()
2711 memcpy(&uc->cfg, cfg, sizeof(uc->cfg)); in udma_slave_config()
2733 dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size); in udma_alloc_tr_desc()
2738 d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT); in udma_alloc_tr_desc()
2742 d->sglen = tr_count; in udma_alloc_tr_desc()
2744 d->hwdesc_count = 1; in udma_alloc_tr_desc()
2745 hwdesc = &d->hwdesc[0]; in udma_alloc_tr_desc()
2748 if (uc->use_dma_pool) { in udma_alloc_tr_desc()
2749 hwdesc->cppi5_desc_size = uc->config.hdesc_size; in udma_alloc_tr_desc()
2750 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, in udma_alloc_tr_desc()
2752 &hwdesc->cppi5_desc_paddr); in udma_alloc_tr_desc()
2754 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, in udma_alloc_tr_desc()
2756 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size, in udma_alloc_tr_desc()
2757 uc->ud->desc_align); in udma_alloc_tr_desc()
2758 hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev, in udma_alloc_tr_desc()
2759 hwdesc->cppi5_desc_size, in udma_alloc_tr_desc()
2760 &hwdesc->cppi5_desc_paddr, in udma_alloc_tr_desc()
2764 if (!hwdesc->cppi5_desc_vaddr) { in udma_alloc_tr_desc()
2770 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size; in udma_alloc_tr_desc()
2772 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count; in udma_alloc_tr_desc()
2774 tr_desc = hwdesc->cppi5_desc_vaddr; in udma_alloc_tr_desc()
2776 if (uc->cyclic) in udma_alloc_tr_desc()
2780 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); in udma_alloc_tr_desc()
2782 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); in udma_alloc_tr_desc()
2785 cppi5_desc_set_pktids(tr_desc, uc->id, in udma_alloc_tr_desc()
2793 * udma_get_tr_counters - calculate TR counters for a given length
2802 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
2806 * -EINVAL if the length can not be supported
2822 *tr0_cnt0 = SZ_64K - BIT(align_to); in udma_get_tr_counters()
2825 align_to--; in udma_get_tr_counters()
2828 return -EINVAL; in udma_get_tr_counters()
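
The udma_get_tr_counters() fragments above describe how a transfer length is split across at most two TRs: the first TR moves blocks of (SZ_64K - alignment) bytes so that both the block size and the block count stay within the 16-bit TR counters, and the second TR carries the remainder; if no alignment choice can make the count fit, -EINVAL is returned. The standalone sketch below illustrates that splitting scheme under those assumptions; it is not the driver's exact function.

#include <stdio.h>

#define SZ_64K	0x10000u

/*
 * Split @len into a first TR of @blk-byte blocks plus a remainder TR.
 * Illustrative sketch only: block size is SZ_64K minus the preferred
 * alignment so both the block size and block count fit in 16 bits.
 */
static int split_len(unsigned int len, unsigned int align_to,
		     unsigned int *cnt0, unsigned int *cnt1, unsigned int *rem)
{
	unsigned int blk;

	if (len < SZ_64K) {		/* one TR is enough */
		*cnt0 = len;
		*cnt1 = 1;
		*rem = 0;
		return 1;
	}

	if (align_to > 3)
		align_to = 3;		/* keep the block size sane */

	for (;;) {
		blk = SZ_64K - (1u << align_to);
		if (len / blk < SZ_64K)
			break;
		if (!align_to)
			return -1;	/* cannot be represented (-EINVAL) */
		align_to--;		/* relax the alignment and retry */
	}

	*cnt0 = blk;			/* bytes per block in the first TR */
	*cnt1 = len / blk;		/* number of blocks in the first TR */
	*rem = len % blk;		/* second TR carries the remainder */
	return 2;
}

int main(void)
{
	unsigned int c0, c1, r;
	int n = split_len(3 * SZ_64K + 100, 3, &c0, &c1, &r);

	printf("%d TRs: %u blocks x %u bytes + %u bytes\n", n, c1, c0, r);
	return 0;
}
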
2866 d->sglen = sglen; in udma_prep_slave_sg_tr()
2868 if (uc->ud->match_data->type == DMA_TYPE_UDMA) in udma_prep_slave_sg_tr()
2871 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; in udma_prep_slave_sg_tr()
2873 tr_req = d->hwdesc[0].tr_req_base; in udma_prep_slave_sg_tr()
2880 dev_err(uc->ud->dev, "size %u is not supported\n", in udma_prep_slave_sg_tr()
2912 d->residue += sg_dma_len(sgent); in udma_prep_slave_sg_tr()
2915 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, in udma_prep_slave_sg_tr()
2941 dev_addr = uc->cfg.src_addr; in udma_prep_slave_sg_triggered_tr()
2942 dev_width = uc->cfg.src_addr_width; in udma_prep_slave_sg_triggered_tr()
2943 burst = uc->cfg.src_maxburst; in udma_prep_slave_sg_triggered_tr()
2944 port_window = uc->cfg.src_port_window_size; in udma_prep_slave_sg_triggered_tr()
2946 dev_addr = uc->cfg.dst_addr; in udma_prep_slave_sg_triggered_tr()
2947 dev_width = uc->cfg.dst_addr_width; in udma_prep_slave_sg_triggered_tr()
2948 burst = uc->cfg.dst_maxburst; in udma_prep_slave_sg_triggered_tr()
2949 port_window = uc->cfg.dst_port_window_size; in udma_prep_slave_sg_triggered_tr()
2951 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); in udma_prep_slave_sg_triggered_tr()
2960 dev_err(uc->ud->dev, in udma_prep_slave_sg_triggered_tr()
2978 dev_err(uc->ud->dev, in udma_prep_slave_sg_triggered_tr()
2996 d->sglen = sglen; in udma_prep_slave_sg_triggered_tr()
2998 if (uc->ud->match_data->type == DMA_TYPE_UDMA) { in udma_prep_slave_sg_triggered_tr()
3001 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; in udma_prep_slave_sg_triggered_tr()
3005 tr_req = d->hwdesc[0].tr_req_base; in udma_prep_slave_sg_triggered_tr()
3014 dev_err(uc->ud->dev, "size %zu is not supported\n", in udma_prep_slave_sg_triggered_tr()
3025 uc->config.tr_trigger_type, in udma_prep_slave_sg_triggered_tr()
3035 tr_req[tr_idx].dim1 = (-1) * tr_cnt0; in udma_prep_slave_sg_triggered_tr()
3060 tr_req[tr_idx].ddim1 = (-1) * tr_cnt0; in udma_prep_slave_sg_triggered_tr()
3072 uc->config.tr_trigger_type, in udma_prep_slave_sg_triggered_tr()
3083 tr_req[tr_idx].dim1 = (-1) * tr_cnt0; in udma_prep_slave_sg_triggered_tr()
3106 tr_req[tr_idx].ddim1 = (-1) * tr_cnt0; in udma_prep_slave_sg_triggered_tr()
3111 d->residue += sg_len; in udma_prep_slave_sg_triggered_tr()
3114 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, in udma_prep_slave_sg_triggered_tr()
3124 if (uc->config.ep_type != PSIL_EP_PDMA_XY) in udma_configure_statictr()
3130 d->static_tr.elsize = 0; in udma_configure_statictr()
3133 d->static_tr.elsize = 1; in udma_configure_statictr()
3136 d->static_tr.elsize = 2; in udma_configure_statictr()
3139 d->static_tr.elsize = 3; in udma_configure_statictr()
3142 d->static_tr.elsize = 4; in udma_configure_statictr()
3145 return -EINVAL; in udma_configure_statictr()
3148 d->static_tr.elcnt = elcnt; in udma_configure_statictr()
3156 if (uc->config.pkt_mode || !uc->cyclic) { in udma_configure_statictr()
3159 if (uc->cyclic) in udma_configure_statictr()
3160 d->static_tr.bstcnt = d->residue / d->sglen / div; in udma_configure_statictr()
3162 d->static_tr.bstcnt = d->residue / div; in udma_configure_statictr()
3164 if (uc->config.dir == DMA_DEV_TO_MEM && in udma_configure_statictr()
3165 d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask) in udma_configure_statictr()
3166 return -EINVAL; in udma_configure_statictr()
3168 d->static_tr.bstcnt = 0; in udma_configure_statictr()
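/*
 * Editor's note (hedged): for PDMA-style endpoints the static TR "Z" count
 * (bstcnt) above is derived from the descriptor residue. Taking div as the
 * element size in bytes (an assumption, since that line is not shown here),
 * a cyclic transfer with d->residue = 8192, d->sglen = 4 periods and div = 4
 * gives bstcnt = 8192 / 4 / 4 = 512, while a non-cyclic transfer of the same
 * residue gives 8192 / 4 = 2048. As the code above shows, DEV_TO_MEM results
 * larger than statictr_z_mask are rejected with -EINVAL.
 */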
3190 d->sglen = sglen; in udma_prep_slave_sg_pkt()
3191 d->hwdesc_count = sglen; in udma_prep_slave_sg_pkt()
3194 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); in udma_prep_slave_sg_pkt()
3196 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); in udma_prep_slave_sg_pkt()
3198 if (uc->ud->match_data->type == DMA_TYPE_UDMA) in udma_prep_slave_sg_pkt()
3201 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; in udma_prep_slave_sg_pkt()
3204 struct udma_hwdesc *hwdesc = &d->hwdesc[i]; in udma_prep_slave_sg_pkt()
3209 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, in udma_prep_slave_sg_pkt()
3211 &hwdesc->cppi5_desc_paddr); in udma_prep_slave_sg_pkt()
3212 if (!hwdesc->cppi5_desc_vaddr) { in udma_prep_slave_sg_pkt()
3213 dev_err(uc->ud->dev, in udma_prep_slave_sg_pkt()
3221 d->residue += sg_len; in udma_prep_slave_sg_pkt()
3222 hwdesc->cppi5_desc_size = uc->config.hdesc_size; in udma_prep_slave_sg_pkt()
3223 desc = hwdesc->cppi5_desc_vaddr; in udma_prep_slave_sg_pkt()
3228 cppi5_desc_set_pktids(&desc->hdr, uc->id, in udma_prep_slave_sg_pkt()
3230 cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id); in udma_prep_slave_sg_pkt()
3233 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff); in udma_prep_slave_sg_pkt()
3243 hwdesc->cppi5_desc_paddr | asel); in udma_prep_slave_sg_pkt()
3245 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA || in udma_prep_slave_sg_pkt()
3250 if (d->residue >= SZ_4M) { in udma_prep_slave_sg_pkt()
3251 dev_err(uc->ud->dev, in udma_prep_slave_sg_pkt()
3253 __func__, d->residue); in udma_prep_slave_sg_pkt()
3259 h_desc = d->hwdesc[0].cppi5_desc_vaddr; in udma_prep_slave_sg_pkt()
3260 cppi5_hdesc_set_pktlen(h_desc, d->residue); in udma_prep_slave_sg_pkt()
3269 struct udma_chan *uc = to_udma_chan(desc->chan); in udma_attach_metadata()
3274 if (!uc->config.pkt_mode || !uc->config.metadata_size) in udma_attach_metadata()
3275 return -ENOTSUPP; in udma_attach_metadata()
3277 if (!data || len > uc->config.metadata_size) in udma_attach_metadata()
3278 return -EINVAL; in udma_attach_metadata()
3280 if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE) in udma_attach_metadata()
3281 return -EINVAL; in udma_attach_metadata()
3283 h_desc = d->hwdesc[0].cppi5_desc_vaddr; in udma_attach_metadata()
3284 if (d->dir == DMA_MEM_TO_DEV) in udma_attach_metadata()
3285 memcpy(h_desc->epib, data, len); in udma_attach_metadata()
3287 if (uc->config.needs_epib) in udma_attach_metadata()
3288 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; in udma_attach_metadata()
3290 d->metadata = data; in udma_attach_metadata()
3291 d->metadata_size = len; in udma_attach_metadata()
3292 if (uc->config.needs_epib) in udma_attach_metadata()
3305 struct udma_chan *uc = to_udma_chan(desc->chan); in udma_get_metadata_ptr()
3308 if (!uc->config.pkt_mode || !uc->config.metadata_size) in udma_get_metadata_ptr()
3309 return ERR_PTR(-ENOTSUPP); in udma_get_metadata_ptr()
3311 h_desc = d->hwdesc[0].cppi5_desc_vaddr; in udma_get_metadata_ptr()
3313 *max_len = uc->config.metadata_size; in udma_get_metadata_ptr()
3315 *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ? in udma_get_metadata_ptr()
3319 return h_desc->epib; in udma_get_metadata_ptr()
3326 struct udma_chan *uc = to_udma_chan(desc->chan); in udma_set_metadata_len()
3331 if (!uc->config.pkt_mode || !uc->config.metadata_size) in udma_set_metadata_len()
3332 return -ENOTSUPP; in udma_set_metadata_len()
3334 if (payload_len > uc->config.metadata_size) in udma_set_metadata_len()
3335 return -EINVAL; in udma_set_metadata_len()
3337 if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE) in udma_set_metadata_len()
3338 return -EINVAL; in udma_set_metadata_len()
3340 h_desc = d->hwdesc[0].cppi5_desc_vaddr; in udma_set_metadata_len()
3342 if (uc->config.needs_epib) { in udma_set_metadata_len()
3343 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; in udma_set_metadata_len()
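/*
 * Editor's sketch (hedged): the three callbacks above are wired into
 * descriptors via d->vd.tx.metadata_ops and back DMAengine's client metadata
 * helpers for packet-mode channels. A client holding a prepared descriptor
 * might use them roughly as below; my_meta and its length are hypothetical,
 * error handling is trimmed, and <linux/dmaengine.h> is assumed.
 */
static int sketch_attach_client_metadata(struct dma_async_tx_descriptor *desc,
					 void *my_meta, size_t my_meta_len)
{
	/* DESC_METADATA_CLIENT mode: copy metadata into the EPIB/PS area */
	return dmaengine_desc_attach_metadata(desc, my_meta, my_meta_len);
}

static void *sketch_map_engine_metadata(struct dma_async_tx_descriptor *desc,
					size_t *payload_len, size_t *max_len)
{
	/* DESC_METADATA_ENGINE mode: get a pointer into the descriptor's
	 * metadata area; dmaengine_desc_set_metadata_len() would then report
	 * how much of it was actually written. */
	return dmaengine_desc_get_metadata_ptr(desc, payload_len, max_len);
}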
3369 if (dir != uc->config.dir && in udma_prep_slave_sg()
3370 (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) { in udma_prep_slave_sg()
3371 dev_err(chan->device->dev, in udma_prep_slave_sg()
3373 __func__, uc->id, in udma_prep_slave_sg()
3374 dmaengine_get_direction_text(uc->config.dir), in udma_prep_slave_sg()
3380 dev_width = uc->cfg.src_addr_width; in udma_prep_slave_sg()
3381 burst = uc->cfg.src_maxburst; in udma_prep_slave_sg()
3383 dev_width = uc->cfg.dst_addr_width; in udma_prep_slave_sg()
3384 burst = uc->cfg.dst_maxburst; in udma_prep_slave_sg()
3386 dev_err(chan->device->dev, "%s: bad direction?\n", __func__); in udma_prep_slave_sg()
3393 if (uc->config.pkt_mode) in udma_prep_slave_sg()
3396 else if (is_slave_direction(uc->config.dir)) in udma_prep_slave_sg()
3406 d->dir = dir; in udma_prep_slave_sg()
3407 d->desc_idx = 0; in udma_prep_slave_sg()
3408 d->tr_idx = 0; in udma_prep_slave_sg()
3412 dev_err(uc->ud->dev, in udma_prep_slave_sg()
3414 __func__, d->static_tr.bstcnt); in udma_prep_slave_sg()
3421 if (uc->config.metadata_size) in udma_prep_slave_sg()
3422 d->vd.tx.metadata_ops = &metadata_ops; in udma_prep_slave_sg()
3424 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); in udma_prep_slave_sg()
3443 dev_err(uc->ud->dev, "size %zu is not supported\n", in udma_prep_dma_cyclic_tr()
3454 tr_req = d->hwdesc[0].tr_req_base; in udma_prep_dma_cyclic_tr()
3455 if (uc->ud->match_data->type == DMA_TYPE_UDMA) in udma_prep_dma_cyclic_tr()
3459 ((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT); in udma_prep_dma_cyclic_tr()
3507 if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1)) in udma_prep_dma_cyclic_pkt()
3517 d->hwdesc_count = periods; in udma_prep_dma_cyclic_pkt()
3519 /* TODO: re-check this... */ in udma_prep_dma_cyclic_pkt()
3521 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); in udma_prep_dma_cyclic_pkt()
3523 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); in udma_prep_dma_cyclic_pkt()
3525 if (uc->ud->match_data->type != DMA_TYPE_UDMA) in udma_prep_dma_cyclic_pkt()
3526 buf_addr |= (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; in udma_prep_dma_cyclic_pkt()
3529 struct udma_hwdesc *hwdesc = &d->hwdesc[i]; in udma_prep_dma_cyclic_pkt()
3533 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, in udma_prep_dma_cyclic_pkt()
3535 &hwdesc->cppi5_desc_paddr); in udma_prep_dma_cyclic_pkt()
3536 if (!hwdesc->cppi5_desc_vaddr) { in udma_prep_dma_cyclic_pkt()
3537 dev_err(uc->ud->dev, in udma_prep_dma_cyclic_pkt()
3545 hwdesc->cppi5_desc_size = uc->config.hdesc_size; in udma_prep_dma_cyclic_pkt()
3546 h_desc = hwdesc->cppi5_desc_vaddr; in udma_prep_dma_cyclic_pkt()
3552 cppi5_desc_set_pktids(&h_desc->hdr, uc->id, in udma_prep_dma_cyclic_pkt()
3554 cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id); in udma_prep_dma_cyclic_pkt()
3575 if (dir != uc->config.dir) { in udma_prep_dma_cyclic()
3576 dev_err(chan->device->dev, in udma_prep_dma_cyclic()
3578 __func__, uc->id, in udma_prep_dma_cyclic()
3579 dmaengine_get_direction_text(uc->config.dir), in udma_prep_dma_cyclic()
3584 uc->cyclic = true; in udma_prep_dma_cyclic()
3587 dev_width = uc->cfg.src_addr_width; in udma_prep_dma_cyclic()
3588 burst = uc->cfg.src_maxburst; in udma_prep_dma_cyclic()
3590 dev_width = uc->cfg.dst_addr_width; in udma_prep_dma_cyclic()
3591 burst = uc->cfg.dst_maxburst; in udma_prep_dma_cyclic()
3593 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); in udma_prep_dma_cyclic()
3600 if (uc->config.pkt_mode) in udma_prep_dma_cyclic()
3610 d->sglen = buf_len / period_len; in udma_prep_dma_cyclic()
3612 d->dir = dir; in udma_prep_dma_cyclic()
3613 d->residue = buf_len; in udma_prep_dma_cyclic()
3617 dev_err(uc->ud->dev, in udma_prep_dma_cyclic()
3619 __func__, d->static_tr.bstcnt); in udma_prep_dma_cyclic()
3626 if (uc->config.metadata_size) in udma_prep_dma_cyclic()
3627 d->vd.tx.metadata_ops = &metadata_ops; in udma_prep_dma_cyclic()
3629 return vchan_tx_prep(&uc->vc, &d->vd, flags); in udma_prep_dma_cyclic()
3643 if (uc->config.dir != DMA_MEM_TO_MEM) { in udma_prep_dma_memcpy()
3644 dev_err(chan->device->dev, in udma_prep_dma_memcpy()
3646 __func__, uc->id, in udma_prep_dma_memcpy()
3647 dmaengine_get_direction_text(uc->config.dir), in udma_prep_dma_memcpy()
3655 dev_err(uc->ud->dev, "size %zu is not supported\n", in udma_prep_dma_memcpy()
3664 d->dir = DMA_MEM_TO_MEM; in udma_prep_dma_memcpy()
3665 d->desc_idx = 0; in udma_prep_dma_memcpy()
3666 d->tr_idx = 0; in udma_prep_dma_memcpy()
3667 d->residue = len; in udma_prep_dma_memcpy()
3669 if (uc->ud->match_data->type != DMA_TYPE_UDMA) { in udma_prep_dma_memcpy()
3670 src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT; in udma_prep_dma_memcpy()
3671 dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT; in udma_prep_dma_memcpy()
3674 tr_req = d->hwdesc[0].tr_req_base; in udma_prep_dma_memcpy()
3712 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, in udma_prep_dma_memcpy()
3715 if (uc->config.metadata_size) in udma_prep_dma_memcpy()
3716 d->vd.tx.metadata_ops = &metadata_ops; in udma_prep_dma_memcpy()
3718 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); in udma_prep_dma_memcpy()
3726 spin_lock_irqsave(&uc->vc.lock, flags); in udma_issue_pending()
3729 if (vchan_issue_pending(&uc->vc) && !uc->desc) { in udma_issue_pending()
3735 if (!(uc->state == UDMA_CHAN_IS_TERMINATING && in udma_issue_pending()
3740 spin_unlock_irqrestore(&uc->vc.lock, flags); in udma_issue_pending()
3751 spin_lock_irqsave(&uc->vc.lock, flags); in udma_tx_status()
3764 if (uc->desc && uc->desc->vd.tx.cookie == cookie) { in udma_tx_status()
3767 u32 residue = uc->desc->residue; in udma_tx_status()
3770 if (uc->desc->dir == DMA_MEM_TO_DEV) { in udma_tx_status()
3773 if (uc->config.ep_type != PSIL_EP_NATIVE) { in udma_tx_status()
3778 delay = bcnt - peer_bcnt; in udma_tx_status()
3780 } else if (uc->desc->dir == DMA_DEV_TO_MEM) { in udma_tx_status()
3783 if (uc->config.ep_type != PSIL_EP_NATIVE) { in udma_tx_status()
3788 delay = peer_bcnt - bcnt; in udma_tx_status()
3794 bcnt -= uc->bcnt; in udma_tx_status()
3795 if (bcnt && !(bcnt % uc->desc->residue)) in udma_tx_status()
3798 residue -= bcnt % uc->desc->residue; in udma_tx_status()
3800 if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) { in udma_tx_status()
3813 spin_unlock_irqrestore(&uc->vc.lock, flags); in udma_tx_status()
3822 switch (uc->config.dir) { in udma_pause()
3839 return -EINVAL; in udma_pause()
3850 switch (uc->config.dir) { in udma_resume()
3865 return -EINVAL; in udma_resume()
3877 spin_lock_irqsave(&uc->vc.lock, flags); in udma_terminate_all()
3882 if (uc->desc) { in udma_terminate_all()
3883 uc->terminated_desc = uc->desc; in udma_terminate_all()
3884 uc->desc = NULL; in udma_terminate_all()
3885 uc->terminated_desc->terminated = true; in udma_terminate_all()
3886 cancel_delayed_work(&uc->tx_drain.work); in udma_terminate_all()
3889 uc->paused = false; in udma_terminate_all()
3891 vchan_get_all_descriptors(&uc->vc, &head); in udma_terminate_all()
3892 spin_unlock_irqrestore(&uc->vc.lock, flags); in udma_terminate_all()
3893 vchan_dma_desc_free_list(&uc->vc, &head); in udma_terminate_all()
3903 vchan_synchronize(&uc->vc); in udma_synchronize()
3905 if (uc->state == UDMA_CHAN_IS_TERMINATING) { in udma_synchronize()
3906 timeout = wait_for_completion_timeout(&uc->teardown_completed, in udma_synchronize()
3909 dev_warn(uc->ud->dev, "chan%d teardown timeout!\n", in udma_synchronize()
3910 uc->id); in udma_synchronize()
3918 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id); in udma_synchronize()
3920 cancel_delayed_work_sync(&uc->tx_drain.work); in udma_synchronize()
3928 struct udma_chan *uc = to_udma_chan(&vc->chan); in udma_desc_pre_callback()
3934 d = to_udma_desc(&vd->tx); in udma_desc_pre_callback()
3936 if (d->metadata_size) in udma_desc_pre_callback()
3941 void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx); in udma_desc_pre_callback()
3945 result->residue = d->residue - in udma_desc_pre_callback()
3947 if (result->residue) in udma_desc_pre_callback()
3948 result->result = DMA_TRANS_ABORTED; in udma_desc_pre_callback()
3950 result->result = DMA_TRANS_NOERROR; in udma_desc_pre_callback()
3952 result->residue = 0; in udma_desc_pre_callback()
3953 result->result = DMA_TRANS_NOERROR; in udma_desc_pre_callback()
3969 spin_lock_irq(&vc->lock); in udma_vchan_complete()
3970 list_splice_tail_init(&vc->desc_completed, &head); in udma_vchan_complete()
3971 vd = vc->cyclic; in udma_vchan_complete()
3973 vc->cyclic = NULL; in udma_vchan_complete()
3974 dmaengine_desc_get_callback(&vd->tx, &cb); in udma_vchan_complete()
3978 spin_unlock_irq(&vc->lock); in udma_vchan_complete()
3986 dmaengine_desc_get_callback(&vd->tx, &cb); in udma_vchan_complete()
3988 list_del(&vd->node); in udma_vchan_complete()
4000 struct udma_dev *ud = to_udma_dev(chan->device); in udma_free_chan_resources()
4003 if (uc->terminated_desc) { in udma_free_chan_resources()
4008 cancel_delayed_work_sync(&uc->tx_drain.work); in udma_free_chan_resources()
4010 if (uc->irq_num_ring > 0) { in udma_free_chan_resources()
4011 free_irq(uc->irq_num_ring, uc); in udma_free_chan_resources()
4013 uc->irq_num_ring = 0; in udma_free_chan_resources()
4015 if (uc->irq_num_udma > 0) { in udma_free_chan_resources()
4016 free_irq(uc->irq_num_udma, uc); in udma_free_chan_resources()
4018 uc->irq_num_udma = 0; in udma_free_chan_resources()
4021 /* Release PSI-L pairing */ in udma_free_chan_resources()
4022 if (uc->psil_paired) { in udma_free_chan_resources()
4023 navss_psil_unpair(ud, uc->config.src_thread, in udma_free_chan_resources()
4024 uc->config.dst_thread); in udma_free_chan_resources()
4025 uc->psil_paired = false; in udma_free_chan_resources()
4028 vchan_free_chan_resources(&uc->vc); in udma_free_chan_resources()
4029 tasklet_kill(&uc->vc.task); in udma_free_chan_resources()
4036 if (uc->use_dma_pool) { in udma_free_chan_resources()
4037 dma_pool_destroy(uc->hdesc_pool); in udma_free_chan_resources()
4038 uc->use_dma_pool = false; in udma_free_chan_resources()
4061 if (chan->device->dev->driver != &udma_driver.driver && in udma_dma_filter_fn()
4062 chan->device->dev->driver != &bcdma_driver.driver && in udma_dma_filter_fn()
4063 chan->device->dev->driver != &pktdma_driver.driver) in udma_dma_filter_fn()
4067 ucc = &uc->config; in udma_dma_filter_fn()
4068 ud = uc->ud; in udma_dma_filter_fn()
4071 if (filter_param->atype > 2) { in udma_dma_filter_fn()
4072 dev_err(ud->dev, "Invalid channel atype: %u\n", in udma_dma_filter_fn()
4073 filter_param->atype); in udma_dma_filter_fn()
4077 if (filter_param->asel > 15) { in udma_dma_filter_fn()
4078 dev_err(ud->dev, "Invalid channel asel: %u\n", in udma_dma_filter_fn()
4079 filter_param->asel); in udma_dma_filter_fn()
4083 ucc->remote_thread_id = filter_param->remote_thread_id; in udma_dma_filter_fn()
4084 ucc->atype = filter_param->atype; in udma_dma_filter_fn()
4085 ucc->asel = filter_param->asel; in udma_dma_filter_fn()
4086 ucc->tr_trigger_type = filter_param->tr_trigger_type; in udma_dma_filter_fn()
4088 if (ucc->tr_trigger_type) { in udma_dma_filter_fn()
4089 ucc->dir = DMA_MEM_TO_MEM; in udma_dma_filter_fn()
4091 } else if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) { in udma_dma_filter_fn()
4092 ucc->dir = DMA_MEM_TO_DEV; in udma_dma_filter_fn()
4094 ucc->dir = DMA_DEV_TO_MEM; in udma_dma_filter_fn()
4097 ep_config = psil_get_ep_config(ucc->remote_thread_id); in udma_dma_filter_fn()
4099 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n", in udma_dma_filter_fn()
4100 ucc->remote_thread_id); in udma_dma_filter_fn()
4101 ucc->dir = DMA_MEM_TO_MEM; in udma_dma_filter_fn()
4102 ucc->remote_thread_id = -1; in udma_dma_filter_fn()
4103 ucc->atype = 0; in udma_dma_filter_fn()
4104 ucc->asel = 0; in udma_dma_filter_fn()
4108 if (ud->match_data->type == DMA_TYPE_BCDMA && in udma_dma_filter_fn()
4109 ep_config->pkt_mode) { in udma_dma_filter_fn()
4110 dev_err(ud->dev, in udma_dma_filter_fn()
4111 "Only TR mode is supported (psi-l thread 0x%04x)\n", in udma_dma_filter_fn()
4112 ucc->remote_thread_id); in udma_dma_filter_fn()
4113 ucc->dir = DMA_MEM_TO_MEM; in udma_dma_filter_fn()
4114 ucc->remote_thread_id = -1; in udma_dma_filter_fn()
4115 ucc->atype = 0; in udma_dma_filter_fn()
4116 ucc->asel = 0; in udma_dma_filter_fn()
4120 ucc->pkt_mode = ep_config->pkt_mode; in udma_dma_filter_fn()
4121 ucc->channel_tpl = ep_config->channel_tpl; in udma_dma_filter_fn()
4122 ucc->notdpkt = ep_config->notdpkt; in udma_dma_filter_fn()
4123 ucc->ep_type = ep_config->ep_type; in udma_dma_filter_fn()
4125 if (ud->match_data->type == DMA_TYPE_PKTDMA && in udma_dma_filter_fn()
4126 ep_config->mapped_channel_id >= 0) { in udma_dma_filter_fn()
4127 ucc->mapped_channel_id = ep_config->mapped_channel_id; in udma_dma_filter_fn()
4128 ucc->default_flow_id = ep_config->default_flow_id; in udma_dma_filter_fn()
4130 ucc->mapped_channel_id = -1; in udma_dma_filter_fn()
4131 ucc->default_flow_id = -1; in udma_dma_filter_fn()
4134 if (ucc->ep_type != PSIL_EP_NATIVE) { in udma_dma_filter_fn()
4135 const struct udma_match_data *match_data = ud->match_data; in udma_dma_filter_fn()
4137 if (match_data->flags & UDMA_FLAG_PDMA_ACC32) in udma_dma_filter_fn()
4138 ucc->enable_acc32 = ep_config->pdma_acc32; in udma_dma_filter_fn()
4139 if (match_data->flags & UDMA_FLAG_PDMA_BURST) in udma_dma_filter_fn()
4140 ucc->enable_burst = ep_config->pdma_burst; in udma_dma_filter_fn()
4143 ucc->needs_epib = ep_config->needs_epib; in udma_dma_filter_fn()
4144 ucc->psd_size = ep_config->psd_size; in udma_dma_filter_fn()
4145 ucc->metadata_size = in udma_dma_filter_fn()
4146 (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) + in udma_dma_filter_fn()
4147 ucc->psd_size; in udma_dma_filter_fn()
4149 if (ucc->pkt_mode) in udma_dma_filter_fn()
4150 ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) + in udma_dma_filter_fn()
4151 ucc->metadata_size, ud->desc_align); in udma_dma_filter_fn()
4153 dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id, in udma_dma_filter_fn()
4154 ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir)); in udma_dma_filter_fn()
4159 dev_dbg(ud->dev, "chan%d: triggered channel (type: %u)\n", uc->id, in udma_dma_filter_fn()
4160 ucc->tr_trigger_type); in udma_dma_filter_fn()
4169 struct udma_dev *ud = ofdma->of_dma_data; in udma_of_xlate()
4170 dma_cap_mask_t mask = ud->ddev.cap_mask; in udma_of_xlate()
4174 if (ud->match_data->type == DMA_TYPE_BCDMA) { in udma_of_xlate()
4175 if (dma_spec->args_count != 3) in udma_of_xlate()
4178 filter_param.tr_trigger_type = dma_spec->args[0]; in udma_of_xlate()
4179 filter_param.remote_thread_id = dma_spec->args[1]; in udma_of_xlate()
4180 filter_param.asel = dma_spec->args[2]; in udma_of_xlate()
4183 if (dma_spec->args_count != 1 && dma_spec->args_count != 2) in udma_of_xlate()
4186 filter_param.remote_thread_id = dma_spec->args[0]; in udma_of_xlate()
4188 if (dma_spec->args_count == 2) { in udma_of_xlate()
4189 if (ud->match_data->type == DMA_TYPE_UDMA) { in udma_of_xlate()
4190 filter_param.atype = dma_spec->args[1]; in udma_of_xlate()
4194 filter_param.asel = dma_spec->args[1]; in udma_of_xlate()
4203 ofdma->of_node); in udma_of_xlate()
4205 dev_err(ud->dev, "get channel fail in %s.\n", __func__); in udma_of_xlate()
4206 return ERR_PTR(-EINVAL); in udma_of_xlate()
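/*
 * Editor's note (from the argument parsing above): the dma_spec cell layout
 * differs per controller type. BCDMA expects three cells (tr_trigger_type,
 * remote_thread_id, asel); UDMA and PKTDMA expect one or two cells
 * (remote_thread_id, then atype for UDMA or asel otherwise). Other cell
 * counts are not accepted by udma_of_xlate().
 */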
4278 .enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */
4290 .compatible = "ti,am654-navss-main-udmap",
4294 .compatible = "ti,am654-navss-mcu-udmap",
4297 .compatible = "ti,j721e-navss-main-udmap",
4300 .compatible = "ti,j721e-navss-mcu-udmap",
4308 .compatible = "ti,am64-dmss-bcdma",
4316 .compatible = "ti,am64-dmss-pktdma",
4367 ud->mmrs[MMR_GCFG] = devm_platform_ioremap_resource_byname(pdev, mmr_names[MMR_GCFG]); in udma_get_mmrs()
4368 if (IS_ERR(ud->mmrs[MMR_GCFG])) in udma_get_mmrs()
4369 return PTR_ERR(ud->mmrs[MMR_GCFG]); in udma_get_mmrs()
4371 cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28); in udma_get_mmrs()
4372 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); in udma_get_mmrs()
4374 switch (ud->match_data->type) { in udma_get_mmrs()
4376 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3); in udma_get_mmrs()
4377 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2); in udma_get_mmrs()
4378 ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2); in udma_get_mmrs()
4379 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2); in udma_get_mmrs()
4382 ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2); in udma_get_mmrs()
4383 ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2); in udma_get_mmrs()
4384 ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2); in udma_get_mmrs()
4385 ud->rflow_cnt = ud->rchan_cnt; in udma_get_mmrs()
4388 cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30); in udma_get_mmrs()
4389 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2); in udma_get_mmrs()
4390 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2); in udma_get_mmrs()
4391 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3); in udma_get_mmrs()
4392 ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4); in udma_get_mmrs()
4395 return -EINVAL; in udma_get_mmrs()
4399 if (i == MMR_BCHANRT && ud->bchan_cnt == 0) in udma_get_mmrs()
4401 if (i == MMR_TCHANRT && ud->tchan_cnt == 0) in udma_get_mmrs()
4403 if (i == MMR_RCHANRT && ud->rchan_cnt == 0) in udma_get_mmrs()
4406 ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]); in udma_get_mmrs()
4407 if (IS_ERR(ud->mmrs[i])) in udma_get_mmrs()
4408 return PTR_ERR(ud->mmrs[i]); in udma_get_mmrs()
4418 bitmap_clear(map, rm_desc->start, rm_desc->num); in udma_mark_resource_ranges()
4419 bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec); in udma_mark_resource_ranges()
4420 dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name, in udma_mark_resource_ranges()
4421 rm_desc->start, rm_desc->num, rm_desc->start_sec, in udma_mark_resource_ranges()
4422 rm_desc->num_sec); in udma_mark_resource_ranges()
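/*
 * Editor's note (hedged): the resource bitmaps appear to use "reserved"
 * polarity. The setup_resources() variants below start from bitmap_fill()
 * (everything reserved) when a ti_sci range list is available and then
 * udma_mark_resource_ranges() clears the granted ranges, so a cleared bit
 * means "available to allocate"; the other branch simply zeroes the map.
 */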
4426 [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
4427 [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
4428 [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
4429 [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
4430 [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
4436 struct device *dev = ud->dev; in udma_setup_resources()
4438 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in udma_setup_resources()
4442 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); in udma_setup_resources()
4443 if (of_device_is_compatible(dev->of_node, in udma_setup_resources()
4444 "ti,am654-navss-main-udmap")) { in udma_setup_resources()
4445 ud->tchan_tpl.levels = 2; in udma_setup_resources()
4446 ud->tchan_tpl.start_idx[0] = 8; in udma_setup_resources()
4447 } else if (of_device_is_compatible(dev->of_node, in udma_setup_resources()
4448 "ti,am654-navss-mcu-udmap")) { in udma_setup_resources()
4449 ud->tchan_tpl.levels = 2; in udma_setup_resources()
4450 ud->tchan_tpl.start_idx[0] = 2; in udma_setup_resources()
4452 ud->tchan_tpl.levels = 3; in udma_setup_resources()
4453 ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3); in udma_setup_resources()
4454 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); in udma_setup_resources()
4456 ud->tchan_tpl.levels = 2; in udma_setup_resources()
4457 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); in udma_setup_resources()
4459 ud->tchan_tpl.levels = 1; in udma_setup_resources()
4462 ud->rchan_tpl.levels = ud->tchan_tpl.levels; in udma_setup_resources()
4463 ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0]; in udma_setup_resources()
4464 ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1]; in udma_setup_resources()
4466 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), in udma_setup_resources()
4468 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), in udma_setup_resources()
4470 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), in udma_setup_resources()
4472 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), in udma_setup_resources()
4474 ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt), in udma_setup_resources()
4477 ud->rflow_gp_map_allocated = devm_kcalloc(dev, in udma_setup_resources()
4478 BITS_TO_LONGS(ud->rflow_cnt), in udma_setup_resources()
4481 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt), in udma_setup_resources()
4484 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows), in udma_setup_resources()
4487 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map || in udma_setup_resources()
4488 !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans || in udma_setup_resources()
4489 !ud->rflows || !ud->rflow_in_use) in udma_setup_resources()
4490 return -ENOMEM; in udma_setup_resources()
4497 bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt); in udma_setup_resources()
4500 bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt); in udma_setup_resources()
4507 tisci_rm->rm_ranges[i] = in udma_setup_resources()
4508 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, in udma_setup_resources()
4509 tisci_rm->tisci_dev_id, in udma_setup_resources()
4514 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; in udma_setup_resources()
4516 bitmap_zero(ud->tchan_map, ud->tchan_cnt); in udma_setup_resources()
4518 bitmap_fill(ud->tchan_map, ud->tchan_cnt); in udma_setup_resources()
4519 for (i = 0; i < rm_res->sets; i++) in udma_setup_resources()
4520 udma_mark_resource_ranges(ud, ud->tchan_map, in udma_setup_resources()
4521 &rm_res->desc[i], "tchan"); in udma_setup_resources()
4523 irq_res.sets = rm_res->sets; in udma_setup_resources()
4526 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; in udma_setup_resources()
4528 bitmap_zero(ud->rchan_map, ud->rchan_cnt); in udma_setup_resources()
4530 bitmap_fill(ud->rchan_map, ud->rchan_cnt); in udma_setup_resources()
4531 for (i = 0; i < rm_res->sets; i++) in udma_setup_resources()
4532 udma_mark_resource_ranges(ud, ud->rchan_map, in udma_setup_resources()
4533 &rm_res->desc[i], "rchan"); in udma_setup_resources()
4536 irq_res.sets += rm_res->sets; in udma_setup_resources()
4538 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; in udma_setup_resources()
4539 for (i = 0; i < rm_res->sets; i++) { in udma_setup_resources()
4540 irq_res.desc[i].start = rm_res->desc[i].start; in udma_setup_resources()
4541 irq_res.desc[i].num = rm_res->desc[i].num; in udma_setup_resources()
4542 irq_res.desc[i].start_sec = rm_res->desc[i].start_sec; in udma_setup_resources()
4543 irq_res.desc[i].num_sec = rm_res->desc[i].num_sec; in udma_setup_resources()
4545 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; in udma_setup_resources()
4546 for (j = 0; j < rm_res->sets; j++, i++) { in udma_setup_resources()
4547 if (rm_res->desc[j].num) { in udma_setup_resources()
4548 irq_res.desc[i].start = rm_res->desc[j].start + in udma_setup_resources()
4549 ud->soc_data->oes.udma_rchan; in udma_setup_resources()
4550 irq_res.desc[i].num = rm_res->desc[j].num; in udma_setup_resources()
4552 if (rm_res->desc[j].num_sec) { in udma_setup_resources()
4553 irq_res.desc[i].start_sec = rm_res->desc[j].start_sec + in udma_setup_resources()
4554 ud->soc_data->oes.udma_rchan; in udma_setup_resources()
4555 irq_res.desc[i].num_sec = rm_res->desc[j].num_sec; in udma_setup_resources()
4558 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); in udma_setup_resources()
4561 dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); in udma_setup_resources()
4566 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; in udma_setup_resources()
4569 bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt, in udma_setup_resources()
4570 ud->rflow_cnt - ud->rchan_cnt); in udma_setup_resources()
4572 for (i = 0; i < rm_res->sets; i++) in udma_setup_resources()
4573 udma_mark_resource_ranges(ud, ud->rflow_gp_map, in udma_setup_resources()
4574 &rm_res->desc[i], "gp-rflow"); in udma_setup_resources()
4583 struct device *dev = ud->dev; in bcdma_setup_resources()
4585 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in bcdma_setup_resources()
4586 const struct udma_oes_offsets *oes = &ud->soc_data->oes; in bcdma_setup_resources()
4590 cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c); in bcdma_setup_resources()
4592 ud->bchan_tpl.levels = 3; in bcdma_setup_resources()
4593 ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap); in bcdma_setup_resources()
4594 ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap); in bcdma_setup_resources()
4596 ud->bchan_tpl.levels = 2; in bcdma_setup_resources()
4597 ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap); in bcdma_setup_resources()
4599 ud->bchan_tpl.levels = 1; in bcdma_setup_resources()
4602 cap = udma_read(ud->mmrs[MMR_GCFG], 0x30); in bcdma_setup_resources()
4604 ud->rchan_tpl.levels = 3; in bcdma_setup_resources()
4605 ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap); in bcdma_setup_resources()
4606 ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap); in bcdma_setup_resources()
4608 ud->rchan_tpl.levels = 2; in bcdma_setup_resources()
4609 ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap); in bcdma_setup_resources()
4611 ud->rchan_tpl.levels = 1; in bcdma_setup_resources()
4615 ud->tchan_tpl.levels = 3; in bcdma_setup_resources()
4616 ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap); in bcdma_setup_resources()
4617 ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap); in bcdma_setup_resources()
4619 ud->tchan_tpl.levels = 2; in bcdma_setup_resources()
4620 ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap); in bcdma_setup_resources()
4622 ud->tchan_tpl.levels = 1; in bcdma_setup_resources()
4625 ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt), in bcdma_setup_resources()
4627 ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans), in bcdma_setup_resources()
4629 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), in bcdma_setup_resources()
4631 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), in bcdma_setup_resources()
4633 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), in bcdma_setup_resources()
4635 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), in bcdma_setup_resources()
4638 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt), in bcdma_setup_resources()
4641 ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows), in bcdma_setup_resources()
4644 if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map || in bcdma_setup_resources()
4645 !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans || in bcdma_setup_resources()
4646 !ud->rflows) in bcdma_setup_resources()
4647 return -ENOMEM; in bcdma_setup_resources()
4653 if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0) in bcdma_setup_resources()
4655 if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0) in bcdma_setup_resources()
4657 if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0) in bcdma_setup_resources()
4660 tisci_rm->rm_ranges[i] = in bcdma_setup_resources()
4661 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, in bcdma_setup_resources()
4662 tisci_rm->tisci_dev_id, in bcdma_setup_resources()
4669 if (ud->bchan_cnt) { in bcdma_setup_resources()
4670 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN]; in bcdma_setup_resources()
4672 bitmap_zero(ud->bchan_map, ud->bchan_cnt); in bcdma_setup_resources()
4674 bitmap_fill(ud->bchan_map, ud->bchan_cnt); in bcdma_setup_resources()
4675 for (i = 0; i < rm_res->sets; i++) in bcdma_setup_resources()
4676 udma_mark_resource_ranges(ud, ud->bchan_map, in bcdma_setup_resources()
4677 &rm_res->desc[i], in bcdma_setup_resources()
4680 irq_res.sets += rm_res->sets; in bcdma_setup_resources()
4684 if (ud->tchan_cnt) { in bcdma_setup_resources()
4685 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; in bcdma_setup_resources()
4687 bitmap_zero(ud->tchan_map, ud->tchan_cnt); in bcdma_setup_resources()
4689 bitmap_fill(ud->tchan_map, ud->tchan_cnt); in bcdma_setup_resources()
4690 for (i = 0; i < rm_res->sets; i++) in bcdma_setup_resources()
4691 udma_mark_resource_ranges(ud, ud->tchan_map, in bcdma_setup_resources()
4692 &rm_res->desc[i], in bcdma_setup_resources()
4695 irq_res.sets += rm_res->sets * 2; in bcdma_setup_resources()
4699 if (ud->rchan_cnt) { in bcdma_setup_resources()
4700 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; in bcdma_setup_resources()
4702 bitmap_zero(ud->rchan_map, ud->rchan_cnt); in bcdma_setup_resources()
4704 bitmap_fill(ud->rchan_map, ud->rchan_cnt); in bcdma_setup_resources()
4705 for (i = 0; i < rm_res->sets; i++) in bcdma_setup_resources()
4706 udma_mark_resource_ranges(ud, ud->rchan_map, in bcdma_setup_resources()
4707 &rm_res->desc[i], in bcdma_setup_resources()
4710 irq_res.sets += rm_res->sets * 2; in bcdma_setup_resources()
4714 if (ud->bchan_cnt) { in bcdma_setup_resources()
4715 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN]; in bcdma_setup_resources()
4716 for (i = 0; i < rm_res->sets; i++) { in bcdma_setup_resources()
4717 irq_res.desc[i].start = rm_res->desc[i].start + in bcdma_setup_resources()
4718 oes->bcdma_bchan_ring; in bcdma_setup_resources()
4719 irq_res.desc[i].num = rm_res->desc[i].num; in bcdma_setup_resources()
4722 if (ud->tchan_cnt) { in bcdma_setup_resources()
4723 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; in bcdma_setup_resources()
4724 for (j = 0; j < rm_res->sets; j++, i += 2) { in bcdma_setup_resources()
4725 irq_res.desc[i].start = rm_res->desc[j].start + in bcdma_setup_resources()
4726 oes->bcdma_tchan_data; in bcdma_setup_resources()
4727 irq_res.desc[i].num = rm_res->desc[j].num; in bcdma_setup_resources()
4729 irq_res.desc[i + 1].start = rm_res->desc[j].start + in bcdma_setup_resources()
4730 oes->bcdma_tchan_ring; in bcdma_setup_resources()
4731 irq_res.desc[i + 1].num = rm_res->desc[j].num; in bcdma_setup_resources()
4734 if (ud->rchan_cnt) { in bcdma_setup_resources()
4735 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; in bcdma_setup_resources()
4736 for (j = 0; j < rm_res->sets; j++, i += 2) { in bcdma_setup_resources()
4737 irq_res.desc[i].start = rm_res->desc[j].start + in bcdma_setup_resources()
4738 oes->bcdma_rchan_data; in bcdma_setup_resources()
4739 irq_res.desc[i].num = rm_res->desc[j].num; in bcdma_setup_resources()
4741 irq_res.desc[i + 1].start = rm_res->desc[j].start + in bcdma_setup_resources()
4742 oes->bcdma_rchan_ring; in bcdma_setup_resources()
4743 irq_res.desc[i + 1].num = rm_res->desc[j].num; in bcdma_setup_resources()
4747 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); in bcdma_setup_resources()
4750 dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); in bcdma_setup_resources()
4760 struct device *dev = ud->dev; in pktdma_setup_resources()
4762 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in pktdma_setup_resources()
4763 const struct udma_oes_offsets *oes = &ud->soc_data->oes; in pktdma_setup_resources()
4767 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); in pktdma_setup_resources()
4769 ud->tchan_tpl.levels = 3; in pktdma_setup_resources()
4770 ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3); in pktdma_setup_resources()
4771 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); in pktdma_setup_resources()
4773 ud->tchan_tpl.levels = 2; in pktdma_setup_resources()
4774 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); in pktdma_setup_resources()
4776 ud->tchan_tpl.levels = 1; in pktdma_setup_resources()
4779 ud->rchan_tpl.levels = ud->tchan_tpl.levels; in pktdma_setup_resources()
4780 ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0]; in pktdma_setup_resources()
4781 ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1]; in pktdma_setup_resources()
4783 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), in pktdma_setup_resources()
4785 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), in pktdma_setup_resources()
4787 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), in pktdma_setup_resources()
4789 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), in pktdma_setup_resources()
4791 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt), in pktdma_setup_resources()
4794 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows), in pktdma_setup_resources()
4796 ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt), in pktdma_setup_resources()
4799 if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans || in pktdma_setup_resources()
4800 !ud->rchans || !ud->rflows || !ud->rflow_in_use) in pktdma_setup_resources()
4801 return -ENOMEM; in pktdma_setup_resources()
4808 tisci_rm->rm_ranges[i] = in pktdma_setup_resources()
4809 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, in pktdma_setup_resources()
4810 tisci_rm->tisci_dev_id, in pktdma_setup_resources()
4815 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; in pktdma_setup_resources()
4817 bitmap_zero(ud->tchan_map, ud->tchan_cnt); in pktdma_setup_resources()
4819 bitmap_fill(ud->tchan_map, ud->tchan_cnt); in pktdma_setup_resources()
4820 for (i = 0; i < rm_res->sets; i++) in pktdma_setup_resources()
4821 udma_mark_resource_ranges(ud, ud->tchan_map, in pktdma_setup_resources()
4822 &rm_res->desc[i], "tchan"); in pktdma_setup_resources()
4826 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; in pktdma_setup_resources()
4828 bitmap_zero(ud->rchan_map, ud->rchan_cnt); in pktdma_setup_resources()
4830 bitmap_fill(ud->rchan_map, ud->rchan_cnt); in pktdma_setup_resources()
4831 for (i = 0; i < rm_res->sets; i++) in pktdma_setup_resources()
4832 udma_mark_resource_ranges(ud, ud->rchan_map, in pktdma_setup_resources()
4833 &rm_res->desc[i], "rchan"); in pktdma_setup_resources()
4837 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; in pktdma_setup_resources()
4840 bitmap_zero(ud->rflow_in_use, ud->rflow_cnt); in pktdma_setup_resources()
4842 bitmap_fill(ud->rflow_in_use, ud->rflow_cnt); in pktdma_setup_resources()
4843 for (i = 0; i < rm_res->sets; i++) in pktdma_setup_resources()
4844 udma_mark_resource_ranges(ud, ud->rflow_in_use, in pktdma_setup_resources()
4845 &rm_res->desc[i], "rflow"); in pktdma_setup_resources()
4847 irq_res.sets = rm_res->sets; in pktdma_setup_resources()
4850 rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW]; in pktdma_setup_resources()
4853 bitmap_zero(ud->tflow_map, ud->tflow_cnt); in pktdma_setup_resources()
4855 bitmap_fill(ud->tflow_map, ud->tflow_cnt); in pktdma_setup_resources()
4856 for (i = 0; i < rm_res->sets; i++) in pktdma_setup_resources()
4857 udma_mark_resource_ranges(ud, ud->tflow_map, in pktdma_setup_resources()
4858 &rm_res->desc[i], "tflow"); in pktdma_setup_resources()
4860 irq_res.sets += rm_res->sets; in pktdma_setup_resources()
4863 rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW]; in pktdma_setup_resources()
4864 for (i = 0; i < rm_res->sets; i++) { in pktdma_setup_resources()
4865 irq_res.desc[i].start = rm_res->desc[i].start + in pktdma_setup_resources()
4866 oes->pktdma_tchan_flow; in pktdma_setup_resources()
4867 irq_res.desc[i].num = rm_res->desc[i].num; in pktdma_setup_resources()
4869 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; in pktdma_setup_resources()
4870 for (j = 0; j < rm_res->sets; j++, i++) { in pktdma_setup_resources()
4871 irq_res.desc[i].start = rm_res->desc[j].start + in pktdma_setup_resources()
4872 oes->pktdma_rchan_flow; in pktdma_setup_resources()
4873 irq_res.desc[i].num = rm_res->desc[j].num; in pktdma_setup_resources()
4875 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); in pktdma_setup_resources()
4878 dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); in pktdma_setup_resources()
4887 struct device *dev = ud->dev; in setup_resources()
4890 switch (ud->match_data->type) { in setup_resources()
4901 return -EINVAL; in setup_resources()
4907 ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt; in setup_resources()
4908 if (ud->bchan_cnt) in setup_resources()
4909 ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt); in setup_resources()
4910 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt); in setup_resources()
4911 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt); in setup_resources()
4913 return -ENODEV; in setup_resources()
4915 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels), in setup_resources()
4917 if (!ud->channels) in setup_resources()
4918 return -ENOMEM; in setup_resources()
4920 switch (ud->match_data->type) { in setup_resources()
4923 "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n", in setup_resources()
4925 ud->tchan_cnt - bitmap_weight(ud->tchan_map, in setup_resources()
4926 ud->tchan_cnt), in setup_resources()
4927 ud->rchan_cnt - bitmap_weight(ud->rchan_map, in setup_resources()
4928 ud->rchan_cnt), in setup_resources()
4929 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map, in setup_resources()
4930 ud->rflow_cnt)); in setup_resources()
4936 ud->bchan_cnt - bitmap_weight(ud->bchan_map, in setup_resources()
4937 ud->bchan_cnt), in setup_resources()
4938 ud->tchan_cnt - bitmap_weight(ud->tchan_map, in setup_resources()
4939 ud->tchan_cnt), in setup_resources()
4940 ud->rchan_cnt - bitmap_weight(ud->rchan_map, in setup_resources()
4941 ud->rchan_cnt)); in setup_resources()
4947 ud->tchan_cnt - bitmap_weight(ud->tchan_map, in setup_resources()
4948 ud->tchan_cnt), in setup_resources()
4949 ud->rchan_cnt - bitmap_weight(ud->rchan_map, in setup_resources()
4950 ud->rchan_cnt)); in setup_resources()
4961 struct udma_rx_flush *rx_flush = &ud->rx_flush; in udma_setup_rx_flush()
4965 struct device *dev = ud->dev; in udma_setup_rx_flush()
4970 rx_flush->buffer_size = SZ_1K; in udma_setup_rx_flush()
4971 rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size, in udma_setup_rx_flush()
4973 if (!rx_flush->buffer_vaddr) in udma_setup_rx_flush()
4974 return -ENOMEM; in udma_setup_rx_flush()
4976 rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr, in udma_setup_rx_flush()
4977 rx_flush->buffer_size, in udma_setup_rx_flush()
4979 if (dma_mapping_error(dev, rx_flush->buffer_paddr)) in udma_setup_rx_flush()
4980 return -ENOMEM; in udma_setup_rx_flush()
4983 hwdesc = &rx_flush->hwdescs[0]; in udma_setup_rx_flush()
4985 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1); in udma_setup_rx_flush()
4986 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size, in udma_setup_rx_flush()
4987 ud->desc_align); in udma_setup_rx_flush()
4989 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size, in udma_setup_rx_flush()
4991 if (!hwdesc->cppi5_desc_vaddr) in udma_setup_rx_flush()
4992 return -ENOMEM; in udma_setup_rx_flush()
4994 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr, in udma_setup_rx_flush()
4995 hwdesc->cppi5_desc_size, in udma_setup_rx_flush()
4997 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr)) in udma_setup_rx_flush()
4998 return -ENOMEM; in udma_setup_rx_flush()
5001 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size; in udma_setup_rx_flush()
5003 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size; in udma_setup_rx_flush()
5005 tr_desc = hwdesc->cppi5_desc_vaddr; in udma_setup_rx_flush()
5010 tr_req = hwdesc->tr_req_base; in udma_setup_rx_flush()
5011 cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false, in udma_setup_rx_flush()
5013 cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT); in udma_setup_rx_flush()
5015 tr_req->addr = rx_flush->buffer_paddr; in udma_setup_rx_flush()
5016 tr_req->icnt0 = rx_flush->buffer_size; in udma_setup_rx_flush()
5017 tr_req->icnt1 = 1; in udma_setup_rx_flush()
5019 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr, in udma_setup_rx_flush()
5020 hwdesc->cppi5_desc_size, DMA_TO_DEVICE); in udma_setup_rx_flush()
5023 hwdesc = &rx_flush->hwdescs[1]; in udma_setup_rx_flush()
5024 hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) + in udma_setup_rx_flush()
5027 ud->desc_align); in udma_setup_rx_flush()
5029 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size, in udma_setup_rx_flush()
5031 if (!hwdesc->cppi5_desc_vaddr) in udma_setup_rx_flush()
5032 return -ENOMEM; in udma_setup_rx_flush()
5034 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr, in udma_setup_rx_flush()
5035 hwdesc->cppi5_desc_size, in udma_setup_rx_flush()
5037 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr)) in udma_setup_rx_flush()
5038 return -ENOMEM; in udma_setup_rx_flush()
5040 desc = hwdesc->cppi5_desc_vaddr; in udma_setup_rx_flush()
5042 cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT); in udma_setup_rx_flush()
5043 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0); in udma_setup_rx_flush()
5046 rx_flush->buffer_paddr, rx_flush->buffer_size, in udma_setup_rx_flush()
5047 rx_flush->buffer_paddr, rx_flush->buffer_size); in udma_setup_rx_flush()
5049 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr, in udma_setup_rx_flush()
5050 hwdesc->cppi5_desc_size, DMA_TO_DEVICE); in udma_setup_rx_flush()
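/*
 * Editor's note (hedged): udma_setup_rx_flush() above pre-builds two
 * throw-away descriptors around a 1 KiB bounce buffer (one type-1 TR
 * descriptor and one host packet descriptor), both DMA-mapped once at setup
 * time. The driver appears to push these when draining a receive channel so
 * that stale data lands in the bounce buffer rather than in client memory;
 * the teardown path that uses them is not shown in this fragment.
 */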
5059 struct udma_chan_config *ucc = &uc->config; in udma_dbg_summary_show_chan()
5061 seq_printf(s, " %-13s| %s", dma_chan_name(chan), in udma_dbg_summary_show_chan()
5062 chan->dbg_client_name ?: "in-use"); in udma_dbg_summary_show_chan()
5063 if (ucc->tr_trigger_type) in udma_dbg_summary_show_chan()
5067 dmaengine_get_direction_text(uc->config.dir)); in udma_dbg_summary_show_chan()
5069 switch (uc->config.dir) { in udma_dbg_summary_show_chan()
5071 if (uc->ud->match_data->type == DMA_TYPE_BCDMA) { in udma_dbg_summary_show_chan()
5072 seq_printf(s, "bchan%d)\n", uc->bchan->id); in udma_dbg_summary_show_chan()
5076 seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id, in udma_dbg_summary_show_chan()
5077 ucc->src_thread, ucc->dst_thread); in udma_dbg_summary_show_chan()
5080 seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id, in udma_dbg_summary_show_chan()
5081 ucc->src_thread, ucc->dst_thread); in udma_dbg_summary_show_chan()
5082 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA) in udma_dbg_summary_show_chan()
5083 seq_printf(s, "rflow%d, ", uc->rflow->id); in udma_dbg_summary_show_chan()
5086 seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id, in udma_dbg_summary_show_chan()
5087 ucc->src_thread, ucc->dst_thread); in udma_dbg_summary_show_chan()
5088 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA) in udma_dbg_summary_show_chan()
5089 seq_printf(s, "tflow%d, ", uc->tchan->tflow_id); in udma_dbg_summary_show_chan()
5096 if (ucc->ep_type == PSIL_EP_NATIVE) { in udma_dbg_summary_show_chan()
5097 seq_printf(s, "PSI-L Native"); in udma_dbg_summary_show_chan()
5098 if (ucc->metadata_size) { in udma_dbg_summary_show_chan()
5099 seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : ""); in udma_dbg_summary_show_chan()
5100 if (ucc->psd_size) in udma_dbg_summary_show_chan()
5101 seq_printf(s, " PSDsize:%u", ucc->psd_size); in udma_dbg_summary_show_chan()
5106 if (ucc->enable_acc32 || ucc->enable_burst) in udma_dbg_summary_show_chan()
5108 ucc->enable_acc32 ? " ACC32" : "", in udma_dbg_summary_show_chan()
5109 ucc->enable_burst ? " BURST" : ""); in udma_dbg_summary_show_chan()
5112 seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode"); in udma_dbg_summary_show_chan()
5120 list_for_each_entry(chan, &dma_dev->channels, device_node) { in udma_dbg_summary_show()
5121 if (chan->client_count) in udma_dbg_summary_show()
5129 const struct udma_match_data *match_data = ud->match_data; in udma_get_copy_align()
5130 u8 tpl; in udma_get_copy_align() local
5132 if (!match_data->enable_memcpy_support) in udma_get_copy_align()
5135 /* Get the highest TPL level the device supports for memcpy */ in udma_get_copy_align()
5136 if (ud->bchan_cnt) in udma_get_copy_align()
5137 tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0); in udma_get_copy_align()
5138 else if (ud->tchan_cnt) in udma_get_copy_align()
5139 tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0); in udma_get_copy_align()
5143 switch (match_data->burst_size[tpl]) { in udma_get_copy_align()
5163 struct device_node *navss_node = pdev->dev.parent->of_node; in udma_probe()
5165 struct device *dev = &pdev->dev; in udma_probe()
5177 return -ENOMEM; in udma_probe()
5179 match = of_match_node(udma_of_match, dev->of_node); in udma_probe()
5181 match = of_match_node(bcdma_of_match, dev->of_node); in udma_probe()
5183 match = of_match_node(pktdma_of_match, dev->of_node); in udma_probe()
5186 return -ENODEV; in udma_probe()
5189 ud->match_data = match->data; in udma_probe()
5194 return -ENODEV; in udma_probe()
5196 ud->soc_data = soc->data; in udma_probe()
5202 ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci"); in udma_probe()
5203 if (IS_ERR(ud->tisci_rm.tisci)) in udma_probe()
5204 return PTR_ERR(ud->tisci_rm.tisci); in udma_probe()
5206 ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", in udma_probe()
5207 &ud->tisci_rm.tisci_dev_id); in udma_probe()
5209 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret); in udma_probe()
5212 pdev->id = ud->tisci_rm.tisci_dev_id; in udma_probe()
5214 ret = of_property_read_u32(navss_node, "ti,sci-dev-id", in udma_probe()
5215 &ud->tisci_rm.tisci_navss_dev_id); in udma_probe()
5217 dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret); in udma_probe()
5221 if (ud->match_data->type == DMA_TYPE_UDMA) { in udma_probe()
5222 ret = of_property_read_u32(dev->of_node, "ti,udma-atype", in udma_probe()
5223 &ud->atype); in udma_probe()
5224 if (!ret && ud->atype > 2) { in udma_probe()
5225 dev_err(dev, "Invalid atype: %u\n", ud->atype); in udma_probe()
5226 return -EINVAL; in udma_probe()
5229 ret = of_property_read_u32(dev->of_node, "ti,asel", in udma_probe()
5230 &ud->asel); in udma_probe()
5231 if (!ret && ud->asel > 15) { in udma_probe()
5232 dev_err(dev, "Invalid asel: %u\n", ud->asel); in udma_probe()
5233 return -EINVAL; in udma_probe()
5237 ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops; in udma_probe()
5238 ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops; in udma_probe()
5240 if (ud->match_data->type == DMA_TYPE_UDMA) { in udma_probe()
5241 ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc"); in udma_probe()
5245 ring_init_data.tisci = ud->tisci_rm.tisci; in udma_probe()
5246 ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id; in udma_probe()
5247 if (ud->match_data->type == DMA_TYPE_BCDMA) { in udma_probe()
5248 ring_init_data.num_rings = ud->bchan_cnt + in udma_probe()
5249 ud->tchan_cnt + in udma_probe()
5250 ud->rchan_cnt; in udma_probe()
5252 ring_init_data.num_rings = ud->rflow_cnt + in udma_probe()
5253 ud->tflow_cnt; in udma_probe()
5256 ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data); in udma_probe()
5259 if (IS_ERR(ud->ringacc)) in udma_probe()
5260 return PTR_ERR(ud->ringacc); in udma_probe()
5262 dev->msi_domain = of_msi_get_domain(dev, dev->of_node, in udma_probe()
5264 if (!dev->msi_domain) { in udma_probe()
5266 return -EPROBE_DEFER; in udma_probe()
5269 dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask); in udma_probe()
5271 if (ud->match_data->type != DMA_TYPE_PKTDMA) { in udma_probe()
5272 dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask); in udma_probe()
5273 ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic; in udma_probe()
5276 ud->ddev.device_config = udma_slave_config; in udma_probe()
5277 ud->ddev.device_prep_slave_sg = udma_prep_slave_sg; in udma_probe()
5278 ud->ddev.device_issue_pending = udma_issue_pending; in udma_probe()
5279 ud->ddev.device_tx_status = udma_tx_status; in udma_probe()
5280 ud->ddev.device_pause = udma_pause; in udma_probe()
5281 ud->ddev.device_resume = udma_resume; in udma_probe()
5282 ud->ddev.device_terminate_all = udma_terminate_all; in udma_probe()
5283 ud->ddev.device_synchronize = udma_synchronize; in udma_probe()
5285 ud->ddev.dbg_summary_show = udma_dbg_summary_show; in udma_probe()
5288 switch (ud->match_data->type) { in udma_probe()
5290 ud->ddev.device_alloc_chan_resources = in udma_probe()
5294 ud->ddev.device_alloc_chan_resources = in udma_probe()
5296 ud->ddev.device_router_config = bcdma_router_config; in udma_probe()
5299 ud->ddev.device_alloc_chan_resources = in udma_probe()
5303 return -EINVAL; in udma_probe()
5305 ud->ddev.device_free_chan_resources = udma_free_chan_resources; in udma_probe()
5307 ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS; in udma_probe()
5308 ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS; in udma_probe()
5309 ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); in udma_probe()
5310 ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; in udma_probe()
5311 ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT | in udma_probe()
5313 if (ud->match_data->enable_memcpy_support && in udma_probe()
5314 !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) { in udma_probe()
5315 dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask); in udma_probe()
5316 ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy; in udma_probe()
5317 ud->ddev.directions |= BIT(DMA_MEM_TO_MEM); in udma_probe()
5320 ud->ddev.dev = dev; in udma_probe()
5321 ud->dev = dev; in udma_probe()
5322 ud->psil_base = ud->match_data->psil_base; in udma_probe()
5324 INIT_LIST_HEAD(&ud->ddev.channels); in udma_probe()
5325 INIT_LIST_HEAD(&ud->desc_to_purge); in udma_probe()
5331 spin_lock_init(&ud->lock); in udma_probe()
5332 INIT_WORK(&ud->purge_work, udma_purge_desc_work); in udma_probe()
5334 ud->desc_align = 64; in udma_probe()
5335 if (ud->desc_align < dma_get_cache_alignment()) in udma_probe()
5336 ud->desc_align = dma_get_cache_alignment(); in udma_probe()
5342 for (i = 0; i < ud->bchan_cnt; i++) { in udma_probe()
5343 struct udma_bchan *bchan = &ud->bchans[i]; in udma_probe()
5345 bchan->id = i; in udma_probe()
5346 bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000; in udma_probe()
5349 for (i = 0; i < ud->tchan_cnt; i++) { in udma_probe()
5350 struct udma_tchan *tchan = &ud->tchans[i]; in udma_probe()
5352 tchan->id = i; in udma_probe()
5353 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000; in udma_probe()
5356 for (i = 0; i < ud->rchan_cnt; i++) { in udma_probe()
5357 struct udma_rchan *rchan = &ud->rchans[i]; in udma_probe()
5359 rchan->id = i; in udma_probe()
5360 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000; in udma_probe()
5363 for (i = 0; i < ud->rflow_cnt; i++) { in udma_probe()
5364 struct udma_rflow *rflow = &ud->rflows[i]; in udma_probe()
5366 rflow->id = i; in udma_probe()
5370 struct udma_chan *uc = &ud->channels[i]; in udma_probe()
5372 uc->ud = ud; in udma_probe()
5373 uc->vc.desc_free = udma_desc_free; in udma_probe()
5374 uc->id = i; in udma_probe()
5375 uc->bchan = NULL; in udma_probe()
5376 uc->tchan = NULL; in udma_probe()
5377 uc->rchan = NULL; in udma_probe()
5378 uc->config.remote_thread_id = -1; in udma_probe()
5379 uc->config.mapped_channel_id = -1; in udma_probe()
5380 uc->config.default_flow_id = -1; in udma_probe()
5381 uc->config.dir = DMA_MEM_TO_MEM; in udma_probe()
5382 uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d", in udma_probe()
5385 vchan_init(&uc->vc, &ud->ddev); in udma_probe()
5387 tasklet_setup(&uc->vc.task, udma_vchan_complete); in udma_probe()
5388 init_completion(&uc->teardown_completed); in udma_probe()
5389 INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion); in udma_probe()
5393 ud->ddev.copy_align = udma_get_copy_align(ud); in udma_probe()
5395 ret = dma_async_device_register(&ud->ddev); in udma_probe()
5403 ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud); in udma_probe()
5406 dma_async_device_unregister(&ud->ddev); in udma_probe()
5414 .name = "ti-udma",
5424 .name = "ti-bcdma",
5434 .name = "ti-pktdma",
5443 #include "k3-udma-private.c"