/*
 * TI DaVinci CPDMA (drivers/net/ethernet/ti/davinci_cpdma.c) -- excerpts from
 * the descriptor-ring DMA engine shared by the davinci emac and cpsw switch.
 */

// SPDX-License-Identifier: GPL-2.0

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include "davinci_cpdma.h"

/* tx channels occupy the lower half of ctlr->channels[], rx the upper half */
#define tx_chan_num(chan)       (chan)
#define rx_chan_num(chan)       ((chan) + CPDMA_MAX_CHANNELS)
#define is_rx_chan(chan)        ((chan)->chan_num >= CPDMA_MAX_CHANNELS)
#define is_tx_chan(chan)        (!is_rx_chan(chan))
#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
#define chan_linear(chan)       __chan_linear((chan)->chan_num)

/* register/descriptor accessors; all hardware-visible state is MMIO */
#define dma_reg_read(ctlr, ofs)         readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)            readl((chan)->fld)
#define desc_read(desc, fld)            readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)     writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)        writel(v, (chan)->fld)
#define desc_write(desc, fld, v)        writel((u32)(v), &(desc)->fld)
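/*
 * Worked example (ours, not from the driver): assuming CPDMA_MAX_CHANNELS is
 * 8 (the usual per-direction hardware count, and a power of two as the mask
 * trick requires), rx channel 3 is stored with chan_num == 8 + 3 == 11, so
 * is_rx_chan() sees 11 >= 8 and __chan_linear(11) == (11 & 7) == 3 recovers
 * the hardware channel index used for register offsets and BIT() masks.
 */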
static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
{
        struct cpdma_desc_pool *pool = ctlr->pool;

        if (!pool)
                return;

        WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
             "cpdma_desc_pool size %zd != avail %zd",
             gen_pool_size(pool->gen_pool),
             gen_pool_avail(pool->gen_pool));
        if (pool->cpumap)
                dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap,
                                  pool->phys);
}
/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details
 */
static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
{
        struct cpdma_params *cpdma_params = &ctlr->params;
        struct cpdma_desc_pool *pool;
        int ret = -ENOMEM;

        pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL);
        if (!pool)
                goto gen_pool_create_fail;
        ctlr->pool = pool;

        pool->mem_size  = cpdma_params->desc_mem_size;
        pool->desc_size = ALIGN(sizeof(struct cpdma_desc),
                                cpdma_params->desc_align);
        pool->num_desc  = pool->mem_size / pool->desc_size;

        if (cpdma_params->descs_pool_size) {
                /* recalculate the memory size required for the descriptor
                 * pool based on the number of descriptors specified by the
                 * user; if it exceeds the CPPI internal RAM size
                 * (desc_mem_size), then switch to use DDR
                 */
                pool->num_desc = cpdma_params->descs_pool_size;
                pool->mem_size = pool->desc_size * pool->num_desc;
                if (pool->mem_size > cpdma_params->desc_mem_size)
                        cpdma_params->desc_mem_phys = 0;
        }

        pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size),
                                              -1, "cpdma");
        if (IS_ERR(pool->gen_pool)) {
                ret = PTR_ERR(pool->gen_pool);
                dev_err(ctlr->dev, "pool create failed %d\n", ret);
                goto gen_pool_create_fail;
        }

        if (cpdma_params->desc_mem_phys) {
                pool->phys  = cpdma_params->desc_mem_phys;
                pool->iomap = devm_ioremap(ctlr->dev, pool->phys,
                                           pool->mem_size);
                pool->hw_addr = cpdma_params->desc_hw_addr;
        } else {
                pool->cpumap = dma_alloc_coherent(ctlr->dev, pool->mem_size,
                                                  &pool->hw_addr, GFP_KERNEL);
                pool->iomap = (void __iomem __force *)pool->cpumap;
                pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
        }

        if (!pool->iomap)
                goto gen_pool_create_fail;

        ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
                                pool->phys, pool->mem_size, -1);
        if (ret < 0) {
                dev_err(ctlr->dev, "pool add failed %d\n", ret);
                goto gen_pool_add_virt_fail;
        }

        return 0;

gen_pool_add_virt_fail:
        cpdma_desc_pool_destroy(ctlr);
gen_pool_create_fail:
        ctlr->pool = NULL;
        return ret;
}
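/*
 * Worked example (ours; the RAM size is an assumption, not from a datasheet):
 * struct cpdma_desc carries four hardware words plus three software words
 * (28 bytes), so with desc_align == 16 the aligned desc_size is 32 bytes and
 * an 8 KiB on-chip descriptor RAM gives num_desc = 8192 / 32 = 256.
 * Requesting descs_pool_size = 512 would need 16 KiB, exceed desc_mem_size,
 * zero desc_mem_phys, and move the pool to DDR via dma_alloc_coherent().
 */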
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
                                   struct cpdma_desc __iomem *desc)
{
        return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
        return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}

static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
        return (struct cpdma_desc __iomem *)
                gen_pool_alloc(pool->gen_pool, pool->desc_size);
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
                            struct cpdma_desc __iomem *desc, int num_desc)
{
        gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
}
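/*
 * Worked example of the address round trip (ours; addresses illustrative):
 * if pool->iomap == 0xe0a00000 and pool->hw_addr == 0x4a102000, a descriptor
 * at CPU address 0xe0a00040 maps to DMA address 0x4a102040 via desc_phys(),
 * and desc_from_phys() inverts it.  Only the offset within the pool matters,
 * so no per-descriptor lookup table is needed.
 */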
static int _cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
        struct cpdma_control_info *info = &controls[control];
        u32 val;

        if (!ctlr->params.has_ext_regs)
                return -ENOTSUPP;

        if (ctlr->state != CPDMA_STATE_ACTIVE)
                return -EINVAL;

        if (control < 0 || control >= ARRAY_SIZE(controls))
                return -ENOENT;

        if ((info->access & ACCESS_WO) != ACCESS_WO)
                return -EPERM;

        /* read-modify-write the control field within its register */
        val  = dma_reg_read(ctlr, info->reg);
        val &= ~(info->mask << info->shift);
        val |= (value & info->mask) << info->shift;
        dma_reg_write(ctlr, info->reg, val);

        return 0;
}

static int _cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
        struct cpdma_control_info *info = &controls[control];
        int ret;

        if (!ctlr->params.has_ext_regs)
                return -ENOTSUPP;

        if (ctlr->state != CPDMA_STATE_ACTIVE)
                return -EINVAL;

        if (control < 0 || control >= ARRAY_SIZE(controls))
                return -ENOENT;

        if ((info->access & ACCESS_RO) != ACCESS_RO)
                return -EPERM;

        ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

        return ret;
}
/* cpdma_chan_set_chan_shaper - set shaper for a channel
 * Has to be called under ctlr lock
 */
static int cpdma_chan_set_chan_shaper(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        u32 rate_reg;
        u32 rmask;
        int ret;

        if (!chan->rate)
                return 0;

        rate_reg = CPDMA_TX_PRI0_RATE + 4 * chan->chan_num;
        dma_reg_write(ctlr, rate_reg, chan->rate_factor);

        rmask = _cpdma_control_get(ctlr, CPDMA_TX_RLIM);
        rmask |= chan->mask;

        ret = _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
        return ret;
}
static int cpdma_chan_on(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        struct cpdma_desc_pool *pool = ctlr->pool;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state != CPDMA_STATE_IDLE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EBUSY;
        }
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }
        dma_reg_write(ctlr, chan->int_set, chan->mask);
        chan->state = CPDMA_STATE_ACTIVE;
        if (chan->head) {
                chan_write(chan, hdp, desc_phys(pool, chan->head));
                if (chan->rxfree)
                        chan_write(chan, rxfree, chan->count);
        }

        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}
/* cpdma_chan_fit_rate - set rate for a channel and check if it's possible.
 * rmask - mask of rate limited channels
 * Returns min rate in Kb/s
 */
static int cpdma_chan_fit_rate(struct cpdma_chan *ch, u32 rate,
                               u32 *rmask, int *prio_mode)
{
        struct cpdma_ctlr *ctlr = ch->ctlr;
        struct cpdma_chan *chan;
        u32 old_rate = ch->rate;
        u32 new_rmask = 0;
        int rlim = 0;
        int i;

        for (i = tx_chan_num(0); i < tx_chan_num(CPDMA_MAX_CHANNELS); i++) {
                chan = ctlr->channels[i];
                if (!chan)
                        continue;

                if (chan == ch)
                        chan->rate = rate;

                if (chan->rate) {
                        rlim = 1;
                        new_rmask |= chan->mask;
                        continue;
                }

                if (rlim)
                        goto err;
        }

        *rmask = new_rmask;
        *prio_mode = rlim;
        return 0;

err:
        ch->rate = old_rate;
        dev_err(ctlr->dev, "Upper cpdma ch%d is not rate limited\n",
                chan->chan_num);
        return -EINVAL;
}
/* calculate the send/idle count pair whose resulting rate best approximates
 * the requested ch->rate, then program it into the channel's rate register
 */
static int cpdma_chan_set_factors(struct cpdma_ctlr *ctlr,
                                  struct cpdma_chan *ch)
{
        u32 best_send_cnt = 0, best_idle_cnt = 0, best_rate = 0;
        u32 min_send_cnt, send_cnt;
        u32 rate_reg, freq;

        if (!ch->rate) {
                ch->rate_factor = 0;
                goto set_factor;
        }

        freq = ctlr->params.bus_freq_mhz * 1000 * 32;
        if (!freq) {
                dev_err(ctlr->dev, "The bus frequency is not set\n");
                return -EINVAL;
        }

        min_send_cnt = freq - ch->rate;
        send_cnt = DIV_ROUND_UP(min_send_cnt, ch->rate);

        /* Iterative best-fit search, partially elided in this excerpt.
         * Each pass derives an idle_cnt for the current send_cnt, computes
         * the rate that pair achieves and its delta from the requested rate,
         * and keeps the best pair; surviving fragments of the loop body:
         *
         *      divident = ch->rate * send_cnt;
         *      delta = new_rate >= ch->rate ? new_rate - ch->rate : delta;
         *      send_cnt = DIV_ROUND_CLOSEST_ULL(divident, ch->rate);
         *      send_cnt -= idle_cnt;
         */

        ch->rate = best_rate;
        ch->rate_factor = best_send_cnt | (best_idle_cnt << 16);

set_factor:
        rate_reg = CPDMA_TX_PRI0_RATE + 4 * ch->chan_num;
        dma_reg_write(ctlr, rate_reg, ch->rate_factor);
        return 0;
}
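/*
 * Worked example (ours): with bus_freq_mhz == 250, freq == 250 * 1000 * 32
 * == 8,000,000 rate units.  The CPDMA_TX_PRIn_RATE register packs the chosen
 * pair as (idle_cnt << 16) | send_cnt, so send_cnt == 3 with idle_cnt == 1
 * is written as 0x00010003.  Achievable rates are therefore quantized, which
 * is why the search above tracks the closest pair rather than an exact match.
 */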
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
        struct cpdma_ctlr *ctlr;

        ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
        if (!ctlr)
                return NULL;

        ctlr->state = CPDMA_STATE_IDLE;
        ctlr->params = *params;
        ctlr->dev = params->dev;
        ctlr->chan_num = 0;
        spin_lock_init(&ctlr->lock);

        if (cpdma_desc_pool_create(ctlr))
                return NULL;
        /* split pool equally between RX/TX by default */
        ctlr->num_tx_desc = ctlr->pool->num_desc / 2;
        ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc;
        ctlr->num_chan = params->num_chan;

        if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
                ctlr->num_chan = CPDMA_MAX_CHANNELS;
        return ctlr;
}
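/*
 * Hedged usage sketch (ours, not from the driver): glue code such as cpsw
 * fills a cpdma_params block and hands it to cpdma_ctlr_create().  Values
 * are illustrative; the txhdp/rxhdp/txcp/rxcp state-RAM pointers and the
 * descriptor RAM addresses are omitted for brevity.
 */
static struct cpdma_ctlr *example_cpdma_init(struct device *dev,
                                             void __iomem *dma_regs)
{
        struct cpdma_params params = {
                .dev             = dev,
                .dmaregs         = dma_regs,
                .desc_mem_size   = 8192, /* on-chip CPPI RAM, assumed 8 KiB */
                .desc_align      = 16,
                .bus_freq_mhz    = 250,  /* needed by the tx shaper math */
                .min_packet_size = 64,
                .num_chan        = 8,
                .has_soft_reset  = true,
                .has_ext_regs    = true,
        };

        return cpdma_ctlr_create(&params);
}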
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
        struct cpdma_chan *chan;
        unsigned long flags;
        int i, prio_mode;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_IDLE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EBUSY;
        }

        if (ctlr->params.has_soft_reset) {
                unsigned timeout = 10 * 100;

                dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
                while (timeout) {
                        if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
                                break;
                        udelay(10);
                        timeout--;
                }
                WARN_ON(!timeout);
        }

        for (i = 0; i < ctlr->num_chan; i++) {
                writel(0, ctlr->params.txhdp + 4 * i);
                writel(0, ctlr->params.rxhdp + 4 * i);
                writel(0, ctlr->params.txcp + 4 * i);
                writel(0, ctlr->params.rxcp + 4 * i);
        }

        dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
        dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

        dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
        dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

        ctlr->state = CPDMA_STATE_ACTIVE;

        prio_mode = 0;
        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                chan = ctlr->channels[i];
                if (chan) {
                        cpdma_chan_set_chan_shaper(chan);
                        cpdma_chan_on(chan);

                        /* off prio mode if all tx channels are rate limited */
                        if (is_tx_chan(chan) && !chan->rate)
                                prio_mode = 1;
                }
        }

        _cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EINVAL;
        }

        ctlr->state = CPDMA_STATE_TEARDOWN;
        spin_unlock_irqrestore(&ctlr->lock, flags);

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_stop(ctlr->channels[i]);
        }

        spin_lock_irqsave(&ctlr->lock, flags);
        dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
        dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

        dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
        dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

        ctlr->state = CPDMA_STATE_IDLE;

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
        int i;

        if (!ctlr)
                return -EINVAL;

        if (ctlr->state != CPDMA_STATE_IDLE)
                cpdma_ctlr_stop(ctlr);

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
                cpdma_chan_destroy(ctlr->channels[i]);

        cpdma_desc_pool_destroy(ctlr);
        return 0;
}
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EINVAL;
        }

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_int_ctrl(ctlr->channels[i], enable);
        }

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
                                 int rx, int desc_num,
                                 int per_ch_desc)
{
        struct cpdma_chan *chan, *most_chan = NULL;
        int desc_cnt = desc_num;
        int most_dnum = 0;
        int min, max, i;

        if (!desc_num)
                return;

        if (rx) {
                min = rx_chan_num(0);
                max = rx_chan_num(CPDMA_MAX_CHANNELS);
        } else {
                min = tx_chan_num(0);
                max = tx_chan_num(CPDMA_MAX_CHANNELS);
        }

        for (i = min; i < max; i++) {
                chan = ctlr->channels[i];
                if (!chan)
                        continue;

                if (chan->weight)
                        chan->desc_num = (chan->weight * desc_num) / 100;
                else
                        chan->desc_num = per_ch_desc;

                desc_cnt -= chan->desc_num;

                if (most_dnum < chan->desc_num) {
                        most_dnum = chan->desc_num;
                        most_chan = chan;
                }
        }
        /* use remains */
        if (most_chan)
                most_chan->desc_num += desc_cnt;
}
/*
 * cpdma_chan_split_pool - Splits ctrl pool between all channels.
 * Has to be called under ctlr lock
 */
static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
{
        int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
        int free_rx_num = 0, free_tx_num = 0;
        int rx_weight = 0, tx_weight = 0;
        int tx_desc_num, rx_desc_num;
        struct cpdma_chan *chan;
        int i;

        if (!ctlr->chan_num)
                return 0;

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                chan = ctlr->channels[i];
                if (!chan)
                        continue;

                if (is_rx_chan(chan)) {
                        if (!chan->weight)
                                free_rx_num++;
                        rx_weight += chan->weight;
                } else {
                        if (!chan->weight)
                                free_tx_num++;
                        tx_weight += chan->weight;
                }
        }

        if (rx_weight > 100 || tx_weight > 100)
                return -EINVAL;

        tx_desc_num = ctlr->num_tx_desc;
        rx_desc_num = ctlr->num_rx_desc;

        if (free_tx_num) {
                tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100;
                tx_per_ch_desc /= free_tx_num;
        }
        if (free_rx_num) {
                rx_per_ch_desc = rx_desc_num - (rx_weight * rx_desc_num) / 100;
                rx_per_ch_desc /= free_rx_num;
        }

        cpdma_chan_set_descs(ctlr, 0, tx_desc_num, tx_per_ch_desc);
        cpdma_chan_set_descs(ctlr, 1, rx_desc_num, rx_per_ch_desc);

        return 0;
}
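/*
 * Worked example (ours): with num_tx_desc == 128 and two tx channels, one
 * weighted 40% and one unweighted, the weighted channel gets
 * (40 * 128) / 100 == 51 descriptors and the free-channel share is
 * (128 - 51) / 1 == 77; any integer-division remainder is handed to the
 * largest channel by cpdma_chan_set_descs() in its "use remains" step.
 */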
/* cpdma_chan_set_weight - set weight of a channel in percentage.
 * Tx and Rx channels have separate weights: 100% for all RX channels
 * together and 100% for all Tx channels together.  The weight decides
 * what share of the cpdma resources, in particular descriptors, each
 * channel receives.
 */
int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight)
{
        struct cpdma_ctlr *ctlr = ch->ctlr;
        unsigned long flags, ch_flags;
        int ret;

        spin_lock_irqsave(&ctlr->lock, flags);
        spin_lock_irqsave(&ch->lock, ch_flags);
        if (ch->weight == weight) {
                spin_unlock_irqrestore(&ch->lock, ch_flags);
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return 0;
        }
        ch->weight = weight;
        spin_unlock_irqrestore(&ch->lock, ch_flags);

        /* re-split pool using new channel weight */
        ret = cpdma_chan_split_pool(ctlr);
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return ret;
}
/* cpdma_chan_get_min_rate - get minimum allowed rate for channel
 * Should be called before cpdma_chan_set_rate.
 * Returns min rate in Kb/s
 */
u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr)
{
        unsigned int divident, divisor;

        divident = ctlr->params.bus_freq_mhz * 32 * 1000;
        divisor = 1 + CPDMA_MAX_RLIM_CNT;

        return DIV_ROUND_UP(divident, divisor);
}
/* cpdma_chan_set_rate - limits bandwidth for transmit channel.
 * The bandwidth-limited channels have to be in order beginning from lowest.
 * ch - transmit channel the bandwidth is configured for
 * rate - bandwidth in Kb/s, if 0 - then off shaper
 */
int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
{
        unsigned long flags, ch_flags;
        struct cpdma_ctlr *ctlr;
        int ret, prio_mode;
        u32 rmask;

        if (!ch || !is_tx_chan(ch))
                return -EINVAL;

        if (ch->rate == rate)
                return rate;

        ctlr = ch->ctlr;
        spin_lock_irqsave(&ctlr->lock, flags);
        spin_lock_irqsave(&ch->lock, ch_flags);

        ret = cpdma_chan_fit_rate(ch, rate, &rmask, &prio_mode);
        if (ret)
                goto err;

        ret = cpdma_chan_set_factors(ctlr, ch);
        if (ret)
                goto err;

        spin_unlock_irqrestore(&ch->lock, ch_flags);

        /* on shapers */
        _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
        _cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return ret;

err:
        spin_unlock_irqrestore(&ch->lock, ch_flags);
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return ret;
}
u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
{
        unsigned long flags;
        u32 rate;

        spin_lock_irqsave(&ch->lock, flags);
        rate = ch->rate;
        spin_unlock_irqrestore(&ch->lock, flags);

        return rate;
}
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
                                     cpdma_handler_fn handler, int rx_type)
{
        int offset = chan_num * 4;
        struct cpdma_chan *chan;
        unsigned long flags;

        chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);

        if (__chan_linear(chan_num) >= ctlr->num_chan)
                return ERR_PTR(-EINVAL);

        chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
        if (!chan)
                return ERR_PTR(-ENOMEM);

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->channels[chan_num]) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                devm_kfree(ctlr->dev, chan);
                return ERR_PTR(-EBUSY);
        }

        chan->ctlr      = ctlr;
        chan->state     = CPDMA_STATE_IDLE;
        chan->chan_num  = chan_num;
        chan->handler   = handler;
        chan->rate      = 0;
        chan->weight    = 0;

        if (is_rx_chan(chan)) {
                chan->hdp       = ctlr->params.rxhdp + offset;
                chan->cp        = ctlr->params.rxcp + offset;
                chan->rxfree    = ctlr->params.rxfree + offset;
                chan->int_set   = CPDMA_RXINTMASKSET;
                chan->int_clear = CPDMA_RXINTMASKCLEAR;
                chan->td        = CPDMA_RXTEARDOWN;
                chan->dir       = DMA_FROM_DEVICE;
        } else {
                chan->hdp       = ctlr->params.txhdp + offset;
                chan->cp        = ctlr->params.txcp + offset;
                chan->int_set   = CPDMA_TXINTMASKSET;
                chan->int_clear = CPDMA_TXINTMASKCLEAR;
                chan->td        = CPDMA_TXTEARDOWN;
                chan->dir       = DMA_TO_DEVICE;
        }
        chan->mask = BIT(chan_linear(chan));

        spin_lock_init(&chan->lock);

        ctlr->channels[chan_num] = chan;
        ctlr->chan_num++;

        cpdma_chan_split_pool(ctlr);

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return chan;
}
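/*
 * Hedged usage sketch (ours): one tx and one rx channel per queue, each with
 * a completion handler that receives back the token passed at submit time.
 * my_tx_handler() and example_open_channels() are illustrative names.
 */
static void my_tx_handler(void *token, int len, int status)
{
        dev_kfree_skb_any(token);       /* token was the skb we submitted */
}

static int example_open_channels(struct cpdma_ctlr *dma,
                                 cpdma_handler_fn rx_handler)
{
        struct cpdma_chan *txch, *rxch;

        txch = cpdma_chan_create(dma, 0, my_tx_handler, 0 /* tx */);
        if (IS_ERR(txch))
                return PTR_ERR(txch);

        rxch = cpdma_chan_create(dma, 0, rx_handler, 1 /* rx */);
        if (IS_ERR(rxch))
                return PTR_ERR(rxch);

        return cpdma_ctlr_start(dma);
}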
int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
{
        unsigned long flags;
        int desc_num;

        spin_lock_irqsave(&chan->lock, flags);
        desc_num = chan->desc_num;
        spin_unlock_irqrestore(&chan->lock, flags);

        return desc_num;
}
int cpdma_chan_destroy(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr;
        unsigned long flags;

        if (!chan)
                return -EINVAL;
        ctlr = chan->ctlr;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (chan->state != CPDMA_STATE_IDLE)
                cpdma_chan_stop(chan);
        ctlr->channels[chan->chan_num] = NULL;
        ctlr->chan_num--;
        devm_kfree(ctlr->dev, chan);
        cpdma_chan_split_pool(ctlr);

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
int cpdma_chan_get_stats(struct cpdma_chan *chan,
                         struct cpdma_chan_stats *stats)
{
        unsigned long flags;

        if (!chan)
                return -EINVAL;
        spin_lock_irqsave(&chan->lock, flags);
        memcpy(stats, &chan->stats, sizeof(*stats));
        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}
static void __cpdma_chan_submit(struct cpdma_chan *chan,
                                struct cpdma_desc __iomem *desc)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        struct cpdma_desc __iomem *prev = chan->tail;
        struct cpdma_desc_pool *pool = ctlr->pool;
        dma_addr_t desc_dma;
        u32 mode;

        desc_dma = desc_phys(pool, desc);

        /* simple case - idle channel */
        if (!chan->head) {
                chan->stats.head_enqueue++;
                chan->head = desc;
                chan->tail = desc;
                if (chan->state == CPDMA_STATE_ACTIVE)
                        chan_write(chan, hdp, desc_dma);
                return;
        }

        /* first chain the descriptor at the tail of the list */
        desc_write(prev, hw_next, desc_dma);
        chan->tail = desc;
        chan->stats.tail_enqueue++;

        /* next check if EOQ has been triggered already */
        mode = desc_read(prev, hw_mode);
        if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
            (chan->state == CPDMA_STATE_ACTIVE)) {
                desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
                chan_write(chan, hdp, desc_dma);
                chan->stats.misqueued++;
        }
}
static int cpdma_chan_submit_si(struct submit_info *si)
{
        struct cpdma_chan *chan = si->chan;
        struct cpdma_ctlr *ctlr = chan->ctlr;
        int len = si->len;
        struct cpdma_desc __iomem *desc;
        dma_addr_t buffer;
        u32 mode;
        int ret;

        if (chan->count >= chan->desc_num) {
                chan->stats.desc_alloc_fail++;
                return -ENOMEM;
        }

        desc = cpdma_desc_alloc(ctlr->pool);
        if (!desc) {
                chan->stats.desc_alloc_fail++;
                return -ENOMEM;
        }

        if (len < ctlr->params.min_packet_size) {
                len = ctlr->params.min_packet_size;
                chan->stats.runt_transmit_buff++;
        }

        mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
        cpdma_desc_to_port(chan, mode, si->directed);

        if (si->data_dma) {
                buffer = si->data_dma;
                dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir);
        } else {
                buffer = dma_map_single(ctlr->dev, si->data_virt, len, chan->dir);
                ret = dma_mapping_error(ctlr->dev, buffer);
                if (ret) {
                        cpdma_desc_free(ctlr->pool, desc, 1);
                        return -EINVAL;
                }
        }

        /* Relaxed IO accessors can be used here as there is a read barrier
         * at the end of the write sequence.
         */
        writel_relaxed(0, &desc->hw_next);
        writel_relaxed(buffer, &desc->hw_buffer);
        writel_relaxed(len, &desc->hw_len);
        writel_relaxed(mode | len, &desc->hw_mode);
        writel_relaxed((uintptr_t)si->token, &desc->sw_token);
        writel_relaxed(buffer, &desc->sw_buffer);
        writel_relaxed(si->data_dma ? len | CPDMA_DMA_EXT_MAP : len,
                       &desc->sw_len);
        desc_read(desc, sw_len);

        __cpdma_chan_submit(chan, desc);

        if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
                chan_write(chan, rxfree, 1);

        chan->count++;
        return 0;
}
int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
                           int len, int directed)
{
        struct submit_info si = {
                .chan = chan, .token = token, .data_virt = data,
                .data_dma = 0, .len = len, .directed = directed,
        };
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state == CPDMA_STATE_TEARDOWN) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }

        ret = cpdma_chan_submit_si(&si);
        spin_unlock_irqrestore(&chan->lock, flags);
        return ret;
}

int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
                                  dma_addr_t data, int len, int directed)
{
        struct submit_info si = {
                .chan = chan, .token = token, .data_virt = NULL,
                .data_dma = data, .len = len, .directed = directed,
        };
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state == CPDMA_STATE_TEARDOWN) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }

        ret = cpdma_chan_submit_si(&si);
        spin_unlock_irqrestore(&chan->lock, flags);
        return ret;
}

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
                      int len, int directed)
{
        struct submit_info si = {
                .chan = chan, .token = token, .data_virt = data,
                .data_dma = 0, .len = len, .directed = directed,
        };
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }

        ret = cpdma_chan_submit_si(&si);
        spin_unlock_irqrestore(&chan->lock, flags);
        return ret;
}

int cpdma_chan_submit_mapped(struct cpdma_chan *chan, void *token,
                             dma_addr_t data, int len, int directed)
{
        struct submit_info si = {
                .chan = chan, .token = token, .data_virt = NULL,
                .data_dma = data, .len = len, .directed = directed,
        };
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }

        ret = cpdma_chan_submit_si(&si);
        spin_unlock_irqrestore(&chan->lock, flags);
        return ret;
}
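/*
 * Hedged usage sketch (ours): a typical transmit path queues the packet with
 * the skb itself as the completion token; the handler registered through
 * cpdma_chan_create() gets that token back once the hardware is done.
 * example_xmit() is an illustrative name, not a driver function.
 */
static netdev_tx_t example_xmit(struct cpdma_chan *txch, struct sk_buff *skb)
{
        int ret;

        /* directed == 0: no directed-to-port override in this sketch */
        ret = cpdma_chan_submit(txch, skb, skb->data, skb_headlen(skb), 0);
        if (ret) {
                /* ring or descriptor pool full; tell the stack to retry */
                return NETDEV_TX_BUSY;
        }
        return NETDEV_TX_OK;
}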
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        struct cpdma_desc_pool *pool = ctlr->pool;
        bool free_tx_desc;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        free_tx_desc = (chan->count < chan->desc_num) &&
                       gen_pool_avail(pool->gen_pool);
        spin_unlock_irqrestore(&chan->lock, flags);
        return free_tx_desc;
}
static void __cpdma_chan_free(struct cpdma_chan *chan,
                              struct cpdma_desc __iomem *desc,
                              int outlen, int status)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        struct cpdma_desc_pool *pool = ctlr->pool;
        dma_addr_t buff_dma;
        int origlen;
        uintptr_t token;

        token    = desc_read(desc, sw_token);
        origlen  = desc_read(desc, sw_len);
        buff_dma = desc_read(desc, sw_buffer);

        if (origlen & CPDMA_DMA_EXT_MAP) {
                /* externally mapped buffer: only sync, caller owns mapping */
                origlen &= ~CPDMA_DMA_EXT_MAP;
                dma_sync_single_for_cpu(ctlr->dev, buff_dma, origlen,
                                        chan->dir);
        } else {
                dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
        }

        cpdma_desc_free(pool, desc, 1);
        (*chan->handler)((void *)token, outlen, status);
}
static int __cpdma_chan_process(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        struct cpdma_desc __iomem *desc;
        int status, outlen;
        int cb_status = 0;
        struct cpdma_desc_pool *pool = ctlr->pool;
        dma_addr_t desc_dma;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);

        desc = chan->head;
        if (!desc) {
                chan->stats.empty_dequeue++;
                status = -ENOENT;
                goto unlock_ret;
        }
        desc_dma = desc_phys(pool, desc);

        status = desc_read(desc, hw_mode);
        outlen = status & 0x7ff;
        if (status & CPDMA_DESC_OWNER) {
                chan->stats.busy_dequeue++;
                status = -EBUSY;
                goto unlock_ret;
        }

        if (status & CPDMA_DESC_PASS_CRC)
                outlen -= CPDMA_DESC_CRC_LEN;

        status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
                           CPDMA_DESC_PORT_MASK | CPDMA_RX_VLAN_ENCAP);

        chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
        chan_write(chan, cp, desc_dma);
        chan->count--;
        chan->stats.good_dequeue++;

        if ((status & CPDMA_DESC_EOQ) && chan->head) {
                chan->stats.requeue++;
                chan_write(chan, hdp, desc_phys(pool, chan->head));
        }

        spin_unlock_irqrestore(&chan->lock, flags);
        if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
                cb_status = -ENOSYS;
        else
                cb_status = status;

        __cpdma_chan_free(chan, desc, outlen, cb_status);
        return status;

unlock_ret:
        spin_unlock_irqrestore(&chan->lock, flags);
        return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
        int used = 0, ret = 0;

        if (chan->state != CPDMA_STATE_ACTIVE)
                return -EINVAL;

        while (used < quota) {
                ret = __cpdma_chan_process(chan);
                if (ret < 0)
                        break;
                used++;
        }
        return used;
}
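/*
 * Hedged usage sketch (ours): completion processing is normally driven from
 * a NAPI poll handler with the budget as the quota; a short return means the
 * channel ran dry and its interrupt can be re-armed.  example_poll() is an
 * illustrative name.
 */
static int example_poll(struct cpdma_chan *ch, int budget)
{
        int done = cpdma_chan_process(ch, budget);

        if (done < budget)
                cpdma_chan_int_ctrl(ch, true); /* re-enable channel irq */
        return done;
}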
int cpdma_chan_start(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&ctlr->lock, flags);
        ret = cpdma_chan_set_chan_shaper(chan);
        spin_unlock_irqrestore(&ctlr->lock, flags);
        if (ret)
                return ret;

        return cpdma_chan_on(chan);
}
int cpdma_chan_stop(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        struct cpdma_desc_pool *pool = ctlr->pool;
        unsigned long flags;
        int ret;
        unsigned timeout;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state == CPDMA_STATE_TEARDOWN) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }

        chan->state = CPDMA_STATE_TEARDOWN;
        dma_reg_write(ctlr, chan->int_clear, chan->mask);

        /* trigger teardown */
        dma_reg_write(ctlr, chan->td, chan_linear(chan));

        /* wait for teardown complete */
        timeout = 100 * 100; /* 100 ms */
        while (timeout) {
                u32 cp = chan_read(chan, cp);

                if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
                        break;
                udelay(10);
                timeout--;
        }
        WARN_ON(!timeout);
        chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

        /* handle completed packets */
        spin_unlock_irqrestore(&chan->lock, flags);
        do {
                ret = __cpdma_chan_process(chan);
                if (ret < 0)
                        break;
        } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);

        /* remaining packets haven't been tx/rx'ed, clean them up */
        spin_lock_irqsave(&chan->lock, flags);
        while (chan->head) {
                struct cpdma_desc __iomem *desc = chan->head;
                dma_addr_t next_dma;

                next_dma = desc_read(desc, hw_next);
                chan->head = desc_from_phys(pool, next_dma);
                chan->count--;
                chan->stats.teardown_dequeue++;

                /* issue callback without locks held */
                spin_unlock_irqrestore(&chan->lock, flags);
                __cpdma_chan_free(chan, desc, 0, -ENOSYS);
                spin_lock_irqsave(&chan->lock, flags);
        }

        chan->state = CPDMA_STATE_IDLE;
        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }

        dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
                      chan->mask);
        spin_unlock_irqrestore(&chan->lock, flags);

        return 0;
}
int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&ctlr->lock, flags);
        ret = _cpdma_control_get(ctlr, control);
        spin_unlock_irqrestore(&ctlr->lock, flags);

        return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&ctlr->lock, flags);
        ret = _cpdma_control_set(ctlr, control, value);
        spin_unlock_irqrestore(&ctlr->lock, flags);

        return ret;
}
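/*
 * Hedged usage sketch (ours): glue code adjusts controller knobs through
 * this locked pair; the control IDs index the driver's controls[] table,
 * CPDMA_TX_RLIM being the one the shaper code above manipulates.
 * example_add_tx_rlim() is an illustrative name.
 */
static void example_add_tx_rlim(struct cpdma_ctlr *ctlr, u32 ch_mask)
{
        int rmask = cpdma_control_get(ctlr, CPDMA_TX_RLIM);

        if (rmask >= 0)
                cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask | ch_mask);
}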
int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr)
{
        return ctlr->num_rx_desc;
}

int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
{
        return ctlr->num_tx_desc;
}

int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
{
        unsigned long flags;
        int temp, ret;

        spin_lock_irqsave(&ctlr->lock, flags);

        temp = ctlr->num_rx_desc;
        ctlr->num_rx_desc = num_rx_desc;
        ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
        ret = cpdma_chan_split_pool(ctlr);
        if (ret) {
                /* restore the previous rx/tx split on failure */
                ctlr->num_rx_desc = temp;
                ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
        }

        spin_unlock_irqrestore(&ctlr->lock, flags);

        return ret;
}
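/*
 * Worked example (ours): with a 256-descriptor pool split 128/128, an
 * ethtool-style request for 160 rx descriptors leaves 256 - 160 == 96 for
 * tx; if the re-split fails (e.g. per-channel weights no longer fit), both
 * counters are rolled back to the previous 128/128 split before returning.
 */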