
// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments Ethernet Switch Driver
 */

#include "cpsw.h"

int (*cpsw_slave_index)(struct cpsw_common *cpsw, struct cpsw_priv *priv);
void cpsw_intr_enable(struct cpsw_common *cpsw)
{
        writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
        writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);

        cpdma_ctlr_int_ctrl(cpsw->dma, true);
}

void cpsw_intr_disable(struct cpsw_common *cpsw)
{
        writel_relaxed(0, &cpsw->wr_regs->tx_en);
        writel_relaxed(0, &cpsw->wr_regs->rx_en);

        cpdma_ctlr_int_ctrl(cpsw->dma, false);
}
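/*
 * TX completion callback: invoked by the CPDMA layer for both sk_buff and
 * XDP frame descriptors; it hands TX timestamps to CPTS and updates the
 * netdev counters.
 */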
/* in cpsw_tx_handler(): */
        ndev = xmeta->ndev;
        ch = xmeta->ch;
        /* ... */
        ndev = skb->dev;
        /* ... */
        cpts_tx_timestamp(ndev_to_cpsw(ndev)->cpts, skb);
        /* ... */
        ndev->stats.tx_packets++;
        ndev->stats.tx_bytes += len;
/* in cpsw_tx_interrupt(): */
        struct cpsw_common *cpsw = dev_id;

        writel(0, &cpsw->wr_regs->tx_en);
        cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);

        if (cpsw->quirk_irq) {
                disable_irq_nosync(cpsw->irqs_table[1]);
                cpsw->tx_irq_disabled = true;
        }

        napi_schedule(&cpsw->napi_tx);

/* in cpsw_rx_interrupt(): */
        struct cpsw_common *cpsw = dev_id;

        writel(0, &cpsw->wr_regs->rx_en);
        cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);

        if (cpsw->quirk_irq) {
                disable_irq_nosync(cpsw->irqs_table[0]);
                cpsw->rx_irq_disabled = true;
        }

        napi_schedule(&cpsw->napi_rx);

/* in cpsw_misc_interrupt(): */
        struct cpsw_common *cpsw = dev_id;

        writel(0, &cpsw->wr_regs->misc_en);
        cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_MISC);
        cpts_misc_interrupt(cpsw->cpts);
        writel(0x10, &cpsw->wr_regs->misc_en);
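/*
 * NAPI poll handlers: the *_mq_poll variants walk every active CPDMA channel
 * and split the budget between them, while the single-channel variants also
 * re-enable the per-direction IRQ when the quirk_irq workaround is in use.
 */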
/* in cpsw_tx_mq_poll(): */
        struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
        /* ... */
        ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
        /* ... */
                txv = &cpsw->txv[ch];
                if (unlikely(txv->budget > budget - num_tx))
                        cur_budget = budget - num_tx;
                else
                        cur_budget = txv->budget;

                num_tx += cpdma_chan_process(txv->ch, cur_budget);
        /* ... */
                writel(0xff, &cpsw->wr_regs->tx_en);
/* in cpsw_tx_poll(): */
        struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
        /* ... */
        num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
        /* ... */
                writel(0xff, &cpsw->wr_regs->tx_en);
                if (cpsw->tx_irq_disabled) {
                        cpsw->tx_irq_disabled = false;
                        enable_irq(cpsw->irqs_table[1]);
                }
/* in cpsw_rx_mq_poll(): */
        struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
        /* ... */
        ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
        /* ... */
                rxv = &cpsw->rxv[ch];
                if (unlikely(rxv->budget > budget - num_rx))
                        cur_budget = budget - num_rx;
                else
                        cur_budget = rxv->budget;

                num_rx += cpdma_chan_process(rxv->ch, cur_budget);
        /* ... */
                writel(0xff, &cpsw->wr_regs->rx_en);
/* in cpsw_rx_poll(): */
        struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
        /* ... */
        num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
        /* ... */
                writel(0xff, &cpsw->wr_regs->rx_en);
                if (cpsw->rx_irq_disabled) {
                        cpsw->rx_irq_disabled = false;
                        enable_irq(cpsw->irqs_table[0]);
                }
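/*
 * cpsw_rx_vlan_encap() parses the VLAN encapsulation word that the switch
 * prepends to received frames and tags or untags the skb accordingly.
 */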
/* in cpsw_rx_vlan_encap(): */
        struct cpsw_priv *priv = netdev_priv(skb->dev);
        u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
        struct cpsw_common *cpsw = priv->cpsw;

        /* Ignore unknown & Priority-tagged packets */
        /* ... */

        if (!cpsw_ale_get_vlan_p0_untag(cpsw->ale, vid)) {
                /* ... */
        }

        /* strip vlan tag for VLAN-tagged packet */
        /* ... */
        memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
/* in cpsw_set_slave_mac(): */
        slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
        slave_write(slave, mac_lo(priv->mac_addr), SA_LO);

/* in soft_reset(): */
        WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
/* in cpsw_ndo_tx_timeout(): */
        struct cpsw_common *cpsw = priv->cpsw;
        /* ... */
        ndev->stats.tx_errors++;
        cpsw_intr_disable(cpsw);
        for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
                cpdma_chan_stop(cpsw->txv[ch].ch);
                cpdma_chan_start(cpsw->txv[ch].ch);
        }
        /* ... */
        cpsw_intr_enable(cpsw);
static int cpsw_get_common_speed(struct cpsw_common *cpsw)
{
        int i, speed;
        for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
                if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
                        speed += cpsw->slaves[i].phy->speed;
        return speed;
}

int cpsw_need_resplit(struct cpsw_common *cpsw)
{
        /* ... */
        /* re-split resources only in case speed was changed */
        speed = cpsw_get_common_speed(cpsw);
        if (speed == cpsw->speed || !speed)
                return 0;

        cpsw->speed = speed;
        for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
                ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
                /* ... */
        }
        if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
                return 0;
        /* ... */
}
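/*
 * cpsw_split_res() redistributes the NAPI budget and CPDMA channel weights
 * across the TX channels: rate-limited channels get a share derived from
 * their configured rate, the remaining channels split the rest evenly, and
 * whatever RX budget is left over goes to RX channel 0.
 */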
void cpsw_split_res(struct cpsw_common *cpsw)
{
        struct cpsw_vector *txv = cpsw->txv;
        /* ... */

        for (i = 0; i < cpsw->tx_ch_num; i++) {
                /* ... */
        }

        if (cpsw->tx_ch_num == rlim_ch_num) {
                /* ... */
        }
        /* ... */
        ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
        /* ... */
        max_rate = cpsw->speed * 1000;
        /* ... */
        ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
                    (cpsw->tx_ch_num - rlim_ch_num);
        bigest_rate = (max_rate - consumed_rate) /
                      (cpsw->tx_ch_num - rlim_ch_num);
        /* ... */

        for (i = 0; i < cpsw->tx_ch_num; i++) {
                /* ... */
                cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
                /* ... */
                cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
                /* ... */
                budget -= txv[i].budget;
        }
        /* ... */

        ch_budget = budget / cpsw->rx_ch_num;
        for (i = 0; i < cpsw->rx_ch_num; i++) {
                cpsw->rxv[i].budget = ch_budget;
                budget -= ch_budget;
        }
        /* ... */
        cpsw->rxv[0].budget += budget;
}
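/*
 * Common probe-time initialization shared by the cpsw drivers: pick the
 * version-specific register layout, set up the slave ports, the ALE, the
 * CPDMA controller and CPTS.
 */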
int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
                     /* ... */)
{
        struct device *dev = cpsw->dev;
        /* ... */

        data = &cpsw->data;
        cpsw->rx_ch_num = 1;
        cpsw->tx_ch_num = 1;

        cpsw->version = readl(&cpsw->regs->id_ver);

        switch (cpsw->version) {
        case CPSW_VERSION_1:
                cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
                /* ... */
                cpsw->hw_stats = ss_regs + CPSW1_HW_STATS;
                /* ... */
                break;
        case CPSW_VERSION_2:
        case CPSW_VERSION_3:
                cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
                /* ... */
                cpsw->hw_stats = ss_regs + CPSW2_HW_STATS;
                /* ... */
                break;
        default:
                dev_err(dev, "unknown version 0x%08x\n", cpsw->version);
                return -ENODEV;
        }

        for (i = 0; i < cpsw->data.slaves; i++) {
                struct cpsw_slave *slave = &cpsw->slaves[i];
                void __iomem *regs = cpsw->regs;

                slave->slave_num = i;
                slave->data = &cpsw->data.slave_data[i];
                slave->regs = regs + slave_offset;
                slave->port_vlan = slave->data->dual_emac_res_vlan;
                slave->mac_sl = cpsw_sl_get("cpsw", dev, regs + sliver_offset);
                if (IS_ERR(slave->mac_sl))
                        return PTR_ERR(slave->mac_sl);
                /* ... */
        }

        /* ... */
        ale_params.dev_id = "cpsw";
        /* ... */
        cpsw->ale = cpsw_ale_create(&ale_params);
        if (IS_ERR(cpsw->ale)) {
                /* ... */
                return PTR_ERR(cpsw->ale);
        }

        /* ... */
        dma_params.num_chan = data->channels;
        /* ... */
        dma_params.desc_mem_size = data->bd_ram_size;
        /* ... */
        dma_params.bus_freq_mhz = cpsw->bus_freq_mhz;
        /* ... */
        cpsw->dma = cpdma_ctlr_create(&dma_params);
        if (!cpsw->dma) {
                /* ... */
                return -ENOMEM;
        }

        cpts_node = of_get_child_by_name(cpsw->dev->of_node, "cpts");
        if (!cpts_node)
                cpts_node = cpsw->dev->of_node;

        cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpts_node,
                                 /* ... */);
        if (IS_ERR(cpsw->cpts)) {
                ret = PTR_ERR(cpsw->cpts);
                cpdma_ctlr_destroy(cpsw->dma);
        }
        /* ... */
}
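/*
 * Hardware timestamping (CPTS) configuration; the register layout differs
 * between CPSW version 1 and versions 2/3, hence the per-version helpers.
 */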
/* in cpsw_hwtstamp_v1(): */
        struct cpsw_common *cpsw = priv->cpsw;
        struct cpsw_slave *slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];

        if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
                /* ... */
        }
        /* ... */
        if (priv->tx_ts_enabled)
                /* ... */
        if (priv->rx_ts_enabled)
                /* ... */

/* in cpsw_hwtstamp_v2(): */
        struct cpsw_common *cpsw = priv->cpsw;
        /* ... */
        slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
        /* ... */
        switch (cpsw->version) {
        /* ... */
                if (priv->tx_ts_enabled)
                        /* ... */
                if (priv->rx_ts_enabled)
                        /* ... */
        /* ... */
                if (priv->tx_ts_enabled)
                        /* ... */
                if (priv->rx_ts_enabled)
                        /* ... */
        /* ... */
        }
        /* ... */
        writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
        writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
/* in cpsw_hwtstamp_set(): */
        struct cpsw_common *cpsw = priv->cpsw;
        /* ... */
        if (cpsw->version != CPSW_VERSION_1 &&
            cpsw->version != CPSW_VERSION_2 &&
            cpsw->version != CPSW_VERSION_3)
                return -EOPNOTSUPP;

        if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
                return -EFAULT;
        /* ... */
                return -EINVAL;
        /* ... */
                return -ERANGE;

        switch (cfg.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                priv->rx_ts_enabled = 0;
                break;
        /* ... */
                return -ERANGE;
        /* ... */
                priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
                break;
        default:
                return -ERANGE;
        }

        priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;

        switch (cpsw->version) {
        /* ... */
        }

        return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;

/* in cpsw_hwtstamp_get(): */
        struct cpsw_common *cpsw = ndev_to_cpsw(dev);
        /* ... */
        if (cpsw->version != CPSW_VERSION_1 &&
            cpsw->version != CPSW_VERSION_2 &&
            cpsw->version != CPSW_VERSION_3)
                return -EOPNOTSUPP;
        /* ... */
        cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
        cfg.rx_filter = priv->rx_ts_enabled;

        return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;

/* stubs used when CPTS timestamping support is not built in */
/* in cpsw_hwtstamp_get(): */
        return -EOPNOTSUPP;

/* in cpsw_hwtstamp_set(): */
        return -EOPNOTSUPP;
/* in cpsw_ndo_ioctl(): */
        struct cpsw_common *cpsw = priv->cpsw;
        int slave_no = cpsw_slave_index(cpsw, priv);
        /* ... */
                return -EINVAL;

        switch (cmd) {
        /* ... */
        }

        if (!cpsw->slaves[slave_no].phy)
                return -EOPNOTSUPP;
        return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
/* in cpsw_ndo_set_tx_maxrate(): */
        struct cpsw_common *cpsw = priv->cpsw;
        /* ... */
        ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
        /* ... */
        min_rate = cpdma_chan_get_min_rate(cpsw->dma);
        /* ... */
                dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
                        min_rate);
                return -EINVAL;
        /* ... */
        if (rate > cpsw->speed) {
                dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
                return -EINVAL;
        }

        ret = pm_runtime_get_sync(cpsw->dev);
        if (ret < 0) {
                pm_runtime_put_noidle(cpsw->dev);
                return ret;
        }

        ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
        pm_runtime_put(cpsw->dev);
        /* ... */
        for (i = 0; i < cpsw->data.slaves; i++) {
                slave = &cpsw->slaves[i];
                if (!slave->ndev)
                        continue;

                netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
        }

        cpsw_split_res(cpsw);
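/*
 * CBS / MQPRIO offload helpers: a traffic class is mapped to one of the
 * per-port TX FIFO shapers and the requested bandwidth is programmed
 * relative to the link speed recorded in shp_cfg_speed.
 */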
/* in cpsw_tc_to_fifo(): */
        if (tc == num_tc - 1)
                return 0;

        return CPSW_FIFO_SHAPERS_NUM - tc;

/* in cpsw_shp_is_off(): */
        struct cpsw_common *cpsw = priv->cpsw;
        /* ... */
        val = readl_relaxed(&cpsw->regs->ptype);

        slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
        shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
        /* ... */

/* in cpsw_fifo_shp_on(): */
        struct cpsw_common *cpsw = priv->cpsw;
        /* ... */
        val = readl_relaxed(&cpsw->regs->ptype);

        slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
        shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
        mask = (1 << --fifo) << shift;
        /* ... */
        writel_relaxed(val, &cpsw->regs->ptype);
/* in cpsw_set_fifo_bw(): */
        struct cpsw_common *cpsw = priv->cpsw;
        /* ... */
        if (bw > priv->shp_cfg_speed * 1000)
                /* ... */

        slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
        /* ... */
        for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
                /* ... */
                if (i >= fifo || !priv->fifo_bw[i])
                        /* ... */
                dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
                /* ... */
                if (!priv->fifo_bw[i] && i > fifo) {
                        dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
                        return -EINVAL;
                }

                shift = (i - 1) * 8;
                /* ... */
                val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
                /* ... */
                if (priv->fifo_bw[i])
                        /* ... */
        }
        /* ... */
        priv->fifo_bw[fifo] = bw;

        dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
                 DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));
        /* ... */
        dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
        return -EINVAL;

/* in cpsw_set_fifo_rlimit(): */
        struct cpsw_common *cpsw = priv->cpsw;
        /* ... */
        slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
        tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
                       /* ... */;
        /* ... */
        priv->shp_cfg_speed = 0;
/*
 * ...
 * class A - prio 3
 * class B - prio 2
 * ...
 */

/* in cpsw_set_cbs(): */
        struct cpsw_common *cpsw = priv->cpsw;
        /* ... */
        tc = netdev_txq_to_tc(priv->ndev, qopt->queue);
        /* ... */
        fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
        if (!fifo) {
                dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
                return -EINVAL;
        }

        if (!qopt->enable && !priv->fifo_bw[fifo])
                return 0;

        slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
        if (slave->phy && slave->phy->link) {
                if (priv->shp_cfg_speed &&
                    priv->shp_cfg_speed != slave->phy->speed)
                        prev_speed = priv->shp_cfg_speed;

                priv->shp_cfg_speed = slave->phy->speed;
        }

        if (!priv->shp_cfg_speed) {
                dev_err(priv->dev, "Link speed is not known");
                return -1;
        }

        ret = pm_runtime_get_sync(cpsw->dev);
        if (ret < 0) {
                pm_runtime_put_noidle(cpsw->dev);
                return ret;
        }

        bw = qopt->enable ? qopt->idleslope : 0;
        /* ... */
                priv->shp_cfg_speed = prev_speed;
        /* ... */
        dev_warn(priv->dev,
                 /* ... */);
        /* ... */
        pm_runtime_put_sync(cpsw->dev);
/* in cpsw_set_mqprio(): */
        struct cpsw_common *cpsw = priv->cpsw;
        /* ... */
        num_tc = mqprio->qopt.num_tc;
        if (num_tc > CPSW_TC_NUM)
                return -EINVAL;

        if (mqprio->mode != TC_MQPRIO_MODE_DCB)
                return -EINVAL;

        ret = pm_runtime_get_sync(cpsw->dev);
        if (ret < 0) {
                pm_runtime_put_noidle(cpsw->dev);
                return ret;
        }
        /* ... */
                tc = mqprio->qopt.prio_tc_map[i];
        /* ... */
                count = mqprio->qopt.count[i];
                offset = mqprio->qopt.offset[i];
        /* ... */
        if (!mqprio->qopt.hw) {
                /* ... */
        }

        priv->mqprio_hw = mqprio->qopt.hw;
        /* ... */
        offset = cpsw->version == CPSW_VERSION_1 ?
                 /* ... */;

        slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
        /* ... */
        pm_runtime_put_sync(cpsw->dev);

/* in cpsw_ndo_setup_tc(): */
        switch (type) {
        /* ... */
        default:
                return -EOPNOTSUPP;
        }
/* in cpsw_cbs_resume(): */
        for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
                bw = priv->fifo_bw[fifo];
                /* ... */
        }

/* in cpsw_mqprio_resume(): */
        struct cpsw_common *cpsw = priv->cpsw;
        /* ... */
        if (!priv->mqprio_hw)
                return;
        /* ... */
                tc = netdev_get_prio_tc_map(priv->ndev, i);
                fifo = CPSW_FIFO_SHAPERS_NUM - tc;
        /* ... */
        tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
                     /* ... */;
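/*
 * RX buffer setup: each RX channel owns a page_pool, and pages are handed to
 * CPDMA with cpdma_chan_idle_submit_mapped() while the channel is still idle.
 */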
/* in cpsw_fill_rx_channels(): */
        struct cpsw_common *cpsw = priv->cpsw;
        /* ... */
        for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
                pool = cpsw->page_pool[ch];
                ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
                /* ... */
                                return -ENOMEM;
                        /* ... */
                        xmeta->ndev = priv->ndev;
                        xmeta->ch = ch;
                        /* ... */
                        ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch,
                                                            /* ... */
                                                            cpsw->rx_packet_max,
                                                            /* ... */);
                /* ... */
        }
static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
                                               /* ... */)
{
        /* ... */
        pp_params.dev = cpsw->dev;
        /* ... */
                dev_err(cpsw->dev, "cannot create rx page pool\n");
        /* ... */
}

static int cpsw_create_rx_pool(struct cpsw_common *cpsw, int ch)
{
        /* ... */
        pool_size = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
        pool = cpsw_create_page_pool(cpsw, pool_size);
        /* ... */
                cpsw->page_pool[ch] = pool;
        /* ... */
}

/* in cpsw_ndev_create_xdp_rxq(): */
        struct cpsw_common *cpsw = priv->cpsw;
        /* ... */
        pool = cpsw->page_pool[ch];
        rxq = &priv->xdp_rxq[ch];

        ret = xdp_rxq_info_reg(rxq, priv->ndev, ch);
        /* ... */
/* in cpsw_ndev_destroy_xdp_rxq(): */
        struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch];
        /* ... */

void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw)
{
        /* ... */
        for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
                for (i = 0; i < cpsw->data.slaves; i++) {
                        ndev = cpsw->slaves[i].ndev;
                        /* ... */
                }

                page_pool_destroy(cpsw->page_pool[ch]);
                cpsw->page_pool[ch] = NULL;
        }
}

int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw)
{
        /* ... */
        for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
                ret = cpsw_create_rx_pool(cpsw, ch);
                /* ... */

                for (i = 0; i < cpsw->data.slaves; i++) {
                        ndev = cpsw->slaves[i].ndev;
                        /* ... */
                }
        }
        /* ... */
        cpsw_destroy_xdp_rxqs(cpsw);
        /* ... */
}
/* in cpsw_xdp_prog_setup(): */
        struct bpf_prog *prog = bpf->prog;

        if (!priv->xdpi.prog && !prog)
                return 0;
        /* ... */
        WRITE_ONCE(priv->xdp_prog, prog);

        xdp_attachment_setup(&priv->xdpi, bpf);
        /* ... */

/* in cpsw_ndo_bpf(): */
        switch (bpf->command) {
        /* ... */
        default:
                return -EINVAL;
        }
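/*
 * XDP transmit path: frames are queued on TX channel 0 with the per-frame
 * metadata (ndev, channel) kept in the xdp_frame headroom.
 */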
/* in cpsw_xdp_tx_frame(): */
        struct cpsw_common *cpsw = priv->cpsw;
        /* ... */
        xmeta->ndev = priv->ndev;
        xmeta->ch = 0;
        txch = cpsw->txv[0].ch;
        /* ... */
                dma += xdpf->headroom + sizeof(struct xdp_frame);
                /* ... */
                                               dma, xdpf->len, port);
        /* ... */
                if (sizeof(*xmeta) > xdpf->headroom) {
                        /* ... */
                        return -EINVAL;
                }
                /* ... */
                                        xdpf->data, xdpf->len, port);
        /* ... */
                priv->ndev->stats.tx_dropped++;
        /* ... */

/* in cpsw_run_xdp(): */
        struct cpsw_common *cpsw = priv->cpsw;
        struct net_device *ndev = priv->ndev;
        /* ... */
        prog = READ_ONCE(priv->xdp_prog);
        /* ... */
        switch (act) {
        /* XDP_PASS / XDP_TX / XDP_REDIRECT / default handling ... */
        }
        /* ... */
        page_pool_recycle_direct(cpsw->page_pool[ch], page);