// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * The Sparx5 Chip Register Model can be browsed at this location:
 * https://github.com/microchip-ung/sparx-5_reginfo
 */

#include <linux/dma-mapping.h>

/* Frame DMA DCB format
 *
 * (ASCII layout diagram elided from this excerpt: each DCB carries a
 *  Next Ptr word, an Info word, and a Data Ptr/Status word pair for
 *  every data block in the DCB)
 */
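
/* Recycle a DCB onto the tail of the RX DCB chain: every data block is
 * handed back to the hardware with an interrupt-on-done status, the DCB
 * itself is terminated, and the previous tail is linked to it through
 * the caller-supplied hardware address of this DCB.
 */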
static void sparx5_fdma_rx_add_dcb(struct sparx5_rx *rx,
				   struct sparx5_rx_dcb_hw *dcb,
				   u64 nextptr)
{
	int idx;

	/* Reset the status of each data block in the DCB */
	for (idx = 0; idx < FDMA_RX_DCB_MAX_DBS; ++idx) {
		struct sparx5_db_hw *db = &dcb->db[idx];

		db->status = FDMA_DCB_STATUS_INTR;
	}
	dcb->nextptr = FDMA_DCB_INVALID_DATA;
	dcb->info = FDMA_DCB_INFO_DATAL(FDMA_XTR_BUFFER_SIZE);
	rx->last_entry->nextptr = nextptr;
	rx->last_entry = dcb;
}
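
/* Prepare a TX DCB: all data blocks start out as DONE (owned by
 * software) and the DCB is terminated. The XTR buffer size define is
 * evidently reused for the injection direction as well.
 */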
static void sparx5_fdma_tx_add_dcb(struct sparx5_tx *tx,
				   struct sparx5_tx_dcb_hw *dcb,
				   u64 nextptr)
{
	int idx;

	for (idx = 0; idx < FDMA_TX_DCB_MAX_DBS; ++idx) {
		struct sparx5_db_hw *db = &dcb->db[idx];

		db->status = FDMA_DCB_STATUS_DONE;
	}
	dcb->nextptr = FDMA_DCB_INVALID_DATA;
	dcb->info = FDMA_DCB_INFO_DATAL(FDMA_XTR_BUFFER_SIZE);
}
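
/* Bring up the RX (extraction) channel: program the channel linked-list
 * pointer registers with the DCB chain address, configure the channel,
 * unmask its data-block interrupt and activate it.
 */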
static void sparx5_fdma_rx_activate(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
	/* Write the buffer address in the LLP and LLP1 regs */
	spx5_wr(((u64)rx->dma) & GENMASK(31, 0), sparx5,
		FDMA_DCB_LLP(rx->channel_id));
	spx5_wr(((u64)rx->dma) >> 32, sparx5, FDMA_DCB_LLP1(rx->channel_id));

	/* Set the number of RX DBs to be used, and DB end-of-frame interrupt */
	spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
		FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
		FDMA_CH_CFG_CH_INJ_PORT_SET(XTR_QUEUE),
		sparx5, FDMA_CH_CFG(rx->channel_id));

	/* Set the RX Watermark to max */
	spx5_rmw(FDMA_XTR_CFG_XTR_FIFO_WM_SET(31), FDMA_XTR_CFG_XTR_FIFO_WM,
		 sparx5, FDMA_XTR_CFG);

	/* Start RX fdma */
	spx5_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0), FDMA_PORT_CTRL_XTR_STOP,
		 sparx5, FDMA_PORT_CTRL(0));

	/* Enable RX channel DB interrupt */
	spx5_rmw(BIT(rx->channel_id),
		 BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
		 sparx5, FDMA_INTR_DB_ENA);

	/* Activate the RX channel */
	spx5_wr(BIT(rx->channel_id), sparx5, FDMA_CH_ACTIVATE);
}
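
/* Tear-down mirrors bring-up: deactivate the channel, mask its
 * interrupt, then stop the extraction port.
 */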
static void sparx5_fdma_rx_deactivate(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
	/* Deactivate the RX channel */
	spx5_rmw(0, BIT(rx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
		 sparx5, FDMA_CH_ACTIVATE);

	/* Disable RX channel DB interrupt */
	spx5_rmw(0, BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
		 sparx5, FDMA_INTR_DB_ENA);

	/* Stop RX fdma */
	spx5_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(1), FDMA_PORT_CTRL_XTR_STOP,
		 sparx5, FDMA_PORT_CTRL(0));
}
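
/* TX bring-up parallels the RX side, but no data-block interrupt is
 * enabled; TX completion is apparently tracked through the DCB status
 * words polled in sparx5_fdma_xmit() instead.
 */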
static void sparx5_fdma_tx_activate(struct sparx5 *sparx5, struct sparx5_tx *tx)
{
	/* Write the buffer address in the LLP and LLP1 regs */
	spx5_wr(((u64)tx->dma) & GENMASK(31, 0), sparx5,
		FDMA_DCB_LLP(tx->channel_id));
	spx5_wr(((u64)tx->dma) >> 32, sparx5, FDMA_DCB_LLP1(tx->channel_id));

	/* Set the number of TX DBs to be used, and DB end-of-frame interrupt */
	spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
		FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
		FDMA_CH_CFG_CH_INJ_PORT_SET(INJ_QUEUE),
		sparx5, FDMA_CH_CFG(tx->channel_id));

	/* Start TX fdma */
	spx5_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0), FDMA_PORT_CTRL_INJ_STOP,
		 sparx5, FDMA_PORT_CTRL(0));

	/* Activate the TX channel */
	spx5_wr(BIT(tx->channel_id), sparx5, FDMA_CH_ACTIVATE);
}

static void sparx5_fdma_tx_deactivate(struct sparx5 *sparx5, struct sparx5_tx *tx)
{
	/* Deactivate the TX channel */
	spx5_rmw(0, BIT(tx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
		 sparx5, FDMA_CH_ACTIVATE);
}
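
/* Writing the channel bit to FDMA_CH_RELOAD makes the hardware re-fetch
 * the nextptr of its current DCB, which is how DCBs appended to the
 * chain (or rewritten for TX) get picked up.
 */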
static void sparx5_fdma_rx_reload(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
	/* Reload the RX channel */
	spx5_wr(BIT(rx->channel_id), sparx5, FDMA_CH_RELOAD);
}

static void sparx5_fdma_tx_reload(struct sparx5 *sparx5, struct sparx5_tx *tx)
{
	/* Reload the TX channel */
	spx5_wr(BIT(tx->channel_id), sparx5, FDMA_CH_RELOAD);
}
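
/* RX buffers are ordinary skbs sized to one FDMA data block, so a
 * received frame can be passed to the stack without copying.
 */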
static struct sk_buff *sparx5_fdma_rx_alloc_skb(struct sparx5_rx *rx)
{
	return __netdev_alloc_skb(rx->ndev, FDMA_XTR_BUFFER_SIZE,
				  GFP_ATOMIC);
}
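
/* Take one completed data block off the current RX DCB: swap a fresh
 * skb into the DB, parse the internal frame header (IFH) to find the
 * source port, strip the IFH and (unless NETIF_F_RXFCS is requested)
 * the FCS, then hand the skb to the stack.
 */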
static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
	struct sparx5_db_hw *db_hw;
	unsigned int packet_size;
	struct sparx5_port *port;
	struct sk_buff *new_skb;
	struct frame_info fi;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	/* Check if the DCB is done */
	db_hw = &rx->dcb_entries[rx->dcb_index].db[rx->db_index];
	if (unlikely(!(db_hw->status & FDMA_DCB_STATUS_DONE)))
		return false;
	skb = rx->skb[rx->dcb_index][rx->db_index];
	/* Replace the DB entry with a new SKB */
	new_skb = sparx5_fdma_rx_alloc_skb(rx);
	if (unlikely(!new_skb))
		return false;
	/* Map the new skb data and set the new skb */
	dma_addr = virt_to_phys(new_skb->data);
	rx->skb[rx->dcb_index][rx->db_index] = new_skb;
	db_hw->dataptr = dma_addr;
	packet_size = FDMA_DCB_STATUS_BLOCKL(db_hw->status);
	skb_put(skb, packet_size);
	/* Now do the normal processing of the skb */
	sparx5_ifh_parse((u32 *)skb->data, &fi);
	/* Map to port netdev */
	port = fi.src_port < SPX5_PORTS ? sparx5->ports[fi.src_port] : NULL;
	if (!port || !port->ndev) {
		dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port);
		sparx5_xtr_flush(sparx5, XTR_QUEUE);
		return false;
	}
	skb->dev = port->ndev;
	skb_pull(skb, IFH_LEN * sizeof(u32));
	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
		skb_trim(skb, skb->len - ETH_FCS_LEN);
	skb->protocol = eth_type_trans(skb, skb->dev);
	/* Everything we see on an interface that is in the HW bridge
	 * has already been forwarded
	 */
	if (test_bit(port->portno, sparx5->bridge_mask))
		skb->offload_fwd_mark = 1;
	skb->dev->stats.rx_bytes += skb->len;
	skb->dev->stats.rx_packets++;
	rx->packets++;
	netif_receive_skb(skb);
	return true;
}
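
/* NAPI poll: consume completed data blocks up to the budget. Whenever
 * all DBs of a DCB are consumed, the DCB is recycled onto the chain
 * tail. If the budget was not exhausted the DB interrupt is re-enabled,
 * and the channel is reloaded if anything was consumed.
 */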
static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
{
	struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi);
	struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx);
	int counter = 0;

	while (counter < weight && sparx5_fdma_rx_get_frame(sparx5, rx)) {
		struct sparx5_rx_dcb_hw *old_dcb;

		rx->db_index++;
		counter++;
		/* Check if the DCB can be reused */
		if (rx->db_index != FDMA_RX_DCB_MAX_DBS)
			continue;
		/* As the DCB can be reused, just advance the dcb_index
		 * pointer and set the nextptr in the DCB
		 */
		rx->db_index = 0;
		old_dcb = &rx->dcb_entries[rx->dcb_index];
		rx->dcb_index++;
		rx->dcb_index &= FDMA_DCB_MAX - 1;
		sparx5_fdma_rx_add_dcb(rx, old_dcb,
				       rx->dma +
				       ((unsigned long)old_dcb -
					(unsigned long)rx->dcb_entries));
	}
	if (counter < weight) {
		napi_complete_done(&rx->napi, counter);
		spx5_rmw(BIT(rx->channel_id),
			 BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
			 sparx5, FDMA_INTR_DB_ENA);
	}
	if (counter)
		sparx5_fdma_rx_reload(sparx5, rx);
	return counter;
}
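
/* Advance to the next TX DCB in the flat DCB array, wrapping around at
 * FDMA_DCB_MAX entries.
 */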
static struct sparx5_tx_dcb_hw *sparx5_fdma_next_dcb(struct sparx5_tx *tx,
						     struct sparx5_tx_dcb_hw *dcb)
{
	struct sparx5_tx_dcb_hw *next_dcb;

	next_dcb = dcb;
	next_dcb++;
	/* Handle wrap-around */
	if ((unsigned long)next_dcb ==
	    ((unsigned long)tx->first_entry + FDMA_DCB_MAX * sizeof(*dcb)))
		next_dcb = tx->first_entry;
	return next_dcb;
}
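
/* Inject one frame: claim the next TX DCB, copy the IFH and the frame
 * into the recycled data buffer, and publish the DCB with SOF/EOF and
 * the block length (frame plus IFH plus, presumably, 4 bytes for the
 * FCS). The first transmission activates the channel; later ones only
 * reload it. Note that a DCB whose status is not DONE yet is counted in
 * tx->dropped but is still overwritten.
 */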
int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
{
	struct sparx5_tx_dcb_hw *next_dcb_hw;
	struct sparx5_tx *tx = &sparx5->tx;
	static bool first_time = true;
	struct sparx5_db_hw *db_hw;
	struct sparx5_db *db;

	next_dcb_hw = sparx5_fdma_next_dcb(tx, tx->curr_entry);
	db_hw = &next_dcb_hw->db[0];
	if (!(db_hw->status & FDMA_DCB_STATUS_DONE))
		tx->dropped++;
	db = list_first_entry(&tx->db_list, struct sparx5_db, list);
	list_move_tail(&db->list, &tx->db_list);
	next_dcb_hw->nextptr = FDMA_DCB_INVALID_DATA;
	tx->curr_entry->nextptr = tx->dma +
				  ((unsigned long)next_dcb_hw -
				   (unsigned long)tx->first_entry);
	tx->curr_entry = next_dcb_hw;
	memset(db->cpu_addr, 0, FDMA_XTR_BUFFER_SIZE);
	memcpy(db->cpu_addr, ifh, IFH_LEN * 4);
	memcpy(db->cpu_addr + IFH_LEN * 4, skb->data, skb->len);
	db_hw->status = FDMA_DCB_STATUS_SOF |
			FDMA_DCB_STATUS_EOF |
			FDMA_DCB_STATUS_BLOCKO(0) |
			FDMA_DCB_STATUS_BLOCKL(skb->len + IFH_LEN * 4 + 4);
	if (first_time) {
		sparx5_fdma_tx_activate(sparx5, tx);
		first_time = false;
	} else {
		sparx5_fdma_tx_reload(sparx5, tx);
	}
	return NETDEV_TX_OK;
}
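
/* Allocate the RX DCB array and one skb per data block, wire each
 * skb->data into the DB data pointer, chain all DCBs, then start NAPI
 * and the channel. virt_to_phys() is used instead of the DMA API, which
 * presumes cache-coherent DMA on this platform.
 */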
static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5)
{
	struct sparx5_rx *rx = &sparx5->rx;
	struct sparx5_rx_dcb_hw *dcb;
	int idx, jdx;
	int size;

	size = ALIGN(sizeof(struct sparx5_rx_dcb_hw) * FDMA_DCB_MAX, PAGE_SIZE);
	rx->dcb_entries = devm_kzalloc(sparx5->dev, size, GFP_KERNEL);
	if (!rx->dcb_entries)
		return -ENOMEM;
	rx->dma = virt_to_phys(rx->dcb_entries);
	rx->last_entry = rx->dcb_entries;
	rx->db_index = 0;
	rx->dcb_index = 0;
	/* Now for each dcb allocate the db */
	for (idx = 0; idx < FDMA_DCB_MAX; ++idx) {
		dcb = &rx->dcb_entries[idx];
		dcb->info = 0;
		/* For each db allocate an skb and map skb data pointer to the DB
		 * dataptr. In this way when the frame is received the skb->data
		 * pointer can be used directly in the receive path
		 */
		for (jdx = 0; jdx < FDMA_RX_DCB_MAX_DBS; ++jdx) {
			struct sparx5_db_hw *db_hw = &dcb->db[jdx];
			dma_addr_t dma_addr;
			struct sk_buff *skb;

			skb = sparx5_fdma_rx_alloc_skb(rx);
			if (!skb)
				return -ENOMEM;
			dma_addr = virt_to_phys(skb->data);
			db_hw->dataptr = dma_addr;
			db_hw->status = 0;
			rx->skb[idx][jdx] = skb;
		}
		sparx5_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * idx);
	}
	netif_napi_add(rx->ndev, &rx->napi, sparx5_fdma_napi_callback, FDMA_WEIGHT);
	napi_enable(&rx->napi);
	sparx5_fdma_rx_activate(sparx5, rx);
	return 0;
}
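
/* Allocate the TX DCB array plus one data buffer per DB, tracked in
 * tx->db_list. curr_entry is left at the last DCB so the first
 * transmission wraps around to the first one.
 */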
static int sparx5_fdma_tx_alloc(struct sparx5 *sparx5)
{
	struct sparx5_tx *tx = &sparx5->tx;
	struct sparx5_tx_dcb_hw *dcb;
	int idx, jdx;
	int size;

	size = ALIGN(sizeof(struct sparx5_tx_dcb_hw) * FDMA_DCB_MAX, PAGE_SIZE);
	tx->curr_entry = devm_kzalloc(sparx5->dev, size, GFP_KERNEL);
	if (!tx->curr_entry)
		return -ENOMEM;
	tx->dma = virt_to_phys(tx->curr_entry);
	tx->first_entry = tx->curr_entry;
	INIT_LIST_HEAD(&tx->db_list);
	/* Now for each dcb allocate the db */
	for (idx = 0; idx < FDMA_DCB_MAX; ++idx) {
		dcb = &tx->curr_entry[idx];
		dcb->info = 0;
		/* TX databuffers must be 16-byte aligned */
		for (jdx = 0; jdx < FDMA_TX_DCB_MAX_DBS; ++jdx) {
			struct sparx5_db_hw *db_hw = &dcb->db[jdx];
			struct sparx5_db *db;
			dma_addr_t phys;
			void *cpu_addr;

			cpu_addr = devm_kzalloc(sparx5->dev,
						FDMA_XTR_BUFFER_SIZE,
						GFP_KERNEL);
			if (!cpu_addr)
				return -ENOMEM;
			phys = virt_to_phys(cpu_addr);
			db_hw->dataptr = phys;
			db_hw->status = 0;
			db = devm_kzalloc(sparx5->dev, sizeof(*db), GFP_KERNEL);
			if (!db)
				return -ENOMEM;
			db->cpu_addr = cpu_addr;
			list_add_tail(&db->list, &tx->db_list);
		}
		sparx5_fdma_tx_add_dcb(tx, dcb, tx->dma + sizeof(*dcb) * idx);
		/* Let curr_entry point at the last DCB so the first xmit wraps */
		if (idx == FDMA_DCB_MAX - 1)
			tx->curr_entry = dcb;
	}
	return 0;
}
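
/* Channel init: record the channel ids; the RX side also borrows any
 * available port netdev for skb allocation and NAPI registration.
 */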
static void sparx5_fdma_rx_init(struct sparx5 *sparx5,
				struct sparx5_rx *rx, int channel)
{
	int idx;

	rx->channel_id = channel;
	/* Fetch a netdev for SKB and NAPI use, any will do */
	for (idx = 0; idx < SPX5_PORTS; ++idx) {
		struct sparx5_port *port = sparx5->ports[idx];

		if (port && port->ndev) {
			rx->ndev = port->ndev;
			break;
		}
	}
}

static void sparx5_fdma_tx_init(struct sparx5 *sparx5,
				struct sparx5_tx *tx, int channel)
{
	tx->channel_id = channel;
}
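
/* IRQ handler: data-block interrupts are masked and deferred to NAPI,
 * errors are logged rate-limited. Only the napi_schedule() and
 * dev_err_ratelimited() calls appear in this excerpt; the surrounding
 * logic is reconstructed from the upstream driver.
 */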
static irqreturn_t sparx5_fdma_handler(int irq, void *args)
{
	struct sparx5 *sparx5 = args;
	u32 db, err;

	db = spx5_rd(sparx5, FDMA_INTR_DB);
	err = spx5_rd(sparx5, FDMA_INTR_ERR);
	/* Clear interrupt */
	if (db) {
		spx5_wr(0, sparx5, FDMA_INTR_DB_ENA);
		spx5_wr(db, sparx5, FDMA_INTR_DB);
		napi_schedule(&sparx5->rx.napi);
	}
	if (err) {
		u32 err_type = spx5_rd(sparx5, FDMA_ERRORS);

		dev_err_ratelimited(sparx5->dev,
				    "ERR: int: %#x, type: %#x\n",
				    err, err_type);
		spx5_wr(err, sparx5, FDMA_INTR_ERR);
		spx5_wr(err_type, sparx5, FDMA_ERRORS);
	}
	return IRQ_HANDLED;
}
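
/* Bring-up and tear-down entry points. In the full driver, start also
 * resets the FDMA block and configures the injection/extraction port
 * modes before the calls shown here.
 */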
int sparx5_fdma_start(struct sparx5 *sparx5)
{
	int err;

	sparx5_fdma_rx_init(sparx5, &sparx5->rx, FDMA_XTR_CHANNEL);
	sparx5_fdma_tx_init(sparx5, &sparx5->tx, FDMA_INJ_CHANNEL);
	err = sparx5_fdma_rx_alloc(sparx5);
	if (err) {
		dev_err(sparx5->dev, "Could not allocate RX buffers: %d\n", err);
		return err;
	}
	err = sparx5_fdma_tx_alloc(sparx5);
	if (err)
		dev_err(sparx5->dev, "Could not allocate TX buffers: %d\n", err);
	return err;
}

int sparx5_fdma_stop(struct sparx5 *sparx5)
{
	u32 val;

	napi_disable(&sparx5->rx.napi);
	/* Stop the fdma and channel interrupts */
	sparx5_fdma_rx_deactivate(sparx5, &sparx5->rx);
	sparx5_fdma_tx_deactivate(sparx5, &sparx5->tx);
	/* Wait for the RX channel to stop */
	read_poll_timeout(sparx5_fdma_port_ctrl, val,
			  FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(val) == 0,
			  500, 10000, 0, sparx5);
	return 0;
}