1 /*
2 * Copyright (c) 2024 Intel Corporation.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #define DT_DRV_COMPAT snps_dwcxgmac
8
9 #include "eth_dwc_xgmac_priv.h"
10 #include <zephyr/cache.h>
11
12 #define LOG_MODULE_NAME eth_dwc_xgmac
13 #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL
14 LOG_MODULE_REGISTER(LOG_MODULE_NAME, LOG_LEVEL);
15
16 #define ETH_XGMAC_CHECK_RESET(inst) DT_NODE_HAS_PROP(DT_DRV_INST(inst), resets)
17
18 #ifdef CONFIG_NET_STATISTICS_ETHERNET
19 #define UPDATE_ETH_STATS_TX_PKT_CNT(dev_data, incr) (dev_data->stats.pkts.tx += incr)
20 #define UPDATE_ETH_STATS_RX_PKT_CNT(dev_data, incr) (dev_data->stats.pkts.rx += incr)
21 #define UPDATE_ETH_STATS_TX_BYTE_CNT(dev_data, incr) (dev_data->stats.bytes.sent += incr)
22 #define UPDATE_ETH_STATS_RX_BYTE_CNT(dev_data, incr) (dev_data->stats.bytes.received += incr)
23 #define UPDATE_ETH_STATS_TX_ERROR_PKT_CNT(dev_data, incr) (dev_data->stats.errors.tx += incr)
24 #define UPDATE_ETH_STATS_RX_ERROR_PKT_CNT(dev_data, incr) (dev_data->stats.errors.rx += incr)
25 #define UPDATE_ETH_STATS_TX_DROP_PKT_CNT(dev_data, incr) (dev_data->stats.tx_dropped += incr)
26 #else
27 #define UPDATE_ETH_STATS_TX_PKT_CNT(dev_data, incr)
28 #define UPDATE_ETH_STATS_RX_PKT_CNT(dev_data, incr)
29 #define UPDATE_ETH_STATS_TX_BYTE_CNT(dev_data, incr)
30 #define UPDATE_ETH_STATS_RX_BYTE_CNT(dev_data, incr)
31 #define UPDATE_ETH_STATS_TX_ERROR_PKT_CNT(dev_data, incr)
32 #define UPDATE_ETH_STATS_RX_ERROR_PKT_CNT(dev_data, incr)
33 #define UPDATE_ETH_STATS_TX_DROP_PKT_CNT(dev_data, incr)
34 #endif
35
36 /**
37 * @brief Run-time device configuration data structure.
38 *
39 * This struct contains all device configuration data for an XGMAC
40 * controller instance which is modifiable at run-time, such as
41 * data relating to the attached PHY or the auxiliary thread.
42 */
43 struct eth_dwc_xgmac_dev_data {
44 DEVICE_MMIO_RAM;
45 /**
46 * Device running status. The eth_dwc_xgmac_start_device API sets this variable and
47 * the eth_dwc_xgmac_stop_device API clears it.
48 */
49 bool dev_started;
50 /* This field specifies whether the Ethernet link is full duplex or half duplex. */
51 bool enable_full_duplex;
52 /* Ethernet auto-negotiation status. */
53 bool auto_neg;
54 /* Ethernet promiscuous mode status. */
55 bool promisc_mode;
56 /* Ethernet interface structure associated with this device. */
57 struct net_if *iface;
58 /* Current Ethernet link speed 10Mbps/100Mbps/1Gbps. */
59 enum eth_dwc_xgmac_link_speed link_speed;
60 /* Global pointer to DMA receive descriptors ring. */
61 struct xgmac_dma_rx_desc *dma_rx_desc;
62 /* Global pointer to DMA transmit descriptors ring. */
63 struct xgmac_dma_tx_desc *dma_tx_desc;
64 /* Global pointer to DMA transmit descriptors ring's meta data. */
65 volatile struct xgmac_dma_tx_desc_meta *tx_desc_meta;
66 /* Global pointer to DMA receive descriptors ring's meta data. */
67 volatile struct xgmac_dma_rx_desc_meta *rx_desc_meta;
68 /*
69 * Array of pointers to the transmit packets currently under transmission.
70 * These pointers will be cleared once the packet transmission is completed.
71 */
72 mem_addr_t *tx_pkts;
73 /**
74 * Array of pointers to the receive buffers reserved for receive data.
75 * Data received by the XGMAC is copied into these buffers. An empty network packet
76 * is reserved as the receive packet and these buffers are added to it as fragments.
77 * After a buffer is added to the receive packet, a new buffer is reserved and
78 * takes the used buffer's place for future receive data.
79 */
80 mem_addr_t *rx_buffs;
81 /* XGMAC IRQ context data. */
82 struct xgmac_irq_cntxt_data irq_cntxt_data;
83 #ifdef CONFIG_NET_STATISTICS_ETHERNET
84 /* Ethernet statistics captured by XGMAC driver */
85 struct net_stats_eth stats;
86 #endif /* CONFIG_NET_STATISTICS_ETHERNET */
87 #ifdef CONFIG_ETH_DWC_XGMAC_POLLING_MODE
88 /* timer for interrupt polling */
89 struct k_timer isr_polling_timer;
90 #endif /* CONFIG_ETH_DWC_XGMAC_POLLING_MODE */
91 #ifdef CONFIG_ETH_DWC_XGMAC_BOTTOM_HALF_WORK_QUEUE
92 /* Work queue item for processing the interrupt bottom half */
93 struct k_work isr_work;
94 #endif /*CONFIG_ETH_DWC_XGMAC_BOTTOM_HALF_WORK_QUEUE*/
95 struct k_mutex dev_cfg_lock;
96 /* Device MAC address */
97 uint8_t mac_addr[6];
98 };
99
100 /**
101 * @brief Constant device configuration data structure.
102 *
103 * This struct contains all device configuration data for an XGMAC
104 * controller instance which is constant. The data herein is
105 * either acquired from the generated header file based on the
106 * data from Kconfig, or from the header file based on the device tree
107 * data. Some of the data contained, in particular data relating
108 * to clock sources, is specific to the platform which contains the XGMAC.
109 */
110 struct eth_dwc_xgmac_config {
111 DEVICE_MMIO_ROM;
112 /* Use a random MAC address generated when the driver is initialized */
113 bool random_mac_address;
114 /* Number of TX queues configured */
115 uint8_t num_tx_Qs;
116 /* Number of RX queues configured */
117 uint8_t num_rx_Qs;
118 /* Number of DMA channels configured */
119 uint8_t num_dma_chnl;
120 /* Number of traffic classes configured */
121 uint8_t num_TCs;
122 /* Maximum transfer unit length configured */
123 uint16_t mtu;
124 /* Transmit FIFO size */
125 uint32_t tx_fifo_size;
126 /* Receive FIFO size */
127 uint32_t rx_fifo_size;
128 /* XGMAC DMA configuration */
129 struct xgmac_dma_cfg dma_cfg;
130 /* XGMAC DMA channels configuration */
131 struct xgmac_dma_chnl_config dma_chnl_cfg;
132 /* XGMAC MTL configuration */
133 struct xgmac_mtl_config mtl_cfg;
134 /* XGMAC core configuration */
135 struct xgmac_mac_config mac_cfg;
136 /* Global pointer to traffic classes and queues configuration */
137 struct xgmac_tcq_config *tcq_config;
138 /* Ethernet PHY device pointer */
139 const struct device *phy_dev;
140 /* Interrupts configuration function pointer */
141 eth_config_irq_t irq_config_fn;
142 /* Interrupts enable function pointer */
143 eth_enable_irq_t irq_enable_fn;
144 };
145
146 static inline mem_addr_t get_reg_base_addr(const struct device *dev)
147 {
148 return (mem_addr_t)DEVICE_MMIO_GET(dev);
149 }
150
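/*
 * Program the common DMA registers from the devicetree-derived DMA configuration:
 * the AXI system bus mode register and the TX/RX enhanced DMA control registers.
 */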
151 static void dwxgmac_dma_init(const struct device *dev, const struct xgmac_dma_cfg *const dma_cfg)
152 {
153 mem_addr_t ioaddr = get_reg_base_addr(dev);
154 mem_addr_t reg_addr =
155 (mem_addr_t)(ioaddr + XGMAC_DMA_BASE_ADDR_OFFSET + DMA_SYSBUS_MODE_OFST);
156
157 /**
158 * Configure the burst lengths, number of outstanding requests and enhanced address
159 * mode in the DMA system bus mode register, which controls the behavior of the AXI master.
160 */
161 uint32_t reg_val = DMA_SYSBUS_MODE_RD_OSR_LMT_SET(dma_cfg->rd_osr_lmt) |
162 DMA_SYSBUS_MODE_WR_OSR_LMT_SET(dma_cfg->wr_osr_lmt) |
163 DMA_SYSBUS_MODE_AAL_SET(dma_cfg->aal) |
164 DMA_SYSBUS_MODE_EAME_SET(dma_cfg->eame) |
165 DMA_SYSBUS_MODE_BLEN4_SET(dma_cfg->blen4) |
166 DMA_SYSBUS_MODE_BLEN8_SET(dma_cfg->blen8) |
167 DMA_SYSBUS_MODE_BLEN16_SET(dma_cfg->blen16) |
168 DMA_SYSBUS_MODE_BLEN32_SET(dma_cfg->blen32) |
169 DMA_SYSBUS_MODE_BLEN64_SET(dma_cfg->blen64) |
170 DMA_SYSBUS_MODE_BLEN128_SET(dma_cfg->blen128) |
171 DMA_SYSBUS_MODE_BLEN256_SET(dma_cfg->blen256) |
172 DMA_SYSBUS_MODE_UNDEF_SET(dma_cfg->ubl);
173
174 sys_write32(reg_val, reg_addr);
175
176 /* Configure the TX descriptor pre-fetch threshold size in the TX enhanced DMA control register */
177 reg_addr = ioaddr + XGMAC_DMA_BASE_ADDR_OFFSET + DMA_TX_EDMA_CONTROL_OFST;
178
179 reg_val = DMA_TX_EDMA_CONTROL_TDPS_SET(dma_cfg->edma_tdps);
180
181 sys_write32(reg_val, reg_addr);
182
183 /* Configure the RX descriptor pre-fetch threshold size in the RX enhanced DMA control register */
184 reg_addr = ioaddr + XGMAC_DMA_BASE_ADDR_OFFSET + DMA_RX_EDMA_CONTROL_OFST;
185
186 reg_val = DMA_RX_EDMA_CONTROL_RDPS_SET(dma_cfg->edma_rdps);
187
188 sys_write32(reg_val, reg_addr);
189 LOG_DBG("%s: DMA engine common initialization completed", dev->name);
190 }
191
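/*
 * Per-channel DMA setup: for every configured DMA channel, program the channel
 * control registers and the descriptor ring base/tail addresses and ring lengths,
 * then reset the channel's descriptor ring metadata.
 */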
192 static void dwxgmac_dma_chnl_init(const struct device *dev,
193 const struct eth_dwc_xgmac_config *const config,
194 struct eth_dwc_xgmac_dev_data *const data)
195 {
196 uint32_t dma_chnl;
197 uint32_t max_dma_chnl = config->num_dma_chnl;
198 struct xgmac_dma_chnl_config *const dma_chnl_cfg =
199 (struct xgmac_dma_chnl_config *)&config->dma_chnl_cfg;
200 struct xgmac_dma_tx_desc_meta *tx_desc_meta;
201 struct xgmac_dma_rx_desc_meta *rx_desc_meta;
202 uint32_t reg_val;
203 mem_addr_t ioaddr = get_reg_base_addr(dev);
204 mem_addr_t reg_addr;
205
206 for (dma_chnl = 0; dma_chnl < max_dma_chnl; dma_chnl++) {
207 tx_desc_meta = (struct xgmac_dma_tx_desc_meta *)&data->tx_desc_meta[dma_chnl];
208 rx_desc_meta = (struct xgmac_dma_rx_desc_meta *)&data->rx_desc_meta[dma_chnl];
209
210 /**
211 * Configure Header-Payload Split feature, 8xPBL mode (burst length) and
212 * Maximum Segment Size in DMA channel x control register.
213 */
214 reg_addr = (ioaddr + XGMAC_DMA_CHNLx_BASE_ADDR_OFFSET(dma_chnl) +
215 DMA_CHx_CONTROL_OFST);
216 reg_val = DMA_CHx_CONTROL_SPH_SET(dma_chnl_cfg->sph) |
217 DMA_CHx_CONTROL_PBLX8_SET(dma_chnl_cfg->pblx8) |
218 DMA_CHx_CONTROL_MSS_SET(dma_chnl_cfg->mss);
219 sys_write32(reg_val, reg_addr);
220
221 /**
222 * Configure transmit path AXI programmable burst length, TCP segmentation and
223 * Operate on Second Packet in DMA channel TX control register.
224 */
225 reg_addr = (ioaddr + XGMAC_DMA_CHNLx_BASE_ADDR_OFFSET(dma_chnl) +
226 DMA_CHx_TX_CONTROL_OFST);
227 reg_val = DMA_CHx_TX_CONTROL_TXPBL_SET(dma_chnl_cfg->txpbl) |
228 DMA_CHx_TX_CONTROL_TSE_SET(dma_chnl_cfg->tse) |
229 DMA_CHx_TX_CONTROL_RESERVED_OSP_SET(dma_chnl_cfg->osp);
230 sys_write32(reg_val, reg_addr);
231
232 /**
233 * Enable Rx DMA Packet Flush and Configure receive path AXI programmable burst
234 * length and receive buffer size in DMA channel RX control register.
235 */
236 reg_addr = (ioaddr + XGMAC_DMA_CHNLx_BASE_ADDR_OFFSET(dma_chnl) +
237 DMA_CHx_RX_CONTROL_OFST);
238 reg_val = DMA_CHx_RX_CONTROL_RPF_SET(1u) |
239 DMA_CHx_RX_CONTROL_RXPBL_SET(dma_chnl_cfg->rxpbl) |
240 DMA_CHx_RX_CONTROL_RBSZ_SET(CONFIG_NET_BUF_DATA_SIZE);
241 sys_write32(reg_val, reg_addr);
242
243 /* Initialize the DMA channel TX descriptor list high address register */
244 reg_addr = (ioaddr + XGMAC_DMA_CHNLx_BASE_ADDR_OFFSET(dma_chnl) +
245 DMA_CHx_TXDESC_LIST_HADDRESS_OFST);
246 reg_val = DMA_CHx_TXDESC_LIST_HADDRESS_TDESHA_SET(tx_desc_meta->desc_list_addr >>
247 32u);
248 sys_write32(reg_val, reg_addr);
249
250 /* Initialize the DMA channel TX descriptor list low address register */
251 reg_addr = (ioaddr + XGMAC_DMA_CHNLx_BASE_ADDR_OFFSET(dma_chnl) +
252 DMA_CHx_TXDESC_LIST_LADDRESS_OFST);
253 reg_val = tx_desc_meta->desc_list_addr;
254 sys_write32(reg_val, reg_addr);
255
256 /* Initialize the DMA channel RX descriptor list high address register */
257 reg_addr = (ioaddr + XGMAC_DMA_CHNLx_BASE_ADDR_OFFSET(dma_chnl) +
258 DMA_CHx_RXDESC_LIST_HADDRESS_OFST);
259 reg_val = rx_desc_meta->desc_list_addr >> 32u;
260 sys_write32(reg_val, reg_addr);
261
262 /* Initialize the DMA channel RX descriptor list low address register */
263 reg_addr = (ioaddr + XGMAC_DMA_CHNLx_BASE_ADDR_OFFSET(dma_chnl) +
264 DMA_CHx_RXDESC_LIST_LADDRESS_OFST);
265 reg_val = rx_desc_meta->desc_list_addr;
266 sys_write32(reg_val, reg_addr);
267
268 /* Initialize the DMA channel TX descriptor ring tail pointer register */
269 reg_addr = (ioaddr + XGMAC_DMA_CHNLx_BASE_ADDR_OFFSET(dma_chnl) +
270 DMA_CHx_TXDESC_TAIL_LPOINTER_OFST);
271 reg_val = DMA_CHx_TXDESC_TAIL_LPOINTER_TDT_SET(tx_desc_meta->desc_tail_addr);
272 sys_write32(reg_val, reg_addr);
273
274 /* Initialize the DMA channel RX descriptor ring tail pointer register */
275 reg_addr = (ioaddr + XGMAC_DMA_CHNLx_BASE_ADDR_OFFSET(dma_chnl) +
276 DMA_CHx_RXDESC_TAIL_LPOINTER_OFST);
277 reg_val = DMA_CHx_RXDESC_TAIL_LPOINTER_RDT_SET(rx_desc_meta->desc_tail_addr);
278 sys_write32(reg_val, reg_addr);
279
280 /* Program the TX descriptor ring length in the DMA channel TX control2 register */
281 reg_addr = (ioaddr + XGMAC_DMA_CHNLx_BASE_ADDR_OFFSET(dma_chnl) +
282 DMA_CHx_TX_CONTROL2_OFST);
283 reg_val = DMA_CHx_TX_CONTROL2_TDRL_SET((dma_chnl_cfg->tdrl - 1u));
284 sys_write32(reg_val, reg_addr);
285
286 /* Program the RX descriptor ring length in the DMA channel RX control2 register */
287 reg_addr = (ioaddr + XGMAC_DMA_CHNLx_BASE_ADDR_OFFSET(dma_chnl) +
288 DMA_CHx_RX_CONTROL2_OFST);
289 reg_val = DMA_CHx_RX_CONTROL2_RDRL_SET((dma_chnl_cfg->rdrl - 1u));
290 sys_write32(reg_val, reg_addr);
291
292 /* Initialize channel metadata */
293 tx_desc_meta->next_to_use = 0u;
294 rx_desc_meta->next_to_read = 0u;
295 rx_desc_meta->rx_pkt = (struct net_pkt *)NULL;
296 LOG_DBG("%s: DMA channel %d initialization completed", dev->name, dma_chnl);
297 }
298 }
299
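/*
 * Carve the per-channel TX/RX descriptor rings out of the statically allocated
 * descriptor arrays, record their base/tail addresses in the ring metadata and
 * zero the descriptors.
 */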
300 static void dwxgmac_dma_desc_init(const struct eth_dwc_xgmac_config *const config,
301 struct eth_dwc_xgmac_dev_data *const data)
302 {
303 const uint32_t max_dma_chnl = config->num_dma_chnl;
304 uint32_t dma_chnl;
305 struct xgmac_dma_chnl_config *const dma_chnl_cfg =
306 (struct xgmac_dma_chnl_config *)&config->dma_chnl_cfg;
307
308 struct xgmac_dma_tx_desc_meta *tx_desc_meta;
309 struct xgmac_dma_rx_desc_meta *rx_desc_meta;
310
311 for (dma_chnl = 0; dma_chnl < max_dma_chnl; dma_chnl++) {
312 tx_desc_meta = (struct xgmac_dma_tx_desc_meta *)&data->tx_desc_meta[dma_chnl];
313 rx_desc_meta = (struct xgmac_dma_rx_desc_meta *)&data->rx_desc_meta[dma_chnl];
314
315 tx_desc_meta->desc_list_addr =
316 POINTER_TO_UINT(data->dma_tx_desc + (dma_chnl * dma_chnl_cfg->tdrl));
317 tx_desc_meta->desc_tail_addr = POINTER_TO_UINT(tx_desc_meta->desc_list_addr);
318
319 memset((void *)(tx_desc_meta->desc_list_addr), 0,
320 ((dma_chnl_cfg->tdrl) * sizeof(struct xgmac_dma_tx_desc)));
321
322 rx_desc_meta->desc_list_addr =
323 POINTER_TO_UINT(data->dma_rx_desc + (dma_chnl * dma_chnl_cfg->rdrl));
324 rx_desc_meta->desc_tail_addr = POINTER_TO_UINT(rx_desc_meta->desc_list_addr);
325
326 memset((void *)(rx_desc_meta->desc_list_addr), 0,
327 ((dma_chnl_cfg->rdrl) * sizeof(struct xgmac_dma_rx_desc)));
328 }
329 }
330
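/*
 * MTL setup: program the scheduling/arbitration algorithms, traffic class
 * priorities, RX queue to DMA channel mapping and the per-queue TX/RX
 * operation modes.
 */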
331 static void dwxgmac_dma_mtl_init(const struct device *dev,
332 const struct eth_dwc_xgmac_config *const config)
333 {
334 uint32_t max_q_count =
335 config->num_tx_Qs > config->num_rx_Qs ? config->num_tx_Qs : config->num_rx_Qs;
336 uint32_t q_idx;
337
338 struct xgmac_mtl_config *mtl_cfg = (struct xgmac_mtl_config *)&config->mtl_cfg;
339 struct xgmac_tcq_config *const tcq_config = (struct xgmac_tcq_config *)config->tcq_config;
340
341 mem_addr_t ioaddr = get_reg_base_addr(dev);
342
343 /* Configure MTL operation mode options */
344 mem_addr_t reg_addr =
345 (mem_addr_t)(ioaddr + XGMAC_MTL_BASE_ADDR_OFFSET + MTL_OPERATION_MODE_OFST);
346 uint32_t reg_val = MTL_OPERATION_MODE_ETSALG_SET(mtl_cfg->etsalg) |
347 MTL_OPERATION_MODE_RAA_SET(mtl_cfg->raa);
348 sys_write32(reg_val, reg_addr);
349
350 /* Program the traffic class priorities. */
351 for (uint32_t tc_id = 0; tc_id < config->num_TCs; tc_id++) {
352 reg_addr = (ioaddr + XGMAC_MTL_BASE_ADDR_OFFSET + MTL_TC_PRTY_MAP0_OFST +
353 ((tc_id / NUM_OF_TCs_PER_TC_PRTY_MAP_REG) * XGMAC_REG_SIZE_BYTES));
354 reg_val = (sys_read32(reg_addr) &
355 MTL_TCx_PRTY_MAP_MSK(tc_id % NUM_OF_TCs_PER_TC_PRTY_MAP_REG));
356 reg_val |= MTL_TCx_PRTY_MAP_PSTC_SET((tc_id % NUM_OF_TCs_PER_TC_PRTY_MAP_REG),
357 tcq_config->pstc[tc_id]);
358 sys_write32(reg_val, reg_addr);
359 }
360
361 for (q_idx = 0u; q_idx < max_q_count; q_idx++) {
362 /**
363 * The following sequence of register initializations is required to initialize the
364 * MTL transmit and receive queues. Refer to the register descriptions in the DWC XGMAC
365 * data book for more details.
366 * - Enable dynamic mapping of RX queues to RX DMA channels by programming
367 * QxDDMACH bit in MTL_RXQ_DMA_MAP register.
368 * - Configure MTL TX queue options and enable the TX queue.
369 */
370 reg_addr = (ioaddr + XGMAC_MTL_BASE_ADDR_OFFSET + MTL_RXQ_DMA_MAP0_OFST +
371 ((q_idx / NUM_OF_RxQs_PER_DMA_MAP_REG) * XGMAC_REG_SIZE_BYTES));
372 reg_val = (sys_read32(reg_addr) &
373 MTL_RXQ_DMA_MAP_Qx_MSK(q_idx % NUM_OF_RxQs_PER_DMA_MAP_REG));
374 reg_val |= MTL_RXQ_DMA_MAP_QxDDMACH_SET((q_idx % NUM_OF_RxQs_PER_DMA_MAP_REG),
375 READ_BIT(tcq_config->rx_q_ddma_en, q_idx)) |
376 MTL_RXQ_DMA_MAP_QxMDMACH_SET((q_idx % NUM_OF_RxQs_PER_DMA_MAP_REG),
377 tcq_config->rx_q_dma_chnl_sel[q_idx]);
378 sys_write32(reg_val, reg_addr);
379
380 reg_addr = (ioaddr + XGMAC_MTL_TCQx_BASE_ADDR_OFFSET(q_idx) +
381 MTL_TCQx_MTL_TXQx_OPERATION_MODE_OFST);
382 reg_val = MTL_TCQx_MTL_TXQx_OPERATION_MODE_TQS_SET(tcq_config->tx_q_size[q_idx]) |
383 MTL_TCQx_MTL_TXQx_OPERATION_MODE_Q2TCMAP_SET(
384 tcq_config->q_to_tc_map[q_idx]) |
385 MTL_TCQx_MTL_TXQx_OPERATION_MODE_TTC_SET(tcq_config->ttc[q_idx]) |
386 MTL_TCQx_MTL_TXQx_OPERATION_MODE_TXQEN_SET(2u) |
387 MTL_TCQx_MTL_TXQx_OPERATION_MODE_TSF_SET(
388 READ_BIT(tcq_config->tsf_en, q_idx));
389 sys_write32(reg_val, reg_addr);
390
391 reg_addr = (ioaddr + XGMAC_MTL_TCQx_BASE_ADDR_OFFSET(q_idx) +
392 MTL_TCQx_MTC_TCx_ETS_CONTROL_OFST);
393 reg_val = MTL_TCQx_MTC_TCx_ETS_CONTROL_TSA_SET(tcq_config->tsa[q_idx]);
394 sys_write32(reg_val, reg_addr);
395
396 reg_addr = (ioaddr + XGMAC_MTL_TCQx_BASE_ADDR_OFFSET(q_idx) +
397 MTL_TCQx_MTL_RXQx_OPERATION_MODE_OFST);
398 reg_val = MTL_TCQx_MTL_RXQx_OPERATION_MODE_RQS_SET(tcq_config->rx_q_size[q_idx]) |
399 MTL_TCQx_MTL_RXQx_OPERATION_MODE_EHFC_SET(
400 READ_BIT(tcq_config->hfc_en, q_idx)) |
401 MTL_TCQx_MTL_RXQx_OPERATION_MODE_DIS_TCP_EF_SET(
402 READ_BIT(tcq_config->cs_err_pkt_drop_dis, q_idx)) |
403 MTL_TCQx_MTL_RXQx_OPERATION_MODE_RSF_SET(
404 READ_BIT(tcq_config->rsf_en, q_idx)) |
405 MTL_TCQx_MTL_RXQx_OPERATION_MODE_FEF_SET(
406 READ_BIT(tcq_config->fep_en, q_idx)) |
407 MTL_TCQx_MTL_RXQx_OPERATION_MODE_FUF_SET(
408 READ_BIT(tcq_config->fup_en, q_idx)) |
409 MTL_TCQx_MTL_RXQx_OPERATION_MODE_RTC_SET(tcq_config->rtc[q_idx]);
410 sys_write32(reg_val, reg_addr);
411 }
412 }
413
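/*
 * Program one entry of the MAC address filter table. The high register holds the
 * two most significant address bytes plus the address-enable (and optional source
 * address compare) bits; the low register holds the remaining four bytes.
 */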
414 static void dwxgmac_set_mac_addr_by_idx(const struct device *dev, uint8_t *addr, uint8_t idx,
415 bool sa)
416 {
417 mem_addr_t ioaddr = get_reg_base_addr(dev);
418 uint32_t reg_val;
419
420 reg_val = (addr[MAC_ADDR_BYTE_5] << BIT_OFFSET_8) | addr[MAC_ADDR_BYTE_4];
421 if (idx != 0u) {
422 /**
423 * The 'sa' bit specifies whether this MAC address[47:0] is compared with the source
424 * address field of received packets. The MAC address with index 0 is always enabled
425 * for receive packet MAC address filtering, and its 'sa' bit is reserved, hence this
426 * step is skipped for index 0.
427 */
428 reg_val |= CORE_MAC_ADDRESSx_HIGH_SA_SET(sa);
429 }
430 sys_write32(reg_val | CORE_MAC_ADDRESS1_HIGH_AE_SET_MSK,
431 ioaddr + XGMAC_CORE_ADDRx_HIGH(idx));
432
433 reg_val = (addr[MAC_ADDR_BYTE_3] << BIT_OFFSET_24) |
434 (addr[MAC_ADDR_BYTE_2] << BIT_OFFSET_16) |
435 (addr[MAC_ADDR_BYTE_1] << BIT_OFFSET_8) | addr[MAC_ADDR_BYTE_0];
436 sys_write32(reg_val, ioaddr + XGMAC_CORE_ADDRx_LOW(idx));
437 LOG_DBG("%s: Update MAC address %x %x %x %x %x %x at index %d", dev->name,
438 addr[MAC_ADDR_BYTE_5], addr[MAC_ADDR_BYTE_4], addr[MAC_ADDR_BYTE_3],
439 addr[MAC_ADDR_BYTE_2], addr[MAC_ADDR_BYTE_1], addr[MAC_ADDR_BYTE_0], idx);
440 }
441
442 static void eth_dwc_xgmac_update_link_speed(const struct device *dev,
443 enum eth_dwc_xgmac_link_speed link_speed)
444 {
445 mem_addr_t ioaddr = get_reg_base_addr(dev);
446 uint32_t reg_val;
447
448 reg_val = sys_read32(ioaddr + CORE_MAC_TX_CONFIGURATION_OFST);
449 reg_val &= CORE_MAC_TX_CONFIGURATION_SS_CLR_MSK;
450
451 switch (link_speed) {
452 case LINK_10MBIT:
453 reg_val |= CORE_MAC_TX_CONFIGURATION_SS_SET(CORE_MAC_TX_CONFIGURATION_SS_10MHZ);
454 LOG_DBG("%s: MAC link speed updated to 10Mbps", dev->name);
455 break;
456 case LINK_100MBIT:
457 reg_val |= CORE_MAC_TX_CONFIGURATION_SS_SET(CORE_MAC_TX_CONFIGURATION_SS_100MHZ);
458 LOG_DBG("%s: MAC link speed updated to 100Mbps", dev->name);
459 break;
460 case LINK_1GBIT:
461 reg_val |= CORE_MAC_TX_CONFIGURATION_SS_SET(CORE_MAC_TX_CONFIGURATION_SS_1000MHZ);
462 LOG_DBG("%s: MAC link speed updated to 1Gbps", dev->name);
463 break;
464 default:
465 LOG_ERR("%s: Invalid link speed configuration value", dev->name);
466 }
467
468 sys_write32(reg_val, ioaddr + CORE_MAC_TX_CONFIGURATION_OFST);
469 }
470
471 static void dwxgmac_mac_init(const struct device *dev,
472 const struct eth_dwc_xgmac_config *const config,
473 struct eth_dwc_xgmac_dev_data *const data)
474 {
475 struct xgmac_mac_config *const mac_cfg = (struct xgmac_mac_config *)&config->mac_cfg;
476 mem_addr_t ioaddr = get_reg_base_addr(dev);
477 uint32_t reg_val;
478
479 /* Enable MAC HASH & MAC Perfect filtering */
480 reg_val =
481 #ifndef CONFIG_ETH_DWC_XGMAC_HW_FILTERING
482 CORE_MAC_PACKET_FILTER_RA_SET(SET_BIT) | CORE_MAC_PACKET_FILTER_PM_SET(SET_BIT);
483 #else
484 #ifdef CONFIG_ETH_DWC_XGMAC_HW_L3_L4_FILTERING
485 CORE_MAC_PACKET_FILTER_IPFE_SET(SET_BIT) |
486 #endif
487 CORE_MAC_PACKET_FILTER_HPF_SET(SET_BIT) | CORE_MAC_PACKET_FILTER_HMC_SET(SET_BIT) |
488 CORE_MAC_PACKET_FILTER_HUC_SET(SET_BIT);
489 #endif
490
491 sys_write32(reg_val, ioaddr + CORE_MAC_PACKET_FILTER_OFST);
492
493 /* Enable receive queues for Data Center Bridging/generic */
494 reg_val = 0;
495 for (uint32_t q = 0; q < config->num_rx_Qs; q++) {
496 reg_val |= (XGMAC_RXQxEN_DCB << (q * XGMAC_RXQxEN_SIZE_BITS));
497 }
498 sys_write32(reg_val, ioaddr + CORE_MAC_RXQ_CTRL0_OFST);
499
500 /* Disable jabber timer in MAC TX configuration register */
501 reg_val = CORE_MAC_TX_CONFIGURATION_JD_SET(SET_BIT);
502 sys_write32(reg_val, ioaddr + CORE_MAC_TX_CONFIGURATION_OFST);
503
504 /**
505 * Enable giant packet size limit control, disable the watchdog timer on the receiver and
506 * configure RX checksum offload, jumbo packet enable, ARP offload and the giant packet
507 * size limit in the MAC RX configuration register.
508 */
509 reg_val = CORE_MAC_RX_CONFIGURATION_GPSLCE_SET(SET_BIT) |
510 #ifdef CONFIG_ETH_DWC_XGMAC_RX_CS_OFFLOAD
511 CORE_MAC_RX_CONFIGURATION_IPC_SET(SET_BIT) |
512 #endif
513 CORE_MAC_RX_CONFIGURATION_WD_SET(SET_BIT) |
514 CORE_MAC_RX_CONFIGURATION_JE_SET(mac_cfg->je) |
515 CORE_MAC_RX_CONFIGURATION_ARPEN_SET(mac_cfg->arp_offload_en) |
516 CORE_MAC_RX_CONFIGURATION_GPSL_SET(mac_cfg->gpsl);
517
518 sys_write32(reg_val, ioaddr + CORE_MAC_RX_CONFIGURATION_OFST);
519
520 /* Configure MAC link speed */
521 eth_dwc_xgmac_update_link_speed(dev, data->link_speed);
522 }
523
524 static inline void dwxgmac_irq_init(const struct device *dev)
525 {
526 struct eth_dwc_xgmac_dev_data *const data = (struct eth_dwc_xgmac_dev_data *)dev->data;
527 mem_addr_t reg_addr;
528 uint32_t reg_val;
529 mem_addr_t ioaddr = get_reg_base_addr(dev);
530
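/* Clear the interrupt mode (INTM) field in the DMA mode register and record the
 * device pointer in the IRQ context used by the ISR and its bottom half.
 */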
531 reg_addr = ioaddr + XGMAC_DMA_BASE_ADDR_OFFSET + DMA_MODE_OFST;
532 reg_val = (sys_read32(reg_addr) & DMA_MODE_INTM_CLR_MSK);
533 sys_write32(reg_val, reg_addr);
534 data->irq_cntxt_data.dev = dev;
535 }
536
537 static inline void add_buffs_to_pkt(struct net_pkt *rx_pkt, struct net_buf *buff1,
538 uint16_t buff1_len, struct net_buf *buff2, uint16_t buff2_len)
539 {
540 /* Add the receive buffers to the RX packet. */
541 buff1->len = buff1_len;
542 arch_dcache_invd_range(buff1->data, CONFIG_NET_BUF_DATA_SIZE);
543 net_pkt_frag_add(rx_pkt, buff1);
544 if (buff2_len) {
545 buff2->len = buff2_len;
546 arch_dcache_invd_range(buff2->data, CONFIG_NET_BUF_DATA_SIZE);
547 net_pkt_frag_add(rx_pkt, buff2);
548 } else {
549 /**
550 * If the second buffer length is zero, return it to the RX buffer
551 * pool by freeing it.
552 */
553 net_pkt_frag_unref(buff2);
554 }
555 }
556
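/*
 * Hand back the two network buffers currently attached to an RX descriptor through
 * buff1/buff2 and refill the descriptor with two freshly reserved buffers, returning
 * it to DMA ownership with the OWN and IOC bits set.
 */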
557 static void get_and_refill_desc_buffs(struct xgmac_dma_rx_desc *rx_desc, uint16_t desc_id,
558 mem_addr_t *rx_buffs, struct net_buf **buff1,
559 struct net_buf **buff2)
560 {
561 struct net_buf *new_buff;
562
563 *buff1 = (struct net_buf *)((mem_addr_t)*(rx_buffs + (desc_id * RX_FRAGS_PER_DESC)));
564 *buff2 = (struct net_buf *)((mem_addr_t)*(rx_buffs + (desc_id * RX_FRAGS_PER_DESC) + 1u));
565 /* Reserve a free buffer from the network RX buffer pool */
566 new_buff = net_pkt_get_reserve_rx_data(CONFIG_NET_BUF_DATA_SIZE, K_FOREVER);
567 if (!new_buff) {
568 LOG_ERR("Failed to allocate a network buffer to refill the DMA descriptor");
569 return;
570 }
571 /**
572 * Replace the old buffer one address with the newly reserved buffer one address in the
573 * rx_buffs array, at the index corresponding to the descriptor index.
574 */
575 *(rx_buffs + (desc_id * RX_FRAGS_PER_DESC)) = (mem_addr_t)new_buff;
576 /**
577 * Update dword0 and dword1 of the receive descriptor with the address of the newly
578 * reserved buffer. dword0 and dword1 together form the 64-bit address of the RX data
579 * buffer.
580 */
581 rx_desc->rdes0 = POINTER_TO_UINT(new_buff->data);
582 rx_desc->rdes1 = POINTER_TO_UINT(new_buff->data) >> XGMAC_REG_SIZE_BITS;
583 /* Reserve another free buffer from the network RX buffer pool */
584 new_buff = net_pkt_get_reserve_rx_data(CONFIG_NET_BUF_DATA_SIZE, K_FOREVER);
585 if (!new_buff) {
586 /**
587 * If we fail to reserve another buffer to fill the RX descriptor buffer pointer
588 * 2, then free the previously allocated first buffer too, log an error and return.
589 */
590 rx_desc->rdes0 = 0u;
591 rx_desc->rdes1 = 0u;
592 net_pkt_frag_unref((struct net_buf *)(*(rx_buffs + (desc_id * RX_FRAGS_PER_DESC))));
593 *(rx_buffs + (desc_id * RX_FRAGS_PER_DESC)) = (mem_addr_t)NULL;
594 LOG_ERR("Failed to allocate a network buffer to refill the DMA descriptor");
595 return;
596 }
597 /**
598 * Replace the old buffer2 address with the newly reserved buffer2 address in the
599 * rx_buffs array, at the index corresponding to the descriptor index.
600 */
601 *(rx_buffs + (desc_id * RX_FRAGS_PER_DESC) + 1u) = (mem_addr_t)new_buff;
602 /**
603 * Update dword2 and dword3 of the receive descriptor with the address of the newly
604 * reserved buffer2. dword2 and part of dword3 together form the address of the second RX
605 * data buffer.
606 */
607 rx_desc->rdes2 = POINTER_TO_UINT(new_buff->data);
608 /**
609 * Put the RX descriptor back into DMA ownership by setting the OWN bit in dword3.
610 * Set the IOC bit in dword3 to receive an interrupt after this RX descriptor has been
611 * processed and handed back to application ownership.
612 */
613 rx_desc->rdes3 = XGMAC_RDES3_OWN | XGMAC_RDES3_IOC |
614 (POINTER_TO_UINT(new_buff->data) >> XGMAC_REG_SIZE_BITS);
615 }
616
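/*
 * RX bottom half for one DMA channel: walk the descriptors the DMA has released,
 * chain their buffers as fragments of the receive packet and hand completed
 * packets to the network stack via net_recv_data().
 */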
617 static void eth_dwc_xgmac_rx_irq_work(const struct device *dev, uint32_t dma_chnl)
618 {
619 struct eth_dwc_xgmac_dev_data *const data = (struct eth_dwc_xgmac_dev_data *)dev->data;
620 const struct eth_dwc_xgmac_config *const config =
621 (struct eth_dwc_xgmac_config *)dev->config;
622 struct xgmac_dma_chnl_config *dma_chnl_cfg =
623 (struct xgmac_dma_chnl_config *)&config->dma_chnl_cfg;
624 struct xgmac_dma_rx_desc_meta *rx_desc_meta =
625 (struct xgmac_dma_rx_desc_meta *)&data->rx_desc_meta[dma_chnl];
626 struct xgmac_dma_rx_desc *first_rx_desc =
627 (struct xgmac_dma_rx_desc *)(data->dma_rx_desc + (dma_chnl * dma_chnl_cfg->rdrl));
628 struct xgmac_dma_rx_desc *rx_desc, rx_desc_data;
629 struct net_buf *buff1 = NULL, *buff2 = NULL;
630 uint32_t desc_data_len;
631 int err;
632
633 mem_addr_t *rx_buffs = (mem_addr_t *)(data->rx_buffs + (((dma_chnl * dma_chnl_cfg->rdrl)) *
634 RX_FRAGS_PER_DESC));
635
636 rx_desc = (struct xgmac_dma_rx_desc *)(first_rx_desc + rx_desc_meta->next_to_read);
637 arch_dcache_invd_range(rx_desc, sizeof(rx_desc));
638 rx_desc_data = *(rx_desc);
639 while (!(rx_desc_data.rdes3 & XGMAC_RDES3_OWN)) {
640 get_and_refill_desc_buffs(rx_desc, rx_desc_meta->next_to_read, rx_buffs, &buff1,
641 &buff2);
642 arch_dcache_flush_range(rx_desc, sizeof(rx_desc));
643
644 if (rx_desc_data.rdes3 & XGMAC_RDES3_FD) {
645 LOG_DBG("%s: received FD buffer. descriptor indx = %d", dev->name,
646 rx_desc_meta->next_to_read);
647 if (rx_desc_meta->rx_pkt) {
648 net_pkt_frag_unref(rx_desc_meta->rx_pkt->frags);
649 net_pkt_unref(rx_desc_meta->rx_pkt);
650 }
651 rx_desc_meta->rx_pkt = net_pkt_rx_alloc_on_iface(data->iface, K_NO_WAIT);
652 if (!rx_desc_meta->rx_pkt) {
653 LOG_ERR("%s: Failed allocate a network packet for receive data",
654 dev->name);
655 /* Error processing */
656 return;
657 }
658 }
659
660 if (rx_desc_meta->rx_pkt != NULL) {
661 if (rx_desc_data.rdes3 & XGMAC_RDES3_LD) {
662 LOG_DBG("%s: received LD buffer. descriptor indx = %d", dev->name,
663 rx_desc_meta->next_to_read);
664 UPDATE_ETH_STATS_RX_PKT_CNT(data, 1u);
665
666 if (!(rx_desc_data.rdes3 & XGMAC_RDES3_ES)) {
667 desc_data_len =
668 (rx_desc_data.rdes3 & XGMAC_RDES3_PL) %
669 (CONFIG_NET_BUF_DATA_SIZE * RX_FRAGS_PER_DESC);
670
671 if (desc_data_len > CONFIG_NET_BUF_DATA_SIZE) {
672 add_buffs_to_pkt(
673 rx_desc_meta->rx_pkt, buff1,
674 CONFIG_NET_BUF_DATA_SIZE, buff2,
675 (desc_data_len - CONFIG_NET_BUF_DATA_SIZE));
676 } else {
677 add_buffs_to_pkt(rx_desc_meta->rx_pkt, buff1,
678 desc_data_len, buff2, 0u);
679 }
680 /**
681 * Full packet received, submit it to the network subsystem for
682 * further processing.
683 */
684 err = net_recv_data(data->iface, rx_desc_meta->rx_pkt);
685 if (err) {
686 UPDATE_ETH_STATS_RX_ERROR_PKT_CNT(data, 1u);
687 net_pkt_unref(rx_desc_meta->rx_pkt);
688 LOG_DBG("%s: received packet dropped %d", dev->name,
689 err);
690 } else {
691 LOG_DBG("%s: received a packet", dev->name);
692 UPDATE_ETH_STATS_RX_BYTE_CNT(
693 data,
694 net_pkt_get_len(rx_desc_meta->rx_pkt));
695 }
696 } else {
697 LOG_ERR("%s: rx packet error", dev->name);
698 UPDATE_ETH_STATS_RX_ERROR_PKT_CNT(data, 1u);
699 net_pkt_unref(rx_desc_meta->rx_pkt);
700 }
701 rx_desc_meta->rx_pkt = (struct net_pkt *)NULL;
702 } else {
703 add_buffs_to_pkt(rx_desc_meta->rx_pkt, buff1,
704 CONFIG_NET_BUF_DATA_SIZE, buff2,
705 CONFIG_NET_BUF_DATA_SIZE);
706 }
707 } else {
708 LOG_ERR("%s: Received a buffer with no FD buffer received in the "
709 "sequence",
710 dev->name);
711 }
712 rx_desc_meta->next_to_read =
713 ((rx_desc_meta->next_to_read + 1) % dma_chnl_cfg->rdrl);
714 rx_desc = (struct xgmac_dma_rx_desc *)(first_rx_desc + rx_desc_meta->next_to_read);
715 arch_dcache_invd_range(rx_desc, sizeof(rx_desc));
716 rx_desc_data = *(rx_desc);
717 }
718 }
719
720 static inline mem_addr_t *tx_pkt_location_in_array(mem_addr_t *array_base, uint32_t dma_chnl,
721 uint32_t tdrl, uint16_t desc_idx)
722 {
723 return (array_base + ((dma_chnl * tdrl) + desc_idx));
724 }
725
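/*
 * TX completion handling for one DMA channel: for each descriptor returned by the
 * DMA, drop the net_pkt reference taken at transmit time (on the LD descriptor),
 * clear the descriptor and give back the free-descriptor semaphore.
 */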
726 static void eth_dwc_xgmac_tx_irq_work(const struct device *dev, uint32_t dma_chnl)
727 {
728 struct eth_dwc_xgmac_dev_data *const data = (struct eth_dwc_xgmac_dev_data *)dev->data;
729 const struct eth_dwc_xgmac_config *const config =
730 (struct eth_dwc_xgmac_config *)dev->config;
731 struct xgmac_dma_chnl_config *dma_chnl_cfg =
732 (struct xgmac_dma_chnl_config *)&config->dma_chnl_cfg;
733 struct xgmac_dma_tx_desc_meta *tx_desc_meta =
734 (struct xgmac_dma_tx_desc_meta *)&data->tx_desc_meta[dma_chnl];
735 struct xgmac_dma_tx_desc *first_tx_desc =
736 (struct xgmac_dma_tx_desc *)(data->dma_tx_desc + (dma_chnl * dma_chnl_cfg->tdrl));
737 struct xgmac_dma_tx_desc *tx_desc;
738 uint16_t desc_idx;
739 struct net_pkt *pkt;
740
741 desc_idx =
742 ((tx_desc_meta->next_to_use + k_sem_count_get(&tx_desc_meta->free_tx_descs_sem)) %
743 dma_chnl_cfg->tdrl);
744 for (; desc_idx != tx_desc_meta->next_to_use;
745 desc_idx = ((desc_idx + 1) % dma_chnl_cfg->tdrl)) {
746 tx_desc = (struct xgmac_dma_tx_desc *)(first_tx_desc + desc_idx);
747 arch_dcache_invd_range(tx_desc, sizeof(tx_desc));
748 if (!(tx_desc->tdes3 & XGMAC_TDES3_OWN)) {
749 /* If the LD bit of this descriptor is set, unreference the TX packet */
750 if (tx_desc->tdes3 & XGMAC_TDES3_LD) {
751 pkt = (struct net_pkt *)(*tx_pkt_location_in_array(
752 data->tx_pkts, dma_chnl, dma_chnl_cfg->tdrl, desc_idx));
753
754 LOG_DBG("%s: %p packet unreferenced for after tx", dev->name, pkt);
755 net_pkt_unref(pkt);
756 *(tx_pkt_location_in_array(data->tx_pkts, dma_chnl,
757 dma_chnl_cfg->tdrl, desc_idx)) =
758 (mem_addr_t)NULL;
759 }
760 /* reset the descriptor content */
761 tx_desc->tdes0 = 0u;
762 tx_desc->tdes1 = 0u;
763 tx_desc->tdes2 = 0u;
764 tx_desc->tdes3 = 0u;
765 arch_dcache_flush_range(tx_desc, sizeof(tx_desc));
766 k_sem_give(&tx_desc_meta->free_tx_descs_sem);
767 } else {
768 break;
769 }
770 }
771 }
772
773 static void eth_dwc_xgmac_dmach_isr(const struct device *dev, uint32_t dmach_interrupt_sts,
774 uint32_t dma_chnl)
775 {
776 if (dmach_interrupt_sts & DMA_CHx_STATUS_TI_SET_MSK) {
777 /* Transmit interrupt */
778 eth_dwc_xgmac_tx_irq_work(dev, dma_chnl);
779 }
780 if (dmach_interrupt_sts & DMA_CHx_STATUS_RI_SET_MSK) {
781 /* Receive interrupt */
782 eth_dwc_xgmac_rx_irq_work(dev, dma_chnl);
784 LOG_DBG("%s: DMA channel %d Rx interrupt", dev->name, dma_chnl);
785 }
786 if (dmach_interrupt_sts & DMA_CHx_STATUS_TPS_SET_MSK) {
787 /* Transmit process stopped interrupt*/
788 LOG_ERR("%s: DMA channel %d Transmit process stopped", dev->name, dma_chnl);
789 }
790 if (dmach_interrupt_sts & DMA_CHx_STATUS_TBU_SET_MSK) {
791 /* Transmit buffer unavailable interrupt*/
792 LOG_DBG("%s: DMA channel %d Transmit buffer unavailable", dev->name, dma_chnl);
793 }
794 if (dmach_interrupt_sts & DMA_CHx_STATUS_RBU_SET_MSK) {
795 /* Receive buffer unavailable interrupt*/
796 LOG_ERR("%s: DMA channel %d Receive buffer unavailable", dev->name, dma_chnl);
797 }
798 if (dmach_interrupt_sts & DMA_CHx_STATUS_RPS_SET_MSK) {
799 /* Receive process stopped interrupt*/
800 LOG_ERR("%s: DMA channel %d Receive process stopped", dev->name, dma_chnl);
801 }
802 if (dmach_interrupt_sts & DMA_CHx_STATUS_DDE_SET_MSK) {
803 /* Descriptor definition error interrupt*/
804 LOG_ERR("%s: DMA channel %d Descriptor definition error", dev->name, dma_chnl);
805 }
806 if (dmach_interrupt_sts & DMA_CHx_STATUS_FBE_SET_MSK) {
807 /* Fatal bus error interrupt*/
808 LOG_ERR("%s: DMA channel %d Fatal bus error", dev->name, dma_chnl);
809 }
810 if (dmach_interrupt_sts & DMA_CHx_STATUS_CDE_SET_MSK) {
811 /* Context descriptor error interrupt*/
812 LOG_ERR("%s: DMA channel %d Context descriptor error", dev->name, dma_chnl);
813 }
814 if (dmach_interrupt_sts & DMA_CHx_STATUS_AIS_SET_MSK) {
815 /* Abnormal interrupt status interrupt*/
816 LOG_ERR("%s: DMA channel %d Abnormal error", dev->name, dma_chnl);
817 }
818 }
819
820 static inline void eth_dwc_xgmac_mtl_isr(const struct device *dev, uint32_t mtl_interrupt_sts)
821 {
822 ARG_UNUSED(dev);
823 ARG_UNUSED(mtl_interrupt_sts);
824 /* Handle MTL interrupts */
825 }
826
827 static inline void eth_dwc_xgmac_mac_isr(const struct device *dev, uint32_t mac_interrupt_sts)
828 {
829 ARG_UNUSED(dev);
830 ARG_UNUSED(mac_interrupt_sts);
831 /* Handle MAC interrupts */
832 }
833
834 #ifdef CONFIG_ETH_DWC_XGMAC_BOTTOM_HALF_WORK_QUEUE
835 /**
836 * @brief Handler function for bottom half processing which got
837 * submitted to work queue in the interrupt handler.
838 *
839 * @param item Pointer to the work item
840 */
841 static void eth_dwc_xgmac_irq_work(struct k_work *item)
842 {
843 struct eth_dwc_xgmac_dev_data *const data =
844 CONTAINER_OF(item, struct eth_dwc_xgmac_dev_data, isr_work);
845 struct xgmac_irq_cntxt_data *cntxt_data =
846 (struct xgmac_irq_cntxt_data *)&data->irq_cntxt_data;
847 const struct device *dev = cntxt_data->dev;
848 const struct eth_dwc_xgmac_config *const config =
849 (struct eth_dwc_xgmac_config *)dev->config;
850 uint32_t dma_chnl_interrupt_sts = 0u;
851
852 for (uint32_t x = 0; x < config->num_dma_chnl; x++) {
853 if (cntxt_data->dma_interrupt_sts & BIT(x)) {
854 dma_chnl_interrupt_sts = cntxt_data->dma_chnl_interrupt_sts[x];
855 cntxt_data->dma_chnl_interrupt_sts[x] ^= dma_chnl_interrupt_sts;
856 eth_dwc_xgmac_dmach_isr(dev, dma_chnl_interrupt_sts, x);
857 WRITE_BIT(cntxt_data->dma_interrupt_sts, x, 0);
858 }
859 }
860 }
861 #endif
862 /**
863 * @brief XGMAC interrupt service routine
864 * XGMAC interrupt service routine. Checks for indications of errors
865 * and either immediately handles RX pending / TX complete notifications
866 * or defers them to the system work queue.
867 *
868 * @param dev Pointer to the ethernet device
869 */
870 static void eth_dwc_xgmac_isr(const struct device *dev)
871 {
872 const struct eth_dwc_xgmac_config *const config =
873 (struct eth_dwc_xgmac_config *)dev->config;
874 struct eth_dwc_xgmac_dev_data *const data = (struct eth_dwc_xgmac_dev_data *)dev->data;
875 struct xgmac_irq_cntxt_data *cntxt_data =
876 (struct xgmac_irq_cntxt_data *)&data->irq_cntxt_data;
877 uint32_t dma_int_status = 0u;
878 uint32_t dmach_interrupt_sts = 0u;
879 mem_addr_t ioaddr = get_reg_base_addr(dev);
880 mem_addr_t reg_addr;
881 uint32_t reg_val;
882
883 if (!data->dev_started || data->link_speed == LINK_DOWN ||
884 (!net_if_flag_is_set(data->iface, NET_IF_UP))) {
885 dma_int_status =
886 sys_read32(ioaddr + XGMAC_DMA_BASE_ADDR_OFFSET + DMA_INTERRUPT_STATUS_OFST);
887 for (uint32_t x = 0; x < config->num_dma_chnl; x++) {
888 if (dma_int_status & BIT(x)) {
889 LOG_ERR("%s ignoring dma ch %d interrupt: %x ", dev->name, x,
890 sys_read32(ioaddr + XGMAC_DMA_CHNLx_BASE_ADDR_OFFSET(x) +
891 DMA_CHx_STATUS_OFST));
892 reg_val = DMA_CHx_STATUS_NIS_SET_MSK | DMA_CHx_STATUS_AIS_SET_MSK |
893 DMA_CHx_STATUS_CDE_SET_MSK | DMA_CHx_STATUS_FBE_SET_MSK |
894 DMA_CHx_STATUS_DDE_SET_MSK | DMA_CHx_STATUS_RPS_SET_MSK |
895 DMA_CHx_STATUS_RBU_SET_MSK | DMA_CHx_STATUS_TBU_SET_MSK |
896 DMA_CHx_STATUS_TPS_SET_MSK | DMA_CHx_STATUS_RI_SET_MSK |
897 DMA_CHx_STATUS_TI_SET_MSK;
898 sys_write32(reg_val, (ioaddr + XGMAC_DMA_CHNLx_BASE_ADDR_OFFSET(x) +
899 DMA_CHx_STATUS_OFST));
900 }
901 }
902
903 LOG_ERR("%s ignoring xgmac interrupt: device not started,link is down or network "
904 "interface is not up",
905 dev->name);
906
907 return;
908 }
909
910 /* Interrupt Top half processing
911 */
912 reg_addr = ioaddr + XGMAC_DMA_BASE_ADDR_OFFSET + DMA_INTERRUPT_STATUS_OFST;
913 /**
914 * Only set the interrupt, do not overwrite the interrupt status stored in the context.
915 * The status will be cleared once the corresponding action is completed in the work item
916 */
917 cntxt_data->dma_interrupt_sts |= sys_read32(reg_addr);
918 for (uint32_t x = 0; x < config->num_dma_chnl; x++) {
919 if (cntxt_data->dma_interrupt_sts & BIT(x)) {
920 reg_addr = (ioaddr + XGMAC_DMA_CHNLx_BASE_ADDR_OFFSET(x) +
921 DMA_CHx_STATUS_OFST);
922 dmach_interrupt_sts = sys_read32(reg_addr);
923 sys_write32(dmach_interrupt_sts, reg_addr);
924 #ifdef CONFIG_ETH_DWC_XGMAC_BOTTOM_HALF_WORK_QUEUE
925 /**
926 * Only set the interrupt, do not overwrite the interrupt status stored in
927 * the context. The status will be cleared once the corresponding action is
928 * done in the work item
929 */
930 cntxt_data->dma_chnl_interrupt_sts[x] |= dmach_interrupt_sts;
931 #else
932 eth_dwc_xgmac_dmach_isr(dev, dmach_interrupt_sts, x);
933 WRITE_BIT(cntxt_data->dma_interrupt_sts, x, 0);
934 #endif
935 }
936 }
937
938 reg_addr = ioaddr + XGMAC_MTL_BASE_ADDR_OFFSET + MTL_INTERRUPT_STATUS_OFST;
939 reg_val = sys_read32(reg_addr);
940 #ifdef CONFIG_ETH_DWC_XGMAC_BOTTOM_HALF_WORK_QUEUE
941 cntxt_data->mtl_interrupt_sts |= reg_val;
942 #else
943 eth_dwc_xgmac_mtl_isr(dev, reg_val);
944 #endif
945
946 reg_addr = ioaddr + XGMAC_CORE_BASE_ADDR_OFFSET + CORE_MAC_INTERRUPT_STATUS_OFST;
947 reg_val = sys_read32(reg_addr);
948 #ifdef CONFIG_ETH_DWC_XGMAC_BOTTOM_HALF_WORK_QUEUE
949 cntxt_data->mac_interrupt_sts |= reg_val;
950 #else
951 eth_dwc_xgmac_mac_isr(dev, reg_val);
952 #endif
953
954 #ifdef CONFIG_ETH_DWC_XGMAC_BOTTOM_HALF_WORK_QUEUE
955 /* submitting work item to work queue for interrupt bottom half processing. */
956 k_work_submit(&data->isr_work);
957 #endif
958 }
959 /**
960 * @brief Expiry function called from the system timer IRQ handler
961 * when the configured ISR polling timer expires.
962 *
963 * @param timer Pointer to the timer object
964 */
965 #ifdef CONFIG_ETH_DWC_XGMAC_POLLING_MODE
966 static void eth_dwc_xgmac_irq_poll(struct k_timer *timer)
967 {
968 struct eth_dwc_xgmac_dev_data *dev_data = (struct eth_dwc_xgmac_dev_data *)CONTAINER_OF(
969 timer, struct eth_dwc_xgmac_dev_data, isr_polling_timer);
970 const struct device *dev = dev_data->irq_cntxt_data.dev;
971
972 eth_dwc_xgmac_isr(dev);
973 }
974 #endif /* CONFIG_ETH_DWC_XGMAC_POLLING_MODE */
975
976 /**
977 * @brief XGMAC device initialization function
978 * Initializes the XGMAC itself and the DMA memory area used by the XGMAC.
979 *
980 * @param dev Pointer to the ethernet device
981 * @retval 0 device initialization completed successfully
982 */
983 static int eth_dwc_xgmac_dev_init(const struct device *dev)
984 {
985 const struct eth_dwc_xgmac_config *const config =
986 (struct eth_dwc_xgmac_config *)dev->config;
987 struct eth_dwc_xgmac_dev_data *const data = (struct eth_dwc_xgmac_dev_data *)dev->data;
988 mem_addr_t ioaddr;
989
990 DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE);
991 ioaddr = get_reg_base_addr(dev);
992
993 /* Initialization procedure as described in the dwc xgmac 10G Ethernet MAC data book. */
994
995 dwxgmac_dma_init(dev, &config->dma_cfg);
996
997 dwxgmac_dma_desc_init(config, data);
998
999 dwxgmac_dma_chnl_init(dev, config, data);
1000
1001 dwxgmac_dma_mtl_init(dev, config);
1002
1003 dwxgmac_mac_init(dev, config, data);
1004
1005 /* set MAC address */
1006 if (config->random_mac_address == true) {
1007 /**
1008 * The default MAC address configured in the device tree shall
1009 * contain the OUI octets.
1010 */
1011 gen_random_mac(data->mac_addr, data->mac_addr[MAC_ADDR_BYTE_0],
1012 data->mac_addr[MAC_ADDR_BYTE_1], data->mac_addr[MAC_ADDR_BYTE_2]);
1013 }
1014 dwxgmac_set_mac_addr_by_idx(dev, data->mac_addr, 0, false);
1015
1016 dwxgmac_irq_init(dev);
1017 LOG_DBG("XGMAC ethernet driver init done");
1018 return 0;
1019 }
1020
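/*
 * PHY link state change callback: cache the negotiated speed, reprogram the MAC
 * speed setting and report carrier on/off to the network interface.
 */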
1021 static void phy_link_state_change_callback(const struct device *phy_dev,
1022 struct phy_link_state *state, void *user_data)
1023 {
1024 ARG_UNUSED(phy_dev);
1025 const struct device *mac_dev = (const struct device *)user_data;
1026 struct eth_dwc_xgmac_dev_data *dev_data = (struct eth_dwc_xgmac_dev_data *)mac_dev->data;
1027 bool is_up = state->is_up;
1028
1029 if (is_up) {
1030 /* Announce link up status */
1031 switch (state->speed) {
1032 case LINK_HALF_1000BASE_T:
1033 case LINK_FULL_1000BASE_T:
1034 dev_data->link_speed = LINK_1GBIT;
1035 break;
1036 case LINK_HALF_100BASE_T:
1037 case LINK_FULL_100BASE_T:
1038 dev_data->link_speed = LINK_100MBIT;
1039 break;
1040 case LINK_HALF_10BASE_T:
1041 case LINK_FULL_10BASE_T:
1042 default:
1043 dev_data->link_speed = LINK_10MBIT;
1044 }
1045 /* Configure MAC link speed */
1046 eth_dwc_xgmac_update_link_speed(mac_dev, dev_data->link_speed);
1047 /* Set up link */
1048 net_eth_carrier_on(dev_data->iface);
1049 LOG_DBG("%s: Link up", mac_dev->name);
1050
1051 } else {
1052 dev_data->link_speed = LINK_DOWN;
1053 /* Announce link down status */
1054 net_eth_carrier_off(dev_data->iface);
1055 LOG_DBG("%s: Link down", mac_dev->name);
1056 }
1057 }
1058
1059 void eth_dwc_xgmac_prefill_rx_desc(const struct device *dev)
1060 {
1061 /**
1062 * Every RX descriptor in the descriptor ring needs to be prefilled with 2 RX
1063 * buffer addresses and put into DMA ownership by setting the OWN bit. When new
1064 * data is received, the DMA checks the OWN bit, moves the data to the
1065 * corresponding receive buffers and puts the RX descriptor back into application
1066 * ownership by clearing the OWN bit. If the received data size exceeds the total of
1067 * the 2 buffer sizes, the DMA uses the next descriptor in the ring.
1068 */
1069 struct eth_dwc_xgmac_dev_data *const dev_data = (struct eth_dwc_xgmac_dev_data *)dev->data;
1070 const struct eth_dwc_xgmac_config *const dev_conf =
1071 (struct eth_dwc_xgmac_config *)dev->config;
1072 struct xgmac_dma_chnl_config *const dma_chnl_cfg =
1073 (struct xgmac_dma_chnl_config *)&dev_conf->dma_chnl_cfg;
1074 struct xgmac_dma_tx_desc_meta *tx_desc_meta;
1075 struct xgmac_dma_rx_desc_meta *rx_desc_meta;
1076 struct xgmac_dma_rx_desc *rx_desc = NULL;
1077 mem_addr_t reg_addr;
1078 uint32_t reg_val;
1079 mem_addr_t ioaddr;
1080 mem_addr_t *rx_buffs;
1081 uint16_t desc_id = 0u;
1082
1083 ioaddr = get_reg_base_addr(dev);
1084 /* Reserve the RX buffers and fill the RX descriptors with buffer addresses */
1085 for (uint32_t dma_chnl = 0u; dma_chnl < dev_conf->num_dma_chnl; dma_chnl++) {
1086 tx_desc_meta = (struct xgmac_dma_tx_desc_meta *)&dev_data->tx_desc_meta[dma_chnl];
1087 rx_desc_meta = (struct xgmac_dma_rx_desc_meta *)&dev_data->rx_desc_meta[dma_chnl];
1088 /* Initialize the free-descriptor semaphore and ring lock mutex for the TX descriptor ring */
1089 k_sem_init(&tx_desc_meta->free_tx_descs_sem, (dma_chnl_cfg->tdrl),
1090 (dma_chnl_cfg->tdrl));
1091 k_mutex_init(&tx_desc_meta->ring_lock);
1092 for (desc_id = 0u; desc_id < dma_chnl_cfg->rdrl; desc_id++) {
1093 rx_desc = (struct xgmac_dma_rx_desc *)(dev_data->dma_rx_desc +
1094 (dma_chnl * dma_chnl_cfg->rdrl) +
1095 desc_id);
1096 rx_buffs = (mem_addr_t *)(dev_data->rx_buffs +
1097 (((dma_chnl * dma_chnl_cfg->rdrl) + desc_id) *
1098 RX_FRAGS_PER_DESC));
1099 rx_buffs[RX_FRAG_ONE] = (mem_addr_t)net_pkt_get_reserve_rx_data(
1100 CONFIG_NET_BUF_DATA_SIZE, K_FOREVER);
1101 if (!rx_buffs[RX_FRAG_ONE]) {
1102 LOG_ERR("%s: Failed to allocate a network buffer to fill "
1103 "the "
1104 "RxDesc[%d]",
1105 dev->name, desc_id);
1106 break;
1107 }
1108 arch_dcache_invd_range(rx_desc, sizeof(rx_desc));
1109 rx_desc->rdes0 =
1110 POINTER_TO_UINT(((struct net_buf *)rx_buffs[RX_FRAG_ONE])->data);
1111 rx_desc->rdes1 =
1112 POINTER_TO_UINT(((struct net_buf *)rx_buffs[RX_FRAG_ONE])->data) >>
1113 32u;
1114 rx_buffs[RX_FRAG_TWO] = (mem_addr_t)net_pkt_get_reserve_rx_data(
1115 CONFIG_NET_BUF_DATA_SIZE, K_FOREVER);
1116 if (!rx_buffs[RX_FRAG_TWO]) {
1117 net_pkt_frag_unref((struct net_buf *)(rx_buffs[RX_FRAG_ONE]));
1118 LOG_ERR("%s: Failed to allocate a network buffer to fill "
1119 "the "
1120 "RxDesc[%d]",
1121 dev->name, desc_id);
1122 break;
1123 }
1124 rx_desc->rdes2 =
1125 POINTER_TO_UINT(((struct net_buf *)rx_buffs[RX_FRAG_TWO])->data);
1126 rx_desc->rdes3 =
1127 XGMAC_RDES3_OWN | XGMAC_RDES3_IOC |
1128 (POINTER_TO_UINT(((struct net_buf *)rx_buffs[RX_FRAG_TWO])->data) >>
1129 32u);
1130 arch_dcache_flush_range(rx_desc, sizeof(rx_desc));
1131 rx_desc_meta->desc_tail_addr = (mem_addr_t)(rx_desc + 1);
1132 }
1133 reg_addr = (ioaddr + XGMAC_DMA_CHNLx_BASE_ADDR_OFFSET(dma_chnl) +
1134 DMA_CHx_RXDESC_TAIL_LPOINTER_OFST);
1135 reg_val = DMA_CHx_RXDESC_TAIL_LPOINTER_RDT_SET(rx_desc_meta->desc_tail_addr);
1136 sys_write32(reg_val, reg_addr);
1137 LOG_DBG("%s: DMA channel %d Rx descriptors initialization completed", dev->name,
1138 dma_chnl);
1139 }
1140 }
1141
1142 /**
1143 * @brief XGMAC associated interface initialization function
1144 * Initializes the interface associated with an XGMAC device.
1145 *
1146 * @param iface Pointer to the associated interface data struct
1147 */
1148 static void eth_dwc_xgmac_iface_init(struct net_if *iface)
1149 {
1150 const struct device *dev = net_if_get_device(iface);
1151 const struct eth_dwc_xgmac_config *const dev_conf =
1152 (struct eth_dwc_xgmac_config *)dev->config;
1153 struct eth_dwc_xgmac_dev_data *const dev_data = (struct eth_dwc_xgmac_dev_data *)dev->data;
1154
1155 k_mutex_init(&dev_data->dev_cfg_lock);
1156 #ifdef CONFIG_ETH_DWC_XGMAC_BOTTOM_HALF_WORK_QUEUE
1157 /* Initialize the (delayed) work item for RX pending, TX done */
1158 k_work_init(&(dev_data->isr_work), eth_dwc_xgmac_irq_work);
1159 #endif
1160
1161 #ifdef CONFIG_ETH_DWC_XGMAC_POLLING_MODE
1162 k_timer_init(&dev_data->isr_polling_timer, eth_dwc_xgmac_irq_poll, NULL);
1163 #else
1164 dev_conf->irq_config_fn(dev);
1165 #endif
1166
1167 eth_dwc_xgmac_prefill_rx_desc(dev);
1168
1169 /* Set the initial contents of the current instance's run-time data */
1170 dev_data->iface = iface;
1171 (void)net_if_set_link_addr(iface, dev_data->mac_addr, ETH_MAC_ADDRESS_SIZE,
1172 NET_LINK_ETHERNET);
1173 net_if_carrier_off(iface);
1174 ethernet_init(iface);
1175 net_if_set_mtu(iface, dev_conf->mtu);
1176 LOG_DBG("%s: MTU size is set to %d", dev->name, dev_conf->mtu);
1177 if (device_is_ready(dev_conf->phy_dev)) {
1178 phy_link_callback_set(dev_conf->phy_dev, &phy_link_state_change_callback,
1179 (void *)dev);
1180 } else {
1181 LOG_ERR("%s: PHY device not ready", dev->name);
1182 }
1183 LOG_DBG("%s: Ethernet iface init done binded to iface@0x%p", dev->name, iface);
1184 }
1185
1186 /**
1187 * @brief XGMAC device start function
1188 * XGMAC device start function. Clears all status registers and any
1189 * pending interrupts, enables RX and TX, enables interrupts.
1190 *
1191 * @param dev Pointer to the ethernet device
1192 * @retval 0 upon successful completion
1193 */
1194 static int eth_dwc_xgmac_start_device(const struct device *dev)
1195 {
1196 const struct eth_dwc_xgmac_config *dev_conf = (struct eth_dwc_xgmac_config *)dev->config;
1197 struct eth_dwc_xgmac_dev_data *dev_data = (struct eth_dwc_xgmac_dev_data *)dev->data;
1198 mem_addr_t ioaddr;
1199 mem_addr_t reg_addr;
1200 uint32_t reg_val;
1201
1202 if (dev_data->dev_started) {
1203 LOG_DBG("Eth device already started");
1204 return 0;
1205 }
1206
1207 ioaddr = get_reg_base_addr(dev);
1208
1209 for (uint32_t dma_chnl = 0u; dma_chnl < dev_conf->num_dma_chnl; dma_chnl++) {
1210 /* Start the transmit DMA channel */
1211 reg_addr = (ioaddr + XGMAC_DMA_CHNLx_BASE_ADDR_OFFSET(dma_chnl) +
1212 DMA_CHx_TX_CONTROL_OFST);
1213 reg_val = sys_read32(reg_addr) | DMA_CHx_TX_CONTROL_ST_SET_MSK;
1214 sys_write32(reg_val, reg_addr);
1215 /* Start the receive DMA channel */
1216 reg_addr = (ioaddr + XGMAC_DMA_CHNLx_BASE_ADDR_OFFSET(dma_chnl) +
1217 DMA_CHx_RX_CONTROL_OFST);
1218 reg_val = sys_read32(reg_addr) | DMA_CHx_RX_CONTROL_SR_SET_MSK;
1219 sys_write32(reg_val, reg_addr);
1220 /* Enable the dma channel interrupts */
1221 reg_addr = (ioaddr + XGMAC_DMA_CHNLx_BASE_ADDR_OFFSET(dma_chnl) +
1222 DMA_CHx_INTERRUPT_ENABLE_OFST);
1223 reg_val = DMA_CHx_INTERRUPT_ENABLE_NIE_SET(1u) |
1224 DMA_CHx_INTERRUPT_ENABLE_AIE_SET(1u) |
1225 DMA_CHx_INTERRUPT_ENABLE_CDEE_SET(1u) |
1226 DMA_CHx_INTERRUPT_ENABLE_FBEE_SET(1u) |
1227 DMA_CHx_INTERRUPT_ENABLE_DDEE_SET(1u) |
1228 DMA_CHx_INTERRUPT_ENABLE_RSE_SET(1u) |
1229 DMA_CHx_INTERRUPT_ENABLE_RBUE_SET(1u) |
1230 DMA_CHx_INTERRUPT_ENABLE_RIE_SET(1u) |
1231 DMA_CHx_INTERRUPT_ENABLE_TBUE_SET(1u) |
1232 DMA_CHx_INTERRUPT_ENABLE_TXSE_SET(1u) |
1233 DMA_CHx_INTERRUPT_ENABLE_TIE_SET(1u);
1234 sys_write32(reg_val, reg_addr);
1235 LOG_DBG("%s: Interrupts enabled for DMA Channel %d", dev->name, dma_chnl);
1236 }
1237 /* Enable the MAC transmit functionality*/
1238 reg_val = sys_read32(ioaddr + CORE_MAC_TX_CONFIGURATION_OFST);
1239 reg_val |= CORE_MAC_TX_CONFIGURATION_TE_SET(1u);
1240 sys_write32(reg_val, (ioaddr + CORE_MAC_TX_CONFIGURATION_OFST));
1241 /* Enable the MAC receive functionality*/
1242 reg_val = sys_read32(ioaddr + CORE_MAC_RX_CONFIGURATION_OFST);
1243 reg_val |= CORE_MAC_RX_CONFIGURATION_RE_SET(1u);
1244 sys_write32(reg_val, (ioaddr + CORE_MAC_RX_CONFIGURATION_OFST));
1245 /* Keep the MAC Link Status Change Interrupt disabled; link changes are reported via the PHY driver callback */
1246 reg_val = sys_read32(ioaddr + CORE_MAC_INTERRUPT_ENABLE_OFST);
1247 reg_val = CORE_MAC_INTERRUPT_ENABLE_LSIE_SET(0u);
1248 sys_write32(reg_val, (ioaddr + CORE_MAC_INTERRUPT_ENABLE_OFST));
1249
1250 #ifdef CONFIG_ETH_DWC_XGMAC_POLLING_MODE
1251 /* If polling mode is configured then start the ISR polling timer */
1252 k_timer_start(&dev_data->isr_polling_timer,
1253 K_USEC(CONFIG_ETH_DWC_XGMAC_INTERRUPT_POLLING_INTERVAL_US),
1254 K_USEC(CONFIG_ETH_DWC_XGMAC_INTERRUPT_POLLING_INTERVAL_US));
1255 #else
1256 dev_conf->irq_enable_fn(dev, true);
1257 #endif
1258
1259 dev_data->dev_started = true;
1260 LOG_DBG("%s: Device started", dev->name);
1261 return 0;
1262 }
1263
1264 /**
1265 * @brief XGMAC device stop function
1266 * XGMAC device stop function. Disables all interrupts, disables
1267 * RX and TX, clears all status registers.
1268 *
1269 * @param dev Pointer to the ethernet device
1270 * @retval 0 upon successful completion
1271 */
1272 static int eth_dwc_xgmac_stop_device(const struct device *dev)
1273 {
1274 const struct eth_dwc_xgmac_config *dev_conf = (struct eth_dwc_xgmac_config *)dev->config;
1275 struct eth_dwc_xgmac_dev_data *dev_data = (struct eth_dwc_xgmac_dev_data *)dev->data;
1276 mem_addr_t ioaddr;
1277 mem_addr_t reg_addr;
1278 uint32_t reg_val;
1279
1280 if (!dev_data->dev_started) {
1281 LOG_DBG("Eth device already stopped");
1282 return 0;
1283 }
1284 dev_data->dev_started = false;
1285
1286 ioaddr = get_reg_base_addr(dev);
1287
1288 for (uint32_t dma_chnl = 0; dma_chnl < dev_conf->num_dma_chnl; dma_chnl++) {
1289 /* Stop the transmit DMA channel */
1290 reg_addr = (ioaddr + XGMAC_DMA_CHNLx_BASE_ADDR_OFFSET(dma_chnl) +
1291 DMA_CHx_TX_CONTROL_OFST);
1292 reg_val = sys_read32(reg_addr) & DMA_CHx_TX_CONTROL_ST_CLR_MSK;
1293 sys_write32(reg_val, reg_addr);
1294 /* Stop the receive DMA channel */
1295 reg_addr = (ioaddr + XGMAC_DMA_CHNLx_BASE_ADDR_OFFSET(dma_chnl) +
1296 DMA_CHx_RX_CONTROL_OFST);
1297 reg_val = sys_read32(reg_addr) & DMA_CHx_RX_CONTROL_SR_CLR_MSK;
1298 sys_write32(reg_val, reg_addr);
1299 /* Disable the dma channel interrupts */
1300 reg_addr = (ioaddr + XGMAC_DMA_CHNLx_BASE_ADDR_OFFSET(dma_chnl) +
1301 DMA_CHx_INTERRUPT_ENABLE_OFST);
1302 reg_val = 0u;
1303 sys_write32(reg_val, reg_addr);
1304 LOG_DBG("%s: Interrupts disabled for DMA Channel %d", dev->name, dma_chnl);
1305 }
1306 /* Disable the MAC transmit functionality */
1307 reg_val = sys_read32(ioaddr + CORE_MAC_TX_CONFIGURATION_OFST);
1308 reg_val &= CORE_MAC_TX_CONFIGURATION_TE_CLR_MSK;
1309 sys_write32(reg_val, (ioaddr + CORE_MAC_TX_CONFIGURATION_OFST));
1310 /* Disable the MAC receive functionality */
1311 reg_val = sys_read32(ioaddr + CORE_MAC_RX_CONFIGURATION_OFST);
1312 reg_val &= CORE_MAC_RX_CONFIGURATION_RE_CLR_MSK;
1313 sys_write32(reg_val, (ioaddr + CORE_MAC_RX_CONFIGURATION_OFST));
1314 /* Disable the MAC interrupts */
1315 reg_addr = ioaddr + CORE_MAC_INTERRUPT_ENABLE_OFST;
1316 reg_val = 0u;
1317 sys_write32(reg_val, reg_addr);
1318
1319 #ifdef CONFIG_ETH_DWC_XGMAC_POLLING_MODE
1320 /* If polling mode is configured then stop the ISR polling timer */
1321 k_timer_stop(&dev_data->isr_polling_timer);
1322 #else
1323 /* If interrupt mode is configured then disable the ISR in the interrupt controller */
1324 dev_conf->irq_enable_fn(dev, false);
1325 #endif
1326 LOG_DBG("%s: Device stopped", dev->name);
1327 return 0;
1328 }
1329
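/*
 * Advance the DMA channel TX descriptor tail pointer register so the DMA engine
 * starts processing the newly prepared descriptors.
 */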
1330 static inline void update_desc_tail_ptr(const struct device *dev, uint8_t dma_chnl,
1331 uint32_t desc_tail_addr)
1332 {
1333 mem_addr_t reg_addr, ioaddr = get_reg_base_addr(dev);
1334 uint32_t reg_val;
1335
1336 reg_addr = (ioaddr + XGMAC_DMA_CHNLx_BASE_ADDR_OFFSET(dma_chnl) +
1337 DMA_CHx_TXDESC_TAIL_LPOINTER_OFST);
1338 reg_val = DMA_CHx_TXDESC_TAIL_LPOINTER_TDT_SET(desc_tail_addr);
1339 sys_write32(reg_val, reg_addr);
1340 }
1341 /**
1342 * @brief XGMAC data send function
1343 * XGMAC data send function. Fills the TX DMA channel's descriptor ring with the
1344 * packet fragments and triggers the DMA transfer. Blocks briefly, per fragment,
1345 * while waiting for a free TX descriptor.
1346 *
1347 * @param dev Pointer to the ethernet device
1348 * @param pkt Pointer to the data packet to be sent
1349 * @retval -EINVAL in case of invalid parameters, e.g. zero packet length
1350 * @retval -EIO in case of:
1351 * (1) an attempt to TX data while the device is stopped,
1352 * the interface is down or the link is down,
1353 * (2) no free TX descriptor becoming available within the per-fragment timeout
1354 * @retval 0 if the packet was queued for transmission successfully
1355 */
1361 static int eth_dwc_xgmac_send(const struct device *dev, struct net_pkt *pkt)
1362 {
1363 int ret;
1364 struct xgmac_tx_cntxt context;
1365 const struct eth_dwc_xgmac_config *dev_conf = (struct eth_dwc_xgmac_config *)dev->config;
1366 struct eth_dwc_xgmac_dev_data *dev_data = (struct eth_dwc_xgmac_dev_data *)dev->data;
1367 struct xgmac_dma_chnl_config *dma_ch_cfg =
1368 (struct xgmac_dma_chnl_config *)&dev_conf->dma_chnl_cfg;
1369 uint32_t tdes2_flgs, tdes3_flgs, tdes3_fd_flg;
1370
1371 if (!pkt || !pkt->frags) {
1372 LOG_ERR("%s: cannot TX, invalid argument", dev->name);
1373 return -EINVAL;
1374 }
1375
1376 if (net_pkt_get_len(pkt) == 0) {
1377 LOG_ERR("%s cannot TX, zero packet length", dev->name);
1378 UPDATE_ETH_STATS_TX_ERROR_PKT_CNT(dev_data, 1u);
1379 return -EINVAL;
1380 }
1381
1382 if (!dev_data->dev_started || dev_data->link_speed == LINK_DOWN ||
1383 (!net_if_flag_is_set(dev_data->iface, NET_IF_UP))) {
1384 LOG_ERR("%s cannot TX, due to any of these reasons, device not started,link is "
1385 "down or network interface is not up",
1386 dev->name);
1387 UPDATE_ETH_STATS_TX_DROP_PKT_CNT(dev_data, 1u);
1388 return -EIO;
1389 }
1390
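/*
 * Map the packet priority to a traffic class; the TX descriptor ring and DMA
 * channel with the same index are used to transmit this packet.
 */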
1391 context.q_id = net_tx_priority2tc(net_pkt_priority(pkt));
1392 context.descmeta = (struct xgmac_dma_tx_desc_meta *)&dev_data->tx_desc_meta[context.q_id];
1393 context.pkt_desc_id = context.descmeta->next_to_use;
1394 /* lock the TX desc ring while acquiring the resources */
1395 (void)k_mutex_lock(&(context.descmeta->ring_lock), K_FOREVER);
1396 (void)net_pkt_ref(pkt);
1397 LOG_DBG("%s: %p packet referanced for tx", dev->name, pkt);
1398 tdes3_fd_flg = XGMAC_TDES3_FD;
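/*
 * XGMAC_TDES3_FD marks the first descriptor of the packet. It is cleared after
 * the first fragment so the remaining descriptors are chained as continuations
 * of the same packet.
 */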
1399 for (struct net_buf *frag = pkt->frags; frag; frag = frag->frags) {
1400 ret = k_sem_take(&context.descmeta->free_tx_descs_sem, K_MSEC(1));
1401 if (ret != 0) {
1402 LOG_DBG("%s: enough free tx descriptors are not available", dev->name);
1403 goto abort_tx;
1404 }
1405 context.tx_desc = (struct xgmac_dma_tx_desc *)(dev_data->dma_tx_desc +
1406 (context.q_id * dma_ch_cfg->tdrl) +
1407 context.pkt_desc_id);
1408 arch_dcache_invd_range(context.tx_desc, sizeof(*context.tx_desc));
1409 arch_dcache_flush_range(frag->data, CONFIG_NET_BUF_DATA_SIZE);
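/* TDES0/TDES1 hold the lower/upper 32 bits of the fragment buffer address */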
1410 context.tx_desc->tdes0 = (uint32_t)POINTER_TO_UINT(frag->data);
1411 context.tx_desc->tdes1 = (uint32_t)(POINTER_TO_UINT(frag->data) >> 32u);
1412 tdes2_flgs = frag->len;
1413 tdes3_flgs = XGMAC_TDES3_OWN | tdes3_fd_flg |
1414 #ifdef CONFIG_ETH_DWC_XGMAC_TX_CS_OFFLOAD
1415 XGMAC_TDES3_CS_EN_MSK |
1416 #endif
1417 net_pkt_get_len(pkt);
1418 tdes3_fd_flg = 0;
1419
1420 if (!frag->frags) { /* check last fragment of the packet */
1421 /* Set interrupt on completion for last fragment descriptor */
1422 tdes3_flgs |= XGMAC_TDES3_LD;
1423 tdes2_flgs |= XGMAC_TDES2_IOC;
1424 /*
1425 * Pin the transmitted packet address. The packet will be unpinned after
1426 * the HW has transmitted it.
1427 */
1428 *(dev_data->tx_pkts + ((context.q_id * dma_ch_cfg->tdrl) +
1429 context.pkt_desc_id)) = (mem_addr_t)pkt;
1430 context.descmeta->desc_tail_addr =
1431 (mem_addr_t)POINTER_TO_UINT(context.tx_desc + 1);
1432 }
1433
1434 context.tx_desc->tdes2 = tdes2_flgs;
1435 context.tx_desc->tdes3 = tdes3_flgs;
1436 arch_dcache_flush_range(context.tx_desc, sizeof(*context.tx_desc));
1437 context.pkt_desc_id = ((context.pkt_desc_id + 1) % dma_ch_cfg->tdrl);
1438 }
1439 context.descmeta->next_to_use = context.pkt_desc_id;
1440
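/* Wrap the tail address back to the start of the ring once it passes the last descriptor */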
1441 if (context.descmeta->desc_tail_addr ==
1442 (mem_addr_t)POINTER_TO_UINT(
1443 (struct xgmac_dma_tx_desc *)(dev_data->dma_tx_desc +
1444 (context.q_id * dma_ch_cfg->tdrl) +
1445 dma_ch_cfg->tdrl))) {
1446 context.descmeta->desc_tail_addr = (mem_addr_t)POINTER_TO_UINT(
1447 (struct xgmac_dma_tx_desc *)(dev_data->dma_tx_desc +
1448 (context.q_id * dma_ch_cfg->tdrl)));
1449 }
1450
1451 /* Update the descriptor tail pointer to DMA channel */
1452 update_desc_tail_ptr(dev, context.q_id, (uint32_t)context.descmeta->desc_tail_addr);
1453 /* unlock the TX desc ring */
1454 (void)k_mutex_unlock(&(context.descmeta->ring_lock));
1455
1456 UPDATE_ETH_STATS_TX_BYTE_CNT(dev_data, net_pkt_get_len(pkt));
1457 UPDATE_ETH_STATS_TX_PKT_CNT(dev_data, 1u);
1458
1459 return 0;
1460
1461 abort_tx:
1462 /* Abort the packet transmission, release the claimed descriptors and return an error code */
1463 for (uint16_t desc_id = context.descmeta->next_to_use; desc_id != context.pkt_desc_id;
1464 desc_id = ((desc_id + 1) % dma_ch_cfg->tdrl)) {
1465 context.tx_desc =
1466 (struct xgmac_dma_tx_desc *)(dev_data->dma_tx_desc +
1467 (context.q_id * dma_ch_cfg->tdrl) + desc_id);
1468 context.tx_desc->tdes0 = 0u;
1469 context.tx_desc->tdes1 = 0u;
1470 context.tx_desc->tdes2 = 0u;
1471 context.tx_desc->tdes3 = 0u;
1472 k_sem_give(&context.descmeta->free_tx_descs_sem);
1473 }
1474 (void)k_mutex_unlock(&(context.descmeta->ring_lock));
1475 LOG_DBG("%s: %p packet unreferenced after dropping", dev->name, pkt);
1476 net_pkt_unref(pkt);
1477 UPDATE_ETH_STATS_TX_DROP_PKT_CNT(dev_data, 1u);
1478 return -EIO;
1479 }
1480
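/**
 * @brief Derive the PHY advertised link speeds
 * Translates the auto-negotiation, duplex and link speed settings into the
 * corresponding set of PHY advertised link speed flags.
 *
 * @param auto_neg Auto-negotiation enabled
 * @param duplex_mode True for full duplex, false for half duplex
 * @param link_speed Configured link speed
 * @return Bitmap of advertised link speeds
 */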
1481 static enum phy_link_speed get_phy_adv_speeds(bool auto_neg, bool duplex_mode,
1482 enum eth_dwc_xgmac_link_speed link_speed)
1483 {
1484 enum phy_link_speed adv_speeds = 0u;
1485
1486 if (auto_neg) {
1487 adv_speeds = LINK_FULL_1000BASE_T | LINK_HALF_1000BASE_T | LINK_HALF_100BASE_T |
1488 LINK_FULL_100BASE_T | LINK_HALF_10BASE_T | LINK_FULL_10BASE_T;
1489 } else {
1490 if (duplex_mode) {
1491 switch (link_speed) {
1492 case LINK_1GBIT:
1493 adv_speeds = LINK_FULL_1000BASE_T;
1494 break;
1495 case LINK_100MBIT:
1496 adv_speeds = LINK_FULL_100BASE_T;
1497 break;
1498 default:
1499 adv_speeds = LINK_FULL_10BASE_T;
1500 }
1501 } else {
1502 switch (link_speed) {
1503 case LINK_1GBIT:
1504 adv_speeds = LINK_HALF_1000BASE_T;
1505 break;
1506 case LINK_100MBIT:
1507 adv_speeds = LINK_HALF_100BASE_T;
1508 break;
1509 default:
1510 adv_speeds = LINK_HALF_10BASE_T;
1511 }
1512 }
1513 }
1514 return adv_speeds;
1515 }
1516 #ifdef CONFIG_ETH_DWC_XGMAC_HW_FILTERING
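/*
 * Scan the MAC address register slots for a free entry, i.e. one whose address
 * enable bit is cleared. Slot 0 is skipped because it holds the device's own
 * MAC address.
 */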
1517 static inline int32_t get_free_mac_addr_indx(const struct device *dev)
1518 {
1519 mem_addr_t ioaddr = get_reg_base_addr(dev);
1520 mem_addr_t reg_addr;
1521 uint32_t reg_val;
1522
1523 for (uint32_t idx = 1u; idx < XGMAC_MAX_MAC_ADDR_COUNT; idx++) {
1524 reg_addr = ioaddr + XGMAC_CORE_ADDRx_HIGH(idx);
1525 reg_val = sys_read32(reg_addr);
1526 if (!(reg_val & CORE_MAC_ADDRESS1_HIGH_AE_SET_MSK)) {
1527 return idx;
1528 }
1529 }
1530 LOG_ERR("%s, MAC address filter failed. All MAC address slots are in use", dev->name);
1531 return -EIO;
1532 }
1533
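/*
 * Disable the hardware filter entry whose programmed address matches the given
 * MAC address by clearing its address enable bit.
 */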
1534 static inline void disable_filter_for_mac_addr(const struct device *dev, uint8_t *addr)
1535 {
1536 mem_addr_t ioaddr = get_reg_base_addr(dev);
1537 mem_addr_t reg_addr;
1538
1539 for (uint32_t idx = 1u; idx < XGMAC_MAX_MAC_ADDR_COUNT; idx++) {
1540 reg_addr = ioaddr + XGMAC_CORE_ADDRx_HIGH(idx) + 2u;
1541 if (!(memcmp((uint8_t *)reg_addr, addr, 6u))) {
1542 sys_write32(CORE_MAC_ADDRESS1_HIGH_AE_CLR_MSK, ioaddr + XGMAC_CORE_ADDRx_HIGH(idx));
1543 sys_write32(CORE_MAC_ADDRESS1_LOW_ADDRLO_SET_MSK,
1544 ioaddr + XGMAC_CORE_ADDRx_LOW(idx));
1545 }
1546 }
1547 }
1548 #endif
1549 /**
1550 * @brief XGMAC set config function
1551 * XGMAC set config function updates the existing MAC settings.
1552 *
1553 * @param dev Pointer to the ethernet device
1554 * @param type Type of configuration
1555 * @param config Pointer to configuration data
1556 * @retval 0 configuration updated successfully
1557 * @retval -EALREADY if the requested configuration matches the existing configuration
1558 * @retval -ENOTSUP for an unsupported config type
1559 */
1561 static int eth_dwc_xgmac_set_config(const struct device *dev, enum ethernet_config_type type,
1562 const struct ethernet_config *config)
1563 {
1564 const struct eth_dwc_xgmac_config *dev_conf = (struct eth_dwc_xgmac_config *)dev->config;
1565 struct eth_dwc_xgmac_dev_data *dev_data = (struct eth_dwc_xgmac_dev_data *)dev->data;
1566 const struct device *phy = dev_conf->phy_dev;
1567 const struct ethphy_driver_api *phy_api = phy->api;
1568 enum phy_link_speed adv_speeds;
1569
1570 int retval = 0;
1571
1572 (void)k_mutex_lock(&dev_data->dev_cfg_lock, K_FOREVER);
1573 switch (type) {
1574 case ETHERNET_CONFIG_TYPE_AUTO_NEG:
1575 if (dev_data->auto_neg != config->auto_negotiation) {
1576 dev_data->auto_neg = config->auto_negotiation;
1577 adv_speeds =
1578 get_phy_adv_speeds(dev_data->auto_neg, dev_data->enable_full_duplex,
1579 dev_data->link_speed);
1580 retval = phy_api->cfg_link(phy, adv_speeds);
1581 } else {
1582 retval = -EALREADY;
1583 }
1584 break;
1585 case ETHERNET_CONFIG_TYPE_LINK:
1586 if ((config->l.link_10bt && dev_data->link_speed == LINK_10MBIT) ||
1587 (config->l.link_100bt && dev_data->link_speed == LINK_100MBIT) ||
1588 (config->l.link_1000bt && dev_data->link_speed == LINK_1GBIT)) {
1589 retval = -EALREADY;
1590 break;
1591 }
1592
1593 if (config->l.link_1000bt) {
1594 dev_data->link_speed = LINK_1GBIT;
1595 } else if (config->l.link_100bt) {
1596 dev_data->link_speed = LINK_100MBIT;
1597 } else if (config->l.link_10bt) {
1598 dev_data->link_speed = LINK_10MBIT;
1599 }
1600 adv_speeds = get_phy_adv_speeds(dev_data->auto_neg, dev_data->enable_full_duplex,
1601 dev_data->link_speed);
1602 retval = phy_api->cfg_link(phy, adv_speeds);
1603 break;
1604 case ETHERNET_CONFIG_TYPE_DUPLEX:
1605 if (config->full_duplex == dev_data->enable_full_duplex) {
1606 retval = -EALREADY;
1607 break;
1608 }
1609 dev_data->enable_full_duplex = config->full_duplex;
1610
1611 adv_speeds = get_phy_adv_speeds(dev_data->auto_neg, dev_data->enable_full_duplex,
1612 dev_data->link_speed);
1613 retval = phy_api->cfg_link(phy, adv_speeds);
1614 break;
1615 case ETHERNET_CONFIG_TYPE_MAC_ADDRESS:
1616 memcpy(dev_data->mac_addr, config->mac_address.addr, ETH_MAC_ADDRESS_SIZE);
1617 retval = net_if_set_link_addr(dev_data->iface, dev_data->mac_addr,
1618 ETH_MAC_ADDRESS_SIZE, NET_LINK_ETHERNET);
1619 if (retval == 0) {
1620 dwxgmac_set_mac_addr_by_idx(dev, dev_data->mac_addr, 0u, false);
1621 }
1622 break;
1623 #if (!CONFIG_ETH_DWC_XGMAC_PROMISCUOUS_EXCEPTION && CONFIG_NET_PROMISCUOUS_MODE)
1624
1625 case ETHERNET_CONFIG_TYPE_PROMISC_MODE:
1626 mem_addr_t ioaddr = get_reg_base_addr(dev);
1627
1628 if (config->promisc_mode != dev_data->promisc_mode) {
1629 uint32_t reg_val = sys_read32(ioaddr + CORE_MAC_PACKET_FILTER_OFST);
1630
1631 dev_data->promisc_mode = config->promisc_mode;
1632 reg_val &= CORE_MAC_PACKET_FILTER_PR_CLR_MSK;
1633 reg_val |= CORE_MAC_PACKET_FILTER_PR_SET(dev_data->promisc_mode);
1634 sys_write32(reg_val, ioaddr + CORE_MAC_PACKET_FILTER_OFST);
1635 } else {
1636 retval = -EALREADY;
1637 }
1638 break;
1639
1640 #endif
1641
1642 #ifdef CONFIG_ETH_DWC_XGMAC_HW_FILTERING
1643
1644 case ETHERNET_CONFIG_TYPE_FILTER:
1645 if (!(config->filter.set)) {
1646 disable_filter_for_mac_addr(dev,
1647 (uint8_t *)config->filter.mac_address.addr);
1648 } else {
1649 int32_t mac_idx = get_free_mac_addr_indx(dev);
1650 
1651 if (mac_idx > 0) {
1652 dwxgmac_set_mac_addr_by_idx(
1653 dev, (uint8_t *)config->filter.mac_address.addr, mac_idx,
1654 config->filter.type);
1655 } else {
1656 retval = -EIO;
1657 }
1658 }
1659 break;
1660
1661 #endif
1662
1663 default:
1664 retval = -ENOTSUP;
1665 break;
1666 }
1667 k_mutex_unlock(&dev_data->dev_cfg_lock);
1668
1669 return retval;
1670 }
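
/*
 * Note: applications do not call eth_dwc_xgmac_set_config() directly; the
 * ethernet L2 invokes it through the ethernet_api vtable below. One way to
 * reach it from application code is a net_mgmt request, for example
 * (illustrative sketch only, not part of this driver):
 *
 *   struct ethernet_req_params params = {0};
 *
 *   memcpy(params.mac_address.addr, new_mac, sizeof(params.mac_address.addr));
 *   net_mgmt(NET_REQUEST_ETHERNET_SET_MAC_ADDRESS, iface, &params, sizeof(params));
 */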
1671 /**
1672 * @brief XGMAC get config function
1673 * XGMAC get config function reads the existing MAC settings.
1674 *
1675 * @param dev Pointer to the ethernet device
1676 * @param type Type of configuration
1677 * @param config Pointer to configuration data
1678 * @retval 0 configuration read successfully
1679 * @retval -ENOTSUP for an unsupported config type
1680 */
1681 static int eth_dwc_xgmac_get_config(const struct device *dev, enum ethernet_config_type type,
1682 struct ethernet_config *config)
1683 {
1684 struct eth_dwc_xgmac_dev_data *dev_data = (struct eth_dwc_xgmac_dev_data *)dev->data;
1685
1686 switch (type) {
1687 case ETHERNET_CONFIG_TYPE_AUTO_NEG:
1688 config->auto_negotiation = dev_data->auto_neg;
1689 break;
1690 case ETHERNET_CONFIG_TYPE_LINK:
1691 if (dev_data->link_speed == LINK_1GBIT) {
1692 config->l.link_1000bt = true;
1693 } else if (dev_data->link_speed == LINK_100MBIT) {
1694 config->l.link_100bt = true;
1695 } else if (dev_data->link_speed == LINK_10MBIT) {
1696 config->l.link_10bt = true;
1697 }
1698 break;
1699 case ETHERNET_CONFIG_TYPE_DUPLEX:
1700 config->full_duplex = dev_data->enable_full_duplex;
1701 break;
1702 case ETHERNET_CONFIG_TYPE_MAC_ADDRESS:
1703 memcpy(config->mac_address.addr, dev_data->mac_addr, ETH_MAC_ADDRESS_SIZE);
1704 break;
1705 #if (!CONFIG_ETH_DWC_XGMAC_PROMISCUOUS_EXCEPTION && CONFIG_NET_PROMISCUOUS_MODE)
1706 case ETHERNET_CONFIG_TYPE_PROMISC_MODE:
1707 config->promisc_mode = dev_data->promisc_mode;
1708 break;
1709 #endif
1710 default:
1711 return -ENOTSUP;
1712 }
1713
1714 return 0;
1715 }
1716
1717 /**
1718 * @brief XGMAC capability request function
1719 * Returns the capabilities of the XGMAC controller as an enumeration.
1720 * All of the data returned is derived from the driver's compile-time
1721 * configuration options.
1722 *
1723 * @param dev Pointer to the ethernet device
1724 * @return Enumeration containing the current XGMAC device's capabilities
1725 */
1726 static enum ethernet_hw_caps eth_dwc_xgmac_get_capabilities(const struct device *dev)
1727 {
1728 ARG_UNUSED(dev);
1729 enum ethernet_hw_caps caps = (enum ethernet_hw_caps)0;
1730
1731 caps = (ETHERNET_LINK_1000BASE_T | ETHERNET_LINK_100BASE_T | ETHERNET_LINK_10BASE_T |
1732 ETHERNET_AUTO_NEGOTIATION_SET | ETHERNET_DUPLEX_SET);
1733
1734 #ifdef CONFIG_ETH_DWC_XGMAC_RX_CS_OFFLOAD
1735 caps |= ETHERNET_HW_RX_CHKSUM_OFFLOAD;
1736 #endif
1737
1738 #ifdef CONFIG_ETH_DWC_XGMAC_TX_CS_OFFLOAD
1739 caps |= ETHERNET_HW_TX_CHKSUM_OFFLOAD;
1740 #endif
1741
1742 #if (!CONFIG_ETH_DWC_XGMAC_PROMISCUOUS_EXCEPTION && CONFIG_NET_PROMISCUOUS_MODE)
1743 caps |= ETHERNET_PROMISC_MODE;
1744 #endif
1745
1746 #ifdef CONFIG_ETH_DWC_XGMAC_HW_FILTERING
1747 caps |= ETHERNET_HW_FILTERING;
1748 #endif
1749
1750 return caps;
1751 }
1752
1753 #if defined(CONFIG_NET_STATISTICS_ETHERNET)
1754 /**
1755 * @brief XGMAC statistics data request function
1756 * Returns a pointer to the statistics data of the current XGMAC controller.
1757 *
1758 * @param dev Pointer to the ethernet device
1759 * @return Pointer to the current XGMAC device's statistics data
1760 */
1761 static struct net_stats_eth *eth_dwc_xgmac_stats(const struct device *dev)
1762 {
1763 struct eth_dwc_xgmac_dev_data *dev_data = (struct eth_dwc_xgmac_dev_data *)dev->data;
1764
1765 return &dev_data->stats;
1766 }
1767 #endif
1768
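/* Ethernet driver API callbacks registered with the Zephyr network stack */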
1769 static const struct ethernet_api eth_dwc_xgmac_apis = {
1770 .iface_api.init = eth_dwc_xgmac_iface_init,
1771 .send = eth_dwc_xgmac_send,
1772 .start = eth_dwc_xgmac_start_device,
1773 .stop = eth_dwc_xgmac_stop_device,
1774 .get_capabilities = eth_dwc_xgmac_get_capabilities,
1775 .set_config = eth_dwc_xgmac_set_config,
1776 .get_config = eth_dwc_xgmac_get_config,
1777 #ifdef CONFIG_NET_STATISTICS_ETHERNET
1778 .get_stats = eth_dwc_xgmac_stats,
1779 #endif /* CONFIG_NET_STATISTICS_ETHERNET */
1780 };
1781
1782 /* Interrupt configuration function macro */
1783 #define ETH_DWC_XGMAC_CONFIG_IRQ_FUNC(port) \
1784 static void eth_dwc_xgmac##port##_irq_config(const struct device *dev) \
1785 { \
1786 ARG_UNUSED(dev); \
1787 IRQ_CONNECT(DT_INST_IRQN(port), DT_INST_IRQ(port, priority), eth_dwc_xgmac_isr, \
1788 DEVICE_DT_INST_GET(port), 0); \
1789 } \
1790 static void eth_dwc_xgmac##port##_irq_enable(const struct device *dev, bool en) \
1791 { \
1792 ARG_UNUSED(dev); \
1793 en ? irq_enable(DT_INST_IRQN(port)) : irq_disable(DT_INST_IRQN(port)); \
1794 } \
1795 volatile uint32_t eth_dwc_xgmac##port##_dma_ch_int_status[DT_INST_PROP(port, num_dma_ch)];
1796
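/*
 * Per-instance allocation macro for the TX packet pointer table, RX buffer
 * pointer table, DMA descriptor rings and the descriptor rings' meta data.
 */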
1797 #define ETH_DWC_XGMAC_ALLOC_DMA_DESC(port) \
1798 mem_addr_t eth_dwc_xgmac##port##_tx_pkts[CHLCNT(port)][MAX_TX_RING(port)]; \
1799 mem_addr_t eth_dwc_xgmac##port##_rx_buffs[CHLCNT(port)][MAX_RX_RING(port)] \
1800 [RX_FRAGS_PER_DESC]; \
1801 static struct xgmac_dma_rx_desc \
1802 eth_dwc_xgmac##port##_rx_desc[CHLCNT(port)][MAX_RX_RING(port)] __aligned(32); \
1803 static struct xgmac_dma_tx_desc \
1804 eth_dwc_xgmac##port##_tx_desc[CHLCNT(port)][MAX_TX_RING(port)] __aligned(32); \
1805 static struct xgmac_dma_rx_desc_meta eth_dwc_xgmac##port##_rx_desc_meta[CHLCNT(port)]; \
1806 static struct xgmac_dma_tx_desc_meta eth_dwc_xgmac##port##_tx_desc_meta[CHLCNT(port)];
1807
1808 #define DWC_XGMAC_NUM_QUEUES(port) DT_INST_PROP(port, num_queues)
1809
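/* Per-instance traffic class and queue configuration data declaration macro */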
1810 #define ETH_DWC_XGMAC_DEV_CONFIG_TCQ(port) \
1811 static struct xgmac_tcq_config eth_dwc_xgmac##port##_tcq = { \
1812 .rx_q_ddma_en = DT_INST_PROP(port, rxq_dyn_dma_en), \
1813 .rx_q_dma_chnl_sel = DT_INST_PROP(port, rxq_dma_ch_sel), \
1814 .tx_q_size = DT_INST_PROP(port, txq_size), \
1815 .q_to_tc_map = DT_INST_PROP(port, map_queue_tc), \
1816 .ttc = DT_INST_PROP(port, tx_threshold_ctrl), \
1817 .rx_q_size = DT_INST_PROP(port, rxq_size), \
1818 .tsf_en = DT_INST_PROP(port, tx_store_fwrd_en), \
1819 .hfc_en = DT_INST_PROP(port, hfc_en), \
1820 .cs_err_pkt_drop_dis = DT_INST_PROP(port, cs_error_pkt_drop_dis), \
1821 .rsf_en = DT_INST_PROP(port, rx_store_fwrd_en), \
1822 .fep_en = DT_INST_PROP(port, fep_en), \
1823 .fup_en = DT_INST_PROP(port, fup_en), \
1824 .rtc = DT_INST_PROP(port, rx_threshold_ctrl), \
1825 .pstc = DT_INST_PROP(port, priorities_map_tc), \
1826 };
1827 /* Device run-time data declaration macro */
1828 #define ETH_DWC_XGMAC_DEV_DATA(port) \
1829 static struct eth_dwc_xgmac_dev_data eth_dwc_xgmac##port##_dev_data = { \
1830 .mac_addr = DT_INST_PROP(port, local_mac_address), \
1831 .link_speed = DT_INST_PROP(port, max_speed), \
1832 .auto_neg = true, \
1833 .enable_full_duplex = DT_INST_PROP(port, full_duplex_mode_en), \
1834 .dma_rx_desc = &eth_dwc_xgmac##port##_rx_desc[0u][0u], \
1835 .dma_tx_desc = &eth_dwc_xgmac##port##_tx_desc[0u][0u], \
1836 .tx_desc_meta = eth_dwc_xgmac##port##_tx_desc_meta, \
1837 .rx_desc_meta = eth_dwc_xgmac##port##_rx_desc_meta, \
1838 .tx_pkts = &eth_dwc_xgmac##port##_tx_pkts[0u][0u], \
1839 .rx_buffs = &eth_dwc_xgmac##port##_rx_buffs[0u][0u][0u], \
1840 .irq_cntxt_data.dma_chnl_interrupt_sts = eth_dwc_xgmac##port##_dma_ch_int_status, \
1841 };
1842
1843 /* Device configuration data declaration macro */
1844 #define ETH_DWC_XGMAC_DEV_CONFIG(port) \
1845 static const struct eth_dwc_xgmac_config eth_dwc_xgmac##port##_dev_cfg = { \
1846 DEVICE_MMIO_ROM_INIT(DT_DRV_INST(port)), \
1847 .random_mac_address = DT_INST_PROP(port, zephyr_random_mac_address), \
1848 .num_tx_Qs = DT_INST_PROP(port, num_tx_queues), \
1849 .num_rx_Qs = DT_INST_PROP(port, num_rx_queues), \
1850 .num_dma_chnl = DT_INST_PROP(port, num_dma_ch), \
1851 .num_TCs = DT_INST_PROP(port, num_tc), \
1852 .mtu = DT_INST_PROP(port, max_frame_size), \
1853 .tx_fifo_size = DT_INST_PROP(port, tx_fifo_size), \
1854 .rx_fifo_size = DT_INST_PROP(port, rx_fifo_size), \
1855 .dma_cfg.wr_osr_lmt = DT_INST_PROP(port, wr_osr_lmt), \
1856 .dma_cfg.rd_osr_lmt = DT_INST_PROP(port, rd_osr_lmt), \
1857 .dma_cfg.edma_tdps = DT_INST_PROP(port, edma_tdps), \
1858 .dma_cfg.edma_rdps = DT_INST_PROP(port, edma_rdps), \
1859 .dma_cfg.ubl = DT_INST_PROP(port, ubl), \
1860 .dma_cfg.blen4 = DT_INST_PROP(port, blen4), \
1861 .dma_cfg.blen8 = DT_INST_PROP(port, blen8), \
1862 .dma_cfg.blen16 = DT_INST_PROP(port, blen16), \
1863 .dma_cfg.blen32 = DT_INST_PROP(port, blen32), \
1864 .dma_cfg.blen64 = DT_INST_PROP(port, blen64), \
1865 .dma_cfg.blen128 = DT_INST_PROP(port, blen128), \
1866 .dma_cfg.blen256 = DT_INST_PROP(port, blen256), \
1867 .dma_cfg.aal = DT_INST_PROP(port, aal), \
1868 .dma_cfg.eame = DT_INST_PROP(port, eame), \
1869 .dma_chnl_cfg.pblx8 = DT_INST_PROP(port, pblx8), \
1870 .dma_chnl_cfg.mss = DT_INST_PROP(port, dma_ch_mss), \
1871 .dma_chnl_cfg.tdrl = DT_INST_PROP(port, dma_ch_tdrl), \
1872 .dma_chnl_cfg.rdrl = DT_INST_PROP(port, dma_ch_rdrl), \
1873 .dma_chnl_cfg.arbs = DT_INST_PROP(port, dma_ch_arbs), \
1874 .dma_chnl_cfg.rxpbl = DT_INST_PROP(port, dma_ch_rxpbl), \
1875 .dma_chnl_cfg.txpbl = DT_INST_PROP(port, dma_ch_txpbl), \
1876 .dma_chnl_cfg.sph = DT_INST_PROP(port, dma_ch_sph), \
1877 .dma_chnl_cfg.tse = DT_INST_PROP(port, dma_ch_tse), \
1878 .dma_chnl_cfg.osp = DT_INST_PROP(port, dma_ch_osp), \
1879 .mtl_cfg.raa = DT_INST_PROP(port, mtl_raa), \
1880 .mtl_cfg.etsalg = DT_INST_PROP(port, mtl_etsalg), \
1881 .mac_cfg.gpsl = DT_INST_PROP(port, gaint_pkt_size_limit), \
1882 .mac_cfg.arp_offload_en = ETH_DWC_XGMAC_ARP_OFFLOAD, \
1883 .mac_cfg.je = DT_INST_PROP(port, jumbo_pkt_en), \
1884 .tcq_config = &eth_dwc_xgmac##port##_tcq, \
1885 .phy_dev = \
1886 (const struct device *)DEVICE_DT_GET(DT_INST_PHANDLE(port, phy_handle)), \
1887 .irq_config_fn = eth_dwc_xgmac##port##_irq_config, \
1888 .irq_enable_fn = eth_dwc_xgmac##port##_irq_enable, \
1889 };
1890
1891 /* Device initialization macro */
1892 #define ETH_DWC_XGMAC_NET_DEV_INIT(port) \
1893 ETH_NET_DEVICE_DT_INST_DEFINE(port, eth_dwc_xgmac_dev_init, NULL, \
1894 &eth_dwc_xgmac##port##_dev_data, \
1895 &eth_dwc_xgmac##port##_dev_cfg, CONFIG_ETH_INIT_PRIORITY, \
1896 &eth_dwc_xgmac_apis, DT_INST_PROP(port, max_frame_size));
1897
1898 /* Top-level device initialization macro - bundles all of the above */
1899 #define ETH_DWC_XGMAC_INITIALIZE(port) \
1900 ETH_DWC_XGMAC_CONFIG_IRQ_FUNC(port) \
1901 ETH_DWC_XGMAC_ALLOC_DMA_DESC(port) \
1902 ETH_DWC_XGMAC_DEV_DATA(port) \
1903 ETH_DWC_XGMAC_DEV_CONFIG_TCQ(port) \
1904 ETH_DWC_XGMAC_DEV_CONFIG(port) \
1905 ETH_DWC_XGMAC_NET_DEV_INIT(port)
1906
1907 /**
1908 * Insert the configuration & run-time data for all XGMAC instances which
1909 * are enabled in the device tree of the current target board.
1910 */
1911 DT_INST_FOREACH_STATUS_OKAY(ETH_DWC_XGMAC_INITIALIZE)
1912