/*
 * Copyright 2024 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT nxp_enet_qos_mac

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(eth_nxp_enet_qos_mac, CONFIG_ETHERNET_LOG_LEVEL);

#include <zephyr/net/phy.h>
#include <zephyr/kernel/thread_stack.h>
#include <zephyr/sys_clock.h>
#include <ethernet/eth_stats.h>
#include "../eth.h"
#include "nxp_enet_qos_priv.h"

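/* Flags written when handing an RX descriptor (back) to the DMA: mark the
 * descriptor as DMA-owned, request an interrupt on completion, and mark the
 * buffer 1 address as valid.
 */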
static const uint32_t rx_desc_refresh_flags =
        OWN_FLAG | RX_INTERRUPT_ON_COMPLETE_FLAG | BUF1_ADDR_VALID_FLAG;

K_THREAD_STACK_DEFINE(enet_qos_rx_stack, CONFIG_ETH_NXP_ENET_QOS_RX_THREAD_STACK_SIZE);
static struct k_work_q rx_work_queue;

static int rx_queue_init(void)
{
        struct k_work_queue_config cfg = {.name = "ENETQOS_RX"};

        k_work_queue_init(&rx_work_queue);
        k_work_queue_start(&rx_work_queue, enet_qos_rx_stack,
                           K_THREAD_STACK_SIZEOF(enet_qos_rx_stack),
                           K_PRIO_COOP(CONFIG_ETH_NXP_ENET_QOS_RX_THREAD_PRIORITY),
                           &cfg);

        return 0;
}

SYS_INIT(rx_queue_init, POST_KERNEL, 0);

static void eth_nxp_enet_qos_iface_init(struct net_if *iface)
{
        const struct device *dev = net_if_get_device(iface);
        struct nxp_enet_qos_mac_data *data = dev->data;

        net_if_set_link_addr(iface, data->mac_addr.addr,
                             sizeof(((struct net_eth_addr *)NULL)->addr), NET_LINK_ETHERNET);

        if (data->iface == NULL) {
                data->iface = iface;
        }

        ethernet_init(iface);
}

static int eth_nxp_enet_qos_tx(const struct device *dev, struct net_pkt *pkt)
{
        const struct nxp_enet_qos_mac_config *config = dev->config;
        struct nxp_enet_qos_mac_data *data = dev->data;
        enet_qos_t *base = config->base;

        volatile union nxp_enet_qos_tx_desc *tx_desc_ptr = data->tx.descriptors;
        volatile union nxp_enet_qos_tx_desc *last_desc_ptr;

        struct net_buf *fragment = pkt->frags;
        int frags_count = 0, total_bytes = 0;

        /* Only allow sending up to the maximum normal packet size */
        while (fragment != NULL) {
                frags_count++;
                total_bytes += fragment->len;
                fragment = fragment->frags;
        }

        if (total_bytes > config->hw_info.max_frame_len ||
            frags_count > NUM_TX_BUFDESC) {
                LOG_ERR("TX packet too large");
                return -E2BIG;
        }

        /* One TX at a time in the current implementation */
        k_sem_take(&data->tx.tx_sem, K_FOREVER);

        net_pkt_ref(pkt);

        data->tx.pkt = pkt;
        /* Need to save the header because the ethernet stack
         * otherwise discards it from the packet after this call
         */
        data->tx.tx_header = pkt->frags;

        LOG_DBG("Setting up TX descriptors for packet %p", pkt);

        /* Reset the descriptors */
        memset((void *)data->tx.descriptors, 0, sizeof(union nxp_enet_qos_tx_desc) * frags_count);

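        /* Each fragment is mapped to its own descriptor (zero copy): buf1_addr
         * points at the fragment data, control1 carries the fragment length,
         * and control2 carries the total frame length.
         */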
        /* Setting up the descriptors */
        fragment = pkt->frags;
        tx_desc_ptr->read.control2 |= FIRST_TX_DESCRIPTOR_FLAG;
        for (int i = 0; i < frags_count; i++) {
                net_pkt_frag_ref(fragment);

                tx_desc_ptr->read.buf1_addr = (uint32_t)fragment->data;
                tx_desc_ptr->read.control1 = FIELD_PREP(0x3FFF, fragment->len);
                tx_desc_ptr->read.control2 |= FIELD_PREP(0x7FFF, total_bytes);

                fragment = fragment->frags;
                tx_desc_ptr++;
        }
        last_desc_ptr = tx_desc_ptr - 1;
        last_desc_ptr->read.control2 |= LAST_TX_DESCRIPTOR_FLAG;
        last_desc_ptr->read.control1 |= TX_INTERRUPT_ON_COMPLETE_FLAG;

        LOG_DBG("Starting TX DMA on packet %p", pkt);

        /* Set the DMA ownership of all the used descriptors */
        for (int i = 0; i < frags_count; i++) {
                data->tx.descriptors[i].read.control2 |= OWN_FLAG;
        }

        /* This implementation is naive: it reprograms the ring length for
         * every TX send. There is room for optimization here.
         */
        base->DMA_CH[0].DMA_CHX_TXDESC_RING_LENGTH = frags_count - 1;
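        /* Moving the tail pointer past the prepared descriptors hands them to
         * the DMA, which starts fetching and transmitting them.
         */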
        base->DMA_CH[0].DMA_CHX_TXDESC_TAIL_PTR =
                ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_TXDESC_TAIL_PTR, TDTP,
                                  ENET_QOS_ALIGN_ADDR_SHIFT((uint32_t)tx_desc_ptr));

        return 0;
}

static void tx_dma_done(struct k_work *work)
{
        struct nxp_enet_qos_tx_data *tx_data =
                CONTAINER_OF(work, struct nxp_enet_qos_tx_data, tx_done_work);
        struct nxp_enet_qos_mac_data *data =
                CONTAINER_OF(tx_data, struct nxp_enet_qos_mac_data, tx);
        struct net_pkt *pkt = tx_data->pkt;
        struct net_buf *fragment = pkt->frags;

        LOG_DBG("TX DMA completed on packet %p", pkt);

        /* Returning the buffers and packet to the pool */
        while (fragment != NULL) {
                net_pkt_frag_unref(fragment);
                fragment = fragment->frags;
        }

        net_pkt_frag_unref(data->tx.tx_header);
        net_pkt_unref(pkt);

        eth_stats_update_pkts_tx(data->iface);

        /* Allows another send */
        k_sem_give(&data->tx.tx_sem);
}

static enum ethernet_hw_caps eth_nxp_enet_qos_get_capabilities(const struct device *dev)
{
        return ETHERNET_LINK_100BASE_T | ETHERNET_LINK_10BASE_T;
}

static void eth_nxp_enet_qos_rx(struct k_work *work)
{
        struct nxp_enet_qos_rx_data *rx_data =
                CONTAINER_OF(work, struct nxp_enet_qos_rx_data, rx_work);
        struct nxp_enet_qos_mac_data *data =
                CONTAINER_OF(rx_data, struct nxp_enet_qos_mac_data, rx);
        volatile union nxp_enet_qos_rx_desc *desc_arr = data->rx.descriptors;
        volatile union nxp_enet_qos_rx_desc *desc;
        struct net_pkt *pkt;
        struct net_buf *new_buf;
        struct net_buf *buf;
        size_t pkt_len;

        /* We are going to find all of the descriptors we own and update them */
        for (int i = 0; i < NUM_RX_BUFDESC; i++) {
                desc = &desc_arr[i];

                if (desc->write.control3 & OWN_FLAG) {
                        /* The DMA owns the descriptor, we cannot touch it */
                        continue;
                }

                /* Otherwise, we found a packet that we need to process */
                pkt = net_pkt_rx_alloc(K_NO_WAIT);

                if (!pkt) {
                        LOG_ERR("Could not alloc RX pkt");
                        goto error;
                }

                LOG_DBG("Created RX pkt %p", pkt);

                /* We need to know if we can replace the reserved fragment in advance.
                 * At no point can we allow the driver to have fewer than the number of
                 * reserved buffers it needs to function, so we will not give up our
                 * previous buffer unless we know we can get a new one.
                 */
                new_buf = net_pkt_get_frag(pkt, CONFIG_NET_BUF_DATA_SIZE, K_NO_WAIT);
                if (new_buf == NULL) {
                        /* We have no choice but to lose the previous packet,
                         * as the buffer is more important. If we recv this packet,
                         * we don't know what the upper layer will do to our poor buffer.
                         */
                        LOG_ERR("No RX buf available");
                        goto error;
                }

                buf = data->rx.reserved_bufs[i];
                pkt_len = desc->write.control3 & DESC_RX_PKT_LEN;

                LOG_DBG("Receiving RX packet");

                /* Finally, we have decided that it is time to wrap the buffer nicely
                 * up within a packet, and try to send it. It's only one buffer,
                 * thanks to the ENET QOS hardware handling the fragmentation,
                 * so the construction of the packet is very simple.
                 */
                net_buf_add(buf, pkt_len);
                net_pkt_frag_insert(pkt, buf);
                if (net_recv_data(data->iface, pkt)) {
                        LOG_ERR("RECV failed");
                        /* Quite a shame. */
                        goto error;
                }

                LOG_DBG("Recycling RX buf");

                /* Fresh meat: swap in the new buffer and hand the descriptor back to the DMA */
                data->rx.reserved_bufs[i] = new_buf;
                desc->read.buf1_addr = (uint32_t)new_buf->data;
                desc->read.control |= rx_desc_refresh_flags;

                /* Record our glorious victory */
                eth_stats_update_pkts_rx(data->iface);
        }

        return;

error:
        net_pkt_unref(pkt);
        eth_stats_update_errors_rx(data->iface);
}

static void eth_nxp_enet_qos_mac_isr(const struct device *dev)
{
        const struct nxp_enet_qos_mac_config *config = dev->config;
        struct nxp_enet_qos_mac_data *data = dev->data;
        enet_qos_t *base = config->base;

        /* cleared on read */
        volatile uint32_t mac_interrupts = base->MAC_INTERRUPT_STATUS;
        volatile uint32_t mac_rx_tx_status = base->MAC_RX_TX_STATUS;
        volatile uint32_t dma_interrupts = base->DMA_INTERRUPT_STATUS;
        volatile uint32_t dma_ch0_interrupts = base->DMA_CH[0].DMA_CHX_STAT;

        mac_interrupts; mac_rx_tx_status;

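        /* Write all ones to the channel status register to acknowledge (clear)
         * the events that were just sampled.
         */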
        base->DMA_CH[0].DMA_CHX_STAT = 0xFFFFFFFF;

        if (ENET_QOS_REG_GET(DMA_INTERRUPT_STATUS, DC0IS, dma_interrupts)) {
                if (ENET_QOS_REG_GET(DMA_CH_DMA_CHX_STAT, TI, dma_ch0_interrupts)) {
                        k_work_submit(&data->tx.tx_done_work);
                }
                if (ENET_QOS_REG_GET(DMA_CH_DMA_CHX_STAT, RI, dma_ch0_interrupts)) {
                        k_work_submit_to_queue(&rx_work_queue, &data->rx.rx_work);
                }
        }
}

static void eth_nxp_enet_qos_phy_cb(const struct device *phy,
                                    struct phy_link_state *state, void *eth_dev)
{
        const struct device *dev = eth_dev;
        struct nxp_enet_qos_mac_data *data = dev->data;

        if (!data->iface) {
                return;
        }

        if (state->is_up) {
                net_eth_carrier_on(data->iface);
        } else {
                net_eth_carrier_off(data->iface);
        }

        LOG_INF("Link is %s", state->is_up ? "up" : "down");
}

static inline int enet_qos_dma_reset(enet_qos_t *base)
{
        /* Set the software reset of the DMA */
        base->DMA_MODE |= ENET_QOS_REG_PREP(DMA_MODE, SWR, 0b1);

        if (CONFIG_ETH_NXP_ENET_QOS_DMA_RESET_WAIT_TIME == 0) {
                /* spin and wait forever for the reset flag to clear */
                while (ENET_QOS_REG_GET(DMA_MODE, SWR, base->DMA_MODE)) {
                        ;
                }
                goto done;
        }

        int wait_chunk = DIV_ROUND_UP(CONFIG_ETH_NXP_ENET_QOS_DMA_RESET_WAIT_TIME,
                                      NUM_SWR_WAIT_CHUNKS);

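        /* Busy-wait in chunks, re-checking the SWR bit after each chunk, until
         * the reset completes or the configured wait budget is exhausted.
         */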
        for (int time_elapsed = 0;
             time_elapsed < CONFIG_ETH_NXP_ENET_QOS_DMA_RESET_WAIT_TIME;
             time_elapsed += wait_chunk) {

                k_busy_wait(wait_chunk);

                if (!ENET_QOS_REG_GET(DMA_MODE, SWR, base->DMA_MODE)) {
                        /* DMA cleared the bit */
                        goto done;
                }
        }

        /* All ENET QOS domain clocks must be running for the software reset to
         * clear; if this error occurs, check the PHY clock connection.
         */
        LOG_ERR("Can't clear SWR");
        return -EIO;

done:
        return 0;
}

static inline void enet_qos_dma_config_init(enet_qos_t *base)
{
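        /* Set the TX and RX programmable burst length (PBL) fields to 1 beat */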
        base->DMA_CH[0].DMA_CHX_TX_CTRL |=
                ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_TX_CTRL, TxPBL, 0b1);
        base->DMA_CH[0].DMA_CHX_RX_CTRL |=
                ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_RX_CTRL, RxPBL, 0b1);
}

static inline void enet_qos_mtl_config_init(enet_qos_t *base)
{
        base->MTL_QUEUE[0].MTL_TXQX_OP_MODE |=
                /* Flush the queue */
                ENET_QOS_REG_PREP(MTL_QUEUE_MTL_TXQX_OP_MODE, FTQ, 0b1);

        /* Wait for the flush to finish */
        while (ENET_QOS_REG_GET(MTL_QUEUE_MTL_TXQX_OP_MODE, FTQ,
                                base->MTL_QUEUE[0].MTL_TXQX_OP_MODE)) {
                ;
        }

        /* Enable only Transmit Queue 0 (optimization/configuration pending) with maximum size */
        base->MTL_QUEUE[0].MTL_TXQX_OP_MODE =
                /* Set the TX queue size */
                ENET_QOS_REG_PREP(MTL_QUEUE_MTL_TXQX_OP_MODE, TQS, 0b111) |
                /* Enable the TX queue */
                ENET_QOS_REG_PREP(MTL_QUEUE_MTL_TXQX_OP_MODE, TXQEN, 0b10);

        /* Enable only Receive Queue 0 (optimization/configuration pending) with maximum size */
        base->MTL_QUEUE[0].MTL_RXQX_OP_MODE |=
                /* Set the RX queue size */
                ENET_QOS_REG_PREP(MTL_QUEUE_MTL_RXQX_OP_MODE, RQS, 0b111) |
                /* Keep small packets */
                ENET_QOS_REG_PREP(MTL_QUEUE_MTL_RXQX_OP_MODE, FUP, 0b1);
}

static inline void enet_qos_mac_config_init(enet_qos_t *base,
                                            struct nxp_enet_qos_mac_data *data, uint32_t clk_rate)
{
        /* Set MAC address */
        base->MAC_ADDRESS0_HIGH =
                ENET_QOS_REG_PREP(MAC_ADDRESS0_HIGH, ADDRHI,
                                  data->mac_addr.addr[5] << 8 |
                                  data->mac_addr.addr[4]);
        base->MAC_ADDRESS0_LOW =
                ENET_QOS_REG_PREP(MAC_ADDRESS0_LOW, ADDRLO,
                                  data->mac_addr.addr[3] << 24 |
                                  data->mac_addr.addr[2] << 16 |
                                  data->mac_addr.addr[1] << 8 |
                                  data->mac_addr.addr[0]);

        /* Set the reference for 1 microsecond of ENET QOS CSR clock cycles */
        base->MAC_ONEUS_TIC_COUNTER =
                ENET_QOS_REG_PREP(MAC_ONEUS_TIC_COUNTER, TIC_1US_CNTR,
                                  (clk_rate / USEC_PER_SEC) - 1);

        base->MAC_CONFIGURATION |=
                /* For 10/100 Mbps operation */
                ENET_QOS_REG_PREP(MAC_CONFIGURATION, PS, 0b1) |
                /* Full duplex mode */
                ENET_QOS_REG_PREP(MAC_CONFIGURATION, DM, 0b1) |
                /* 100 Mbps mode */
                ENET_QOS_REG_PREP(MAC_CONFIGURATION, FES, 0b1) |
                /* Don't talk unless no one else is talking */
                ENET_QOS_REG_PREP(MAC_CONFIGURATION, ECRSFD, 0b1);

        /* Enable the MAC RX channel 0 */
        base->MAC_RXQ_CTRL[0] |=
                ENET_QOS_REG_PREP(MAC_RXQ_CTRL, RXQ0EN, 0b1);
}

static inline void enet_qos_start(enet_qos_t *base)
{
        /* Set start bits of the RX and TX DMAs */
        base->DMA_CH[0].DMA_CHX_RX_CTRL |=
                ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_RX_CTRL, SR, 0b1);
        base->DMA_CH[0].DMA_CHX_TX_CTRL |=
                ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_TX_CTRL, ST, 0b1);

        /* Enable interrupts */
        base->DMA_CH[0].DMA_CHX_INT_EN =
                /* Normal interrupts (includes tx, rx) */
                ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_INT_EN, NIE, 0b1) |
                /* Transmit interrupt */
                ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_INT_EN, TIE, 0b1) |
                /* Receive interrupt */
                ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_INT_EN, RIE, 0b1);
        base->MAC_INTERRUPT_ENABLE =
                /* Receive and Transmit IRQs */
                ENET_QOS_REG_PREP(MAC_INTERRUPT_ENABLE, TXSTSIE, 0b1) |
                ENET_QOS_REG_PREP(MAC_INTERRUPT_ENABLE, RXSTSIE, 0b1);

        /* Start the TX and RX on the MAC */
        base->MAC_CONFIGURATION |=
                ENET_QOS_REG_PREP(MAC_CONFIGURATION, TE, 0b1) |
                ENET_QOS_REG_PREP(MAC_CONFIGURATION, RE, 0b1);
}

static inline void enet_qos_tx_desc_init(enet_qos_t *base, struct nxp_enet_qos_tx_data *tx)
{
        memset((void *)tx->descriptors, 0, sizeof(union nxp_enet_qos_tx_desc) * NUM_TX_BUFDESC);

        base->DMA_CH[0].DMA_CHX_TXDESC_LIST_ADDR =
                /* Start of tx descriptors buffer */
                ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_TXDESC_LIST_ADDR, TDESLA,
                                  ENET_QOS_ALIGN_ADDR_SHIFT((uint32_t)tx->descriptors));
        base->DMA_CH[0].DMA_CHX_TXDESC_TAIL_PTR =
                /* Do not move the tail pointer past the start until send is requested */
                ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_TXDESC_TAIL_PTR, TDTP,
                                  ENET_QOS_ALIGN_ADDR_SHIFT((uint32_t)tx->descriptors));
        base->DMA_CH[0].DMA_CHX_TXDESC_RING_LENGTH =
                ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_TXDESC_RING_LENGTH, TDRL, NUM_TX_BUFDESC);
}

static inline int enet_qos_rx_desc_init(enet_qos_t *base, struct nxp_enet_qos_rx_data *rx)
{
        struct net_buf *buf;

        memset((void *)rx->descriptors, 0, sizeof(union nxp_enet_qos_rx_desc) * NUM_RX_BUFDESC);

        /* Here we reserve an RX buffer for each of the DMA descriptors. */
        for (int i = 0; i < NUM_RX_BUFDESC; i++) {
                buf = net_pkt_get_reserve_rx_data(CONFIG_NET_BUF_DATA_SIZE, K_NO_WAIT);
                if (buf == NULL) {
                        LOG_ERR("Missing a buf");
                        return -ENOMEM;
                }
                rx->reserved_bufs[i] = buf;
                rx->descriptors[i].read.buf1_addr = (uint32_t)buf->data;
                rx->descriptors[i].read.control |= rx_desc_refresh_flags;
        }

        /* Set up RX descriptors on channel 0 */
        base->DMA_CH[0].DMA_CHX_RXDESC_LIST_ADDR =
                /* Start of rx descriptors buffer */
                ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_RXDESC_LIST_ADDR, RDESLA,
                                  ENET_QOS_ALIGN_ADDR_SHIFT((uint32_t)&rx->descriptors[0]));
        base->DMA_CH[0].DMA_CHX_RXDESC_TAIL_PTR =
                /* When the DMA reaches the tail pointer, it suspends. Set to last descriptor */
                ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_RXDESC_TAIL_PTR, RDTP,
                                  ENET_QOS_ALIGN_ADDR_SHIFT((uint32_t)&rx->descriptors[NUM_RX_BUFDESC]));
        base->DMA_CH[0].DMA_CHX_RX_CONTROL2 =
                /* Ring length == Buffer size. Register is this value minus one. */
                ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_RX_CONTROL2, RDRL, NUM_RX_BUFDESC - 1);
        base->DMA_CH[0].DMA_CHX_RX_CTRL |=
                /* Set the DMA receive buffer size. The low 2 bits are not written to this field. */
                ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_RX_CTRL, RBSZ_13_Y, NET_ETH_MAX_FRAME_SIZE >> 2);

        return 0;
}

static int eth_nxp_enet_qos_mac_init(const struct device *dev)
{
        const struct nxp_enet_qos_mac_config *config = dev->config;
        struct nxp_enet_qos_mac_data *data = dev->data;
        struct nxp_enet_qos_config *module_cfg = ENET_QOS_MODULE_CFG(config->enet_dev);
        enet_qos_t *base = module_cfg->base;
        uint32_t clk_rate;
        int ret;

        /* Used to configure timings of the MAC */
        ret = clock_control_get_rate(module_cfg->clock_dev, module_cfg->clock_subsys, &clk_rate);
        if (ret) {
                return ret;
        }

        /* For reporting the status of the link connection */
        ret = phy_link_callback_set(config->phy_dev, eth_nxp_enet_qos_phy_cb, (void *)dev);
        if (ret) {
                return ret;
        }

        /* A random MAC overrides any local MAC that may have been initialized */
        if (config->random_mac) {
                gen_random_mac(data->mac_addr.addr,
                               NXP_OUI_BYTE_0, NXP_OUI_BYTE_1, NXP_OUI_BYTE_2);
        }

        /* This driver cannot work without interrupts. */
        if (config->irq_config_func) {
                config->irq_config_func();
        } else {
                return -ENOSYS;
        }

        /* Effectively a reset of the peripheral */
        ret = enet_qos_dma_reset(base);
        if (ret) {
                return ret;
        }

        /* The DMA is the interface the ENET module presents to software */
        enet_qos_dma_config_init(base);

        /*
         * MTL = MAC Translation Layer.
         * The MTL is an asynchronous circuit needed because the MAC transmitter/receiver
         * and the DMA interface are on different clock domains; the MTL mediates
         * between the two.
         */
        enet_qos_mtl_config_init(base);

        /* Configuration of the actual MAC hardware */
        enet_qos_mac_config_init(base, data, clk_rate);

        /* Current use of TX descriptors in the driver is such that
         * one packet is sent at a time, and each descriptor is used
         * to collect the fragments of it from the networking stack,
         * and send them with a zero copy implementation.
         */
        enet_qos_tx_desc_init(base, &data->tx);

        /* Current use of RX descriptors in the driver is such that
         * each RX descriptor corresponds to a reserved fragment that will
         * hold the entire contents of a packet. These fragments are recycled
         * in and out of the RX pkt buf pool to achieve a zero copy implementation.
         */
        ret = enet_qos_rx_desc_init(base, &data->rx);
        if (ret) {
                return ret;
        }

        /* Clearly, start the cogs in motion. */
        enet_qos_start(base);

        /* The tx sem is taken during the ethernet send function
         * and given when the DMA transmission is finished, i.e. send calls
         * block until the DMA is available again. This is a simple but naive
         * implementation.
         */
        k_sem_init(&data->tx.tx_sem, 1, 1);

        /* Work upon a reception of a packet to a buffer */
        k_work_init(&data->rx.rx_work, eth_nxp_enet_qos_rx);

        /* Work upon a complete transmission by a channel's TX DMA */
        k_work_init(&data->tx.tx_done_work, tx_dma_done);

        return ret;
}

static const struct device *eth_nxp_enet_qos_get_phy(const struct device *dev)
{
        const struct nxp_enet_qos_mac_config *config = dev->config;

        return config->phy_dev;
}

static int eth_nxp_enet_qos_set_config(const struct device *dev,
                                       enum ethernet_config_type type,
                                       const struct ethernet_config *cfg)
{
        const struct nxp_enet_qos_mac_config *config = dev->config;
        struct nxp_enet_qos_mac_data *data = dev->data;
        struct nxp_enet_qos_config *module_cfg = ENET_QOS_MODULE_CFG(config->enet_dev);
        enet_qos_t *base = module_cfg->base;

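        /* Only runtime MAC address changes are supported; any other config type
         * falls through to -ENOTSUP below.
         */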
        switch (type) {
        case ETHERNET_CONFIG_TYPE_MAC_ADDRESS:
                memcpy(data->mac_addr.addr,
                       cfg->mac_address.addr,
                       sizeof(data->mac_addr.addr));
                /* Set MAC address */
                base->MAC_ADDRESS0_HIGH =
                        ENET_QOS_REG_PREP(MAC_ADDRESS0_HIGH, ADDRHI,
                                          data->mac_addr.addr[5] << 8 |
                                          data->mac_addr.addr[4]);
                base->MAC_ADDRESS0_LOW =
                        ENET_QOS_REG_PREP(MAC_ADDRESS0_LOW, ADDRLO,
                                          data->mac_addr.addr[3] << 24 |
                                          data->mac_addr.addr[2] << 16 |
                                          data->mac_addr.addr[1] << 8 |
                                          data->mac_addr.addr[0]);
                net_if_set_link_addr(data->iface, data->mac_addr.addr,
                                     sizeof(data->mac_addr.addr),
                                     NET_LINK_ETHERNET);
                LOG_DBG("%s MAC set to %02x:%02x:%02x:%02x:%02x:%02x",
                        dev->name,
                        data->mac_addr.addr[0], data->mac_addr.addr[1],
                        data->mac_addr.addr[2], data->mac_addr.addr[3],
                        data->mac_addr.addr[4], data->mac_addr.addr[5]);
                return 0;
        default:
                break;
        }

        return -ENOTSUP;
}

static const struct ethernet_api api_funcs = {
        .iface_api.init = eth_nxp_enet_qos_iface_init,
        .send = eth_nxp_enet_qos_tx,
        .get_capabilities = eth_nxp_enet_qos_get_capabilities,
        .get_phy = eth_nxp_enet_qos_get_phy,
        .set_config = eth_nxp_enet_qos_set_config,
};

#define NXP_ENET_QOS_NODE_HAS_MAC_ADDR_CHECK(n)                                 \
        BUILD_ASSERT(NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(n)) ||                 \
                     DT_INST_PROP(n, zephyr_random_mac_address),                \
                     "MAC address not specified on ENET QOS DT node");

#define NXP_ENET_QOS_CONNECT_IRQS(node_id, prop, idx)                           \
        do {                                                                    \
                IRQ_CONNECT(DT_IRQN_BY_IDX(node_id, idx),                       \
                            DT_IRQ_BY_IDX(node_id, idx, priority),              \
                            eth_nxp_enet_qos_mac_isr,                           \
                            DEVICE_DT_GET(node_id),                             \
                            0);                                                 \
                irq_enable(DT_IRQN_BY_IDX(node_id, idx));                       \
        } while (false);

#define NXP_ENET_QOS_IRQ_CONFIG_FUNC(n)                                         \
        static void nxp_enet_qos_##n##_irq_config_func(void)                    \
        {                                                                       \
                DT_FOREACH_PROP_ELEM(DT_DRV_INST(n),                            \
                                     interrupt_names,                           \
                                     NXP_ENET_QOS_CONNECT_IRQS)                 \
        }

#define NXP_ENET_QOS_DRIVER_STRUCTS_INIT(n)                                     \
        static const struct nxp_enet_qos_mac_config enet_qos_##n##_mac_config = { \
                .enet_dev = DEVICE_DT_GET(DT_INST_PARENT(n)),                   \
                .phy_dev = DEVICE_DT_GET(DT_INST_PHANDLE(n, phy_handle)),       \
                .base = (enet_qos_t *)DT_REG_ADDR(DT_INST_PARENT(n)),           \
                .hw_info = {                                                    \
                        .max_frame_len = ENET_QOS_MAX_NORMAL_FRAME_LEN,         \
                },                                                              \
                .irq_config_func = nxp_enet_qos_##n##_irq_config_func,          \
                .random_mac = DT_INST_PROP(n, zephyr_random_mac_address),       \
        };                                                                      \
                                                                                \
        static struct nxp_enet_qos_mac_data enet_qos_##n##_mac_data =           \
        {                                                                       \
                .mac_addr.addr = DT_INST_PROP_OR(n, local_mac_address, {0}),    \
        };

#define NXP_ENET_QOS_DRIVER_INIT(n)                                             \
        NXP_ENET_QOS_NODE_HAS_MAC_ADDR_CHECK(n)                                 \
        NXP_ENET_QOS_IRQ_CONFIG_FUNC(n)                                         \
        NXP_ENET_QOS_DRIVER_STRUCTS_INIT(n)

DT_INST_FOREACH_STATUS_OKAY(NXP_ENET_QOS_DRIVER_INIT)

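/* Define an Ethernet device and its network interface for every enabled
 * devicetree instance of the MAC.
 */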
#define NXP_ENET_QOS_MAC_DEVICE_DEFINE(n)                                       \
        ETH_NET_DEVICE_DT_INST_DEFINE(n, eth_nxp_enet_qos_mac_init, NULL,       \
                                      &enet_qos_##n##_mac_data,                 \
                                      &enet_qos_##n##_mac_config,               \
                                      CONFIG_ETH_INIT_PRIORITY, &api_funcs, NET_ETH_MTU);

DT_INST_FOREACH_STATUS_OKAY(NXP_ENET_QOS_MAC_DEVICE_DEFINE)