/*
 * Copyright 2024 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT nxp_enet_qos_mac

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(eth_nxp_enet_qos_mac, CONFIG_ETHERNET_LOG_LEVEL);

#include <zephyr/net/phy.h>
#include <zephyr/kernel/thread_stack.h>
#include <zephyr/sys_clock.h>
#if defined(CONFIG_ETH_NXP_ENET_QOS_MAC_UNIQUE_MAC_ADDRESS)
#include <zephyr/sys/crc.h>
#include <zephyr/drivers/hwinfo.h>
#endif
#include <ethernet/eth_stats.h>
#include "../eth.h"
#include "nxp_enet_qos_priv.h"

static const uint32_t rx_desc_refresh_flags =
	OWN_FLAG | RX_INTERRUPT_ON_COMPLETE_FLAG | BUF1_ADDR_VALID_FLAG;

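/* RX processing is deferred from the ISR to a dedicated cooperative work queue */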
K_THREAD_STACK_DEFINE(enet_qos_rx_stack, CONFIG_ETH_NXP_ENET_QOS_RX_THREAD_STACK_SIZE);
static struct k_work_q rx_work_queue;

static int rx_queue_init(void)
{
	struct k_work_queue_config cfg = {.name = "ENETQOS_RX"};

	k_work_queue_init(&rx_work_queue);
	k_work_queue_start(&rx_work_queue, enet_qos_rx_stack,
			   K_THREAD_STACK_SIZEOF(enet_qos_rx_stack),
			   K_PRIO_COOP(CONFIG_ETH_NXP_ENET_QOS_RX_THREAD_PRIORITY),
			   &cfg);

	return 0;
}

SYS_INIT(rx_queue_init, POST_KERNEL, 0);

static void eth_nxp_enet_qos_iface_init(struct net_if *iface)
{
	const struct device *dev = net_if_get_device(iface);
	struct nxp_enet_qos_mac_data *data = dev->data;

	net_if_set_link_addr(iface, data->mac_addr.addr,
			     sizeof(((struct net_eth_addr *)NULL)->addr), NET_LINK_ETHERNET);

	if (data->iface == NULL) {
		data->iface = iface;
	}

	ethernet_init(iface);
}

static int eth_nxp_enet_qos_tx(const struct device *dev, struct net_pkt *pkt)
{
	const struct nxp_enet_qos_mac_config *config = dev->config;
	struct nxp_enet_qos_mac_data *data = dev->data;
	enet_qos_t *base = config->base;

	volatile union nxp_enet_qos_tx_desc *tx_desc_ptr = data->tx.descriptors;
	volatile union nxp_enet_qos_tx_desc *last_desc_ptr;

	struct net_buf *fragment = pkt->frags;
	int frags_count = 0, total_bytes = 0;

	/* Only allow sending up to the maximum normal packet size */
	while (fragment != NULL) {
		frags_count++;
		total_bytes += fragment->len;
		fragment = fragment->frags;

		if (total_bytes > config->hw_info.max_frame_len ||
		    frags_count > NUM_TX_BUFDESC) {
			LOG_ERR("TX packet too large");
			return -E2BIG;
		}
	}

	/* One TX at a time in the current implementation */
	k_sem_take(&data->tx.tx_sem, K_FOREVER);

	net_pkt_ref(pkt);

	data->tx.pkt = pkt;
	/* Save the header fragment because the ethernet stack
	 * otherwise discards it from the packet after this call
	 */
	data->tx.tx_header = pkt->frags;

	LOG_DBG("Setting up TX descriptors for packet %p", pkt);

	/* Reset the descriptors */
	memset((void *)data->tx.descriptors, 0, sizeof(union nxp_enet_qos_tx_desc) * frags_count);

	/* Set up the descriptors */
	fragment = pkt->frags;
	tx_desc_ptr->read.control2 |= FIRST_TX_DESCRIPTOR_FLAG;
	for (int i = 0; i < frags_count; i++) {
		net_pkt_frag_ref(fragment);

		tx_desc_ptr->read.buf1_addr = (uint32_t)fragment->data;
		tx_desc_ptr->read.control1 = FIELD_PREP(0x3FFF, fragment->len);
		tx_desc_ptr->read.control2 |= FIELD_PREP(0x7FFF, total_bytes);

		fragment = fragment->frags;
		tx_desc_ptr++;
	}
	last_desc_ptr = tx_desc_ptr - 1;
	last_desc_ptr->read.control2 |= LAST_TX_DESCRIPTOR_FLAG;
	last_desc_ptr->read.control1 |= TX_INTERRUPT_ON_COMPLETE_FLAG;

	LOG_DBG("Starting TX DMA on packet %p", pkt);

	/* Give the DMA ownership of all the used descriptors */
	for (int i = 0; i < frags_count; i++) {
		data->tx.descriptors[i].read.control2 |= OWN_FLAG;
	}

	/* This implementation is naive and basic: it reprograms the
	 * ring length for every TX send, so there is room for optimization
	 */
	base->DMA_CH[0].DMA_CHX_TXDESC_RING_LENGTH = frags_count - 1;
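	/* Advancing the tail pointer past the last prepared descriptor starts the TX DMA */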
	base->DMA_CH[0].DMA_CHX_TXDESC_TAIL_PTR =
		ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_TXDESC_TAIL_PTR, TDTP,
				  ENET_QOS_ALIGN_ADDR_SHIFT((uint32_t)tx_desc_ptr));

	return 0;
}

static void tx_dma_done(struct k_work *work)
{
	struct nxp_enet_qos_tx_data *tx_data =
		CONTAINER_OF(work, struct nxp_enet_qos_tx_data, tx_done_work);
	struct nxp_enet_qos_mac_data *data =
		CONTAINER_OF(tx_data, struct nxp_enet_qos_mac_data, tx);
	struct net_pkt *pkt = tx_data->pkt;
	struct net_buf *fragment = pkt->frags;

	LOG_DBG("TX DMA completed on packet %p", pkt);

	/* Returning the buffers and packet to the pool */
	while (fragment != NULL) {
		net_pkt_frag_unref(fragment);
		fragment = fragment->frags;
	}

	net_pkt_frag_unref(data->tx.tx_header);
	net_pkt_unref(pkt);

	eth_stats_update_pkts_tx(data->iface);

	/* Allows another send */
	k_sem_give(&data->tx.tx_sem);
}

static enum ethernet_hw_caps eth_nxp_enet_qos_get_capabilities(const struct device *dev)
{
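	/* The MAC supports 10 Mbps and 100 Mbps link speeds */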
	return ETHERNET_LINK_100BASE_T | ETHERNET_LINK_10BASE_T;
}

static void eth_nxp_enet_qos_rx(struct k_work *work)
{
	struct nxp_enet_qos_rx_data *rx_data =
		CONTAINER_OF(work, struct nxp_enet_qos_rx_data, rx_work);
	struct nxp_enet_qos_mac_data *data =
		CONTAINER_OF(rx_data, struct nxp_enet_qos_mac_data, rx);
	volatile union nxp_enet_qos_rx_desc *desc_arr = data->rx.descriptors;
	volatile union nxp_enet_qos_rx_desc *desc;
	struct net_pkt *pkt;
	struct net_buf *new_buf;
	struct net_buf *buf;
	size_t pkt_len;

	/* We are going to find all of the descriptors we own and update them */
	for (int i = 0; i < NUM_RX_BUFDESC; i++) {
		desc = &desc_arr[i];

		if (desc->write.control3 & OWN_FLAG) {
			/* The DMA owns the descriptor, we cannot touch it */
			continue;
		}

		/* Otherwise, we found a packet that we need to process */
		pkt = net_pkt_rx_alloc(K_NO_WAIT);

		if (!pkt) {
			LOG_ERR("Could not alloc RX pkt");
			goto error;
		}

		LOG_DBG("Created RX pkt %p", pkt);

		/* We need to know in advance if we can replace the reserved fragment.
		 * At no point can we allow the driver to have fewer reserved
		 * buffers than it needs to function, so we will not give up our previous
		 * buffer unless we know we can get a new one.
		 */
		new_buf = net_pkt_get_frag(pkt, CONFIG_NET_BUF_DATA_SIZE, K_NO_WAIT);
		if (new_buf == NULL) {
			/* We have no choice but to lose the previous packet,
			 * as the buffer is more important. If we receive this packet,
			 * we don't know what the upper layer will do to our poor buffer.
			 */
			LOG_ERR("No RX buf available");
			goto error;
		}

		buf = data->rx.reserved_bufs[i];
		pkt_len = desc->write.control3 & DESC_RX_PKT_LEN;

		LOG_DBG("Receiving RX packet");

		/* Finally, we have decided that it is time to wrap the buffer nicely
		 * up within a packet, and try to send it. It's only one buffer,
		 * thanks to ENET QOS hardware handling the fragmentation,
		 * so the construction of the packet is very simple.
		 */
		net_buf_add(buf, pkt_len);
		net_pkt_frag_insert(pkt, buf);
		if (net_recv_data(data->iface, pkt)) {
			LOG_ERR("RECV failed");
			/* Quite a shame. */
			goto error;
		}

		LOG_DBG("Recycling RX buf");

		/* Fresh meat */
		data->rx.reserved_bufs[i] = new_buf;
		desc->read.buf1_addr = (uint32_t)new_buf->data;
		desc->read.control |= rx_desc_refresh_flags;

		/* Record our glorious victory */
		eth_stats_update_pkts_rx(data->iface);
	}

	return;

error:
	net_pkt_unref(pkt);
	eth_stats_update_errors_rx(data->iface);
}

static void eth_nxp_enet_qos_mac_isr(const struct device *dev)
{
	const struct nxp_enet_qos_mac_config *config = dev->config;
	struct nxp_enet_qos_mac_data *data = dev->data;
	enet_qos_t *base = config->base;

	/* cleared on read */
	volatile uint32_t mac_interrupts = base->MAC_INTERRUPT_STATUS;
	volatile uint32_t mac_rx_tx_status = base->MAC_RX_TX_STATUS;
	volatile uint32_t dma_interrupts = base->DMA_INTERRUPT_STATUS;
	volatile uint32_t dma_ch0_interrupts = base->DMA_CH[0].DMA_CHX_STAT;

	mac_interrupts; mac_rx_tx_status;

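	/* Acknowledge all channel 0 DMA status flags (write-1-to-clear) */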
	base->DMA_CH[0].DMA_CHX_STAT = 0xFFFFFFFF;

	if (ENET_QOS_REG_GET(DMA_INTERRUPT_STATUS, DC0IS, dma_interrupts)) {
		if (ENET_QOS_REG_GET(DMA_CH_DMA_CHX_STAT, TI, dma_ch0_interrupts)) {
			k_work_submit(&data->tx.tx_done_work);
		}
		if (ENET_QOS_REG_GET(DMA_CH_DMA_CHX_STAT, RI, dma_ch0_interrupts)) {
			k_work_submit_to_queue(&rx_work_queue, &data->rx.rx_work);
		}
	}
}

static void eth_nxp_enet_qos_phy_cb(const struct device *phy,
				    struct phy_link_state *state, void *eth_dev)
{
	const struct device *dev = eth_dev;
	struct nxp_enet_qos_mac_data *data = dev->data;

	if (!data->iface) {
		return;
	}

	if (state->is_up) {
		net_eth_carrier_on(data->iface);
	} else {
		net_eth_carrier_off(data->iface);
	}

	LOG_INF("Link is %s", state->is_up ? "up" : "down");
}

static inline int enet_qos_dma_reset(enet_qos_t *base)
{
	/* Set the software reset of the DMA */
	base->DMA_MODE |= ENET_QOS_REG_PREP(DMA_MODE, SWR, 0b1);

	if (CONFIG_ETH_NXP_ENET_QOS_DMA_RESET_WAIT_TIME == 0) {
		/* spin and wait forever for the reset flag to clear */
		while (ENET_QOS_REG_GET(DMA_MODE, SWR, base->DMA_MODE)) {
			;
		}
		goto done;
	}

	int wait_chunk = DIV_ROUND_UP(CONFIG_ETH_NXP_ENET_QOS_DMA_RESET_WAIT_TIME,
				      NUM_SWR_WAIT_CHUNKS);

	for (int time_elapsed = 0;
	     time_elapsed < CONFIG_ETH_NXP_ENET_QOS_DMA_RESET_WAIT_TIME;
	     time_elapsed += wait_chunk) {

		k_busy_wait(wait_chunk);

		if (!ENET_QOS_REG_GET(DMA_MODE, SWR, base->DMA_MODE)) {
			/* DMA cleared the bit */
			goto done;
		}
	}

	/* All ENET QOS domain clocks must be running for the software reset to clear;
	 * if this error occurs, check the PHY clock connection.
	 */
	LOG_ERR("Can't clear SWR");
	return -EIO;

done:
	return 0;
}

static inline void enet_qos_dma_config_init(enet_qos_t *base)
{
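	/* Set the TX and RX DMA programmable burst lengths (PBL) to 1 */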
	base->DMA_CH[0].DMA_CHX_TX_CTRL |=
		ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_TX_CTRL, TxPBL, 0b1);
	base->DMA_CH[0].DMA_CHX_RX_CTRL |=
		ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_RX_CTRL, RxPBL, 0b1);
}

static inline void enet_qos_mtl_config_init(enet_qos_t *base)
{
	base->MTL_QUEUE[0].MTL_TXQX_OP_MODE |=
		/* Flush the queue */
		ENET_QOS_REG_PREP(MTL_QUEUE_MTL_TXQX_OP_MODE, FTQ, 0b1);

	/* Wait for flush to finish */
	while (ENET_QOS_REG_GET(MTL_QUEUE_MTL_TXQX_OP_MODE, FTQ,
				base->MTL_QUEUE[0].MTL_TXQX_OP_MODE)) {
		;
	}

	/* Enable only Transmit Queue 0 (optimization/configuration pending) with maximum size */
	base->MTL_QUEUE[0].MTL_TXQX_OP_MODE =
		/* Sets the size */
		ENET_QOS_REG_PREP(MTL_QUEUE_MTL_TXQX_OP_MODE, TQS, 0b111) |
		/* Sets it to on */
		ENET_QOS_REG_PREP(MTL_QUEUE_MTL_TXQX_OP_MODE, TXQEN, 0b10);

	/* Enable only Receive Queue 0 (optimization/configuration pending) with maximum size */
	base->MTL_QUEUE[0].MTL_RXQX_OP_MODE |=
		/* Sets the size */
		ENET_QOS_REG_PREP(MTL_QUEUE_MTL_RXQX_OP_MODE, RQS, 0b111) |
		/* Keep small packets */
		ENET_QOS_REG_PREP(MTL_QUEUE_MTL_RXQX_OP_MODE, FUP, 0b1);
}

static inline void enet_qos_mac_config_init(enet_qos_t *base,
					    struct nxp_enet_qos_mac_data *data, uint32_t clk_rate)
{
	/* Set MAC address */
	base->MAC_ADDRESS0_HIGH =
		ENET_QOS_REG_PREP(MAC_ADDRESS0_HIGH, ADDRHI,
				  data->mac_addr.addr[5] << 8 |
				  data->mac_addr.addr[4]);
	base->MAC_ADDRESS0_LOW =
		ENET_QOS_REG_PREP(MAC_ADDRESS0_LOW, ADDRLO,
				  data->mac_addr.addr[3] << 24 |
				  data->mac_addr.addr[2] << 16 |
				  data->mac_addr.addr[1] << 8 |
				  data->mac_addr.addr[0]);

	/* Pass all multicast packets if there is no hash table to filter MAC addresses */
	if ((base->MAC_HW_FEAT[1] & ENET_MAC_HW_FEAT_HASHTBLSZ_MASK) == 0) {
		base->MAC_PACKET_FILTER |= ENET_MAC_PACKET_FILTER_PM_MASK;
	}

	/* Set the reference for 1 microsecond of ENET QOS CSR clock cycles */
	base->MAC_ONEUS_TIC_COUNTER =
		ENET_QOS_REG_PREP(MAC_ONEUS_TIC_COUNTER, TIC_1US_CNTR,
				  (clk_rate / USEC_PER_SEC) - 1);

	base->MAC_CONFIGURATION |=
		/* For 10/100 Mbps operation */
		ENET_QOS_REG_PREP(MAC_CONFIGURATION, PS, 0b1) |
		/* Full duplex mode */
		ENET_QOS_REG_PREP(MAC_CONFIGURATION, DM, 0b1) |
		/* 100 Mbps mode */
		ENET_QOS_REG_PREP(MAC_CONFIGURATION, FES, 0b1) |
		/* Don't talk unless no one else is talking */
		ENET_QOS_REG_PREP(MAC_CONFIGURATION, ECRSFD, 0b1);

	/* Enable the MAC RX channel 0 */
	base->MAC_RXQ_CTRL[0] |=
		ENET_QOS_REG_PREP(MAC_RXQ_CTRL, RXQ0EN, 0b1);
}

static inline void enet_qos_start(enet_qos_t *base)
{
	/* Set start bits of the RX and TX DMAs */
	base->DMA_CH[0].DMA_CHX_RX_CTRL |=
		ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_RX_CTRL, SR, 0b1);
	base->DMA_CH[0].DMA_CHX_TX_CTRL |=
		ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_TX_CTRL, ST, 0b1);

	/* Enable interrupts */
	base->DMA_CH[0].DMA_CHX_INT_EN =
		/* Normal interrupts (includes tx, rx) */
		ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_INT_EN, NIE, 0b1) |
		/* Transmit interrupt */
		ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_INT_EN, TIE, 0b1) |
		/* Receive interrupt */
		ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_INT_EN, RIE, 0b1);
	base->MAC_INTERRUPT_ENABLE =
		/* Receive and Transmit IRQs */
		ENET_QOS_REG_PREP(MAC_INTERRUPT_ENABLE, TXSTSIE, 0b1) |
		ENET_QOS_REG_PREP(MAC_INTERRUPT_ENABLE, RXSTSIE, 0b1);

	/* Start the TX and RX on the MAC */
	base->MAC_CONFIGURATION |=
		ENET_QOS_REG_PREP(MAC_CONFIGURATION, TE, 0b1) |
		ENET_QOS_REG_PREP(MAC_CONFIGURATION, RE, 0b1);
}

static inline void enet_qos_tx_desc_init(enet_qos_t *base, struct nxp_enet_qos_tx_data *tx)
{
	memset((void *)tx->descriptors, 0, sizeof(union nxp_enet_qos_tx_desc) * NUM_TX_BUFDESC);

	base->DMA_CH[0].DMA_CHX_TXDESC_LIST_ADDR =
		/* Start of tx descriptors buffer */
		ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_TXDESC_LIST_ADDR, TDESLA,
				  ENET_QOS_ALIGN_ADDR_SHIFT((uint32_t)tx->descriptors));
	base->DMA_CH[0].DMA_CHX_TXDESC_TAIL_PTR =
		/* Do not move the tail pointer past the start until send is requested */
		ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_TXDESC_TAIL_PTR, TDTP,
				  ENET_QOS_ALIGN_ADDR_SHIFT((uint32_t)tx->descriptors));
	base->DMA_CH[0].DMA_CHX_TXDESC_RING_LENGTH =
		ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_TXDESC_RING_LENGTH, TDRL, NUM_TX_BUFDESC);
}

static inline int enet_qos_rx_desc_init(enet_qos_t *base, struct nxp_enet_qos_rx_data *rx)
{
	struct net_buf *buf;

	memset((void *)rx->descriptors, 0, sizeof(union nxp_enet_qos_rx_desc) * NUM_RX_BUFDESC);

	/* Here we reserve an RX buffer for each of the DMA descriptors. */
	for (int i = 0; i < NUM_RX_BUFDESC; i++) {
		buf = net_pkt_get_reserve_rx_data(CONFIG_NET_BUF_DATA_SIZE, K_NO_WAIT);
		if (buf == NULL) {
			LOG_ERR("Missing a buf");
			return -ENOMEM;
		}
		rx->reserved_bufs[i] = buf;
		rx->descriptors[i].read.buf1_addr = (uint32_t)buf->data;
		rx->descriptors[i].read.control |= rx_desc_refresh_flags;
	}

	/* Set up RX descriptors on channel 0 */
	base->DMA_CH[0].DMA_CHX_RXDESC_LIST_ADDR =
		/* Start of rx descriptors buffer */
		ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_RXDESC_LIST_ADDR, RDESLA,
				  ENET_QOS_ALIGN_ADDR_SHIFT((uint32_t)&rx->descriptors[0]));
	base->DMA_CH[0].DMA_CHX_RXDESC_TAIL_PTR =
		/* When the DMA reaches the tail pointer, it suspends. Set to last descriptor */
		ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_RXDESC_TAIL_PTR, RDTP,
				  ENET_QOS_ALIGN_ADDR_SHIFT((uint32_t)&rx->descriptors[NUM_RX_BUFDESC]));
	base->DMA_CH[0].DMA_CHX_RX_CONTROL2 =
		/* Ring length == Buffer size. Register is this value minus one. */
		ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_RX_CONTROL2, RDRL, NUM_RX_BUFDESC - 1);
	base->DMA_CH[0].DMA_CHX_RX_CTRL |=
		/* Set DMA receive buffer size. The low 2 bits are not entered in this field. */
		ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_RX_CTRL, RBSZ_13_Y, NET_ETH_MAX_FRAME_SIZE >> 2);

	return 0;
}

#if defined(CONFIG_ETH_NXP_ENET_QOS_MAC_UNIQUE_MAC_ADDRESS)
/* Note: this is not universally unique, it is just likely to be unique on a given network */
static inline void nxp_enet_unique_mac(uint8_t *mac_addr)
{
	uint8_t unique_device_ID_16_bytes[16] = {0};
	ssize_t uuid_length =
		hwinfo_get_device_id(unique_device_ID_16_bytes, sizeof(unique_device_ID_16_bytes));
	uint32_t hash = 0;

	if (uuid_length > 0) {
		hash = crc24_pgp((uint8_t *)unique_device_ID_16_bytes, uuid_length);
	} else {
		LOG_ERR("No unique MAC can be provided in this platform");
	}

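	/* MAC = NXP OUI in the first three bytes, CRC-24 hash of the device ID in the last three */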
	/* Setting LAA bit because it is not guaranteed universally unique */
	mac_addr[0] = NXP_OUI_BYTE_0 | 0x02;
	mac_addr[1] = NXP_OUI_BYTE_1;
	mac_addr[2] = NXP_OUI_BYTE_2;
	mac_addr[3] = FIELD_GET(0xFF0000, hash);
	mac_addr[4] = FIELD_GET(0x00FF00, hash);
	mac_addr[5] = FIELD_GET(0x0000FF, hash);
}
#else
#define nxp_enet_unique_mac(arg)
#endif

static int eth_nxp_enet_qos_mac_init(const struct device *dev)
{
	const struct nxp_enet_qos_mac_config *config = dev->config;
	struct nxp_enet_qos_mac_data *data = dev->data;
	struct nxp_enet_qos_config *module_cfg = ENET_QOS_MODULE_CFG(config->enet_dev);
	enet_qos_t *base = module_cfg->base;
	uint32_t clk_rate;
	int ret;

	/* Used to configure timings of the MAC */
	ret = clock_control_get_rate(module_cfg->clock_dev, module_cfg->clock_subsys, &clk_rate);
	if (ret) {
		return ret;
	}

	/* For reporting the status of the link connection */
	ret = phy_link_callback_set(config->phy_dev, eth_nxp_enet_qos_phy_cb, (void *)dev);
	if (ret) {
		return ret;
	}

	if (config->mac_addr_source == NXP_ENET_QOS_MAC_ADDR_SOURCE_LOCAL) {
		/* Use the MAC address provided in the devicetree */
	} else if (config->mac_addr_source == NXP_ENET_QOS_MAC_ADDR_SOURCE_UNIQUE) {
		nxp_enet_unique_mac(data->mac_addr.addr);
	} else {
		gen_random_mac(data->mac_addr.addr,
			       NXP_OUI_BYTE_0, NXP_OUI_BYTE_1, NXP_OUI_BYTE_2);
	}

	/* This driver cannot work without interrupts. */
	if (config->irq_config_func) {
		config->irq_config_func();
	} else {
		return -ENOSYS;
	}

	/* Effectively a reset of the peripheral */
	ret = enet_qos_dma_reset(base);
	if (ret) {
		return ret;
	}

	/* The DMA is the interface the ENET module presents to software for interaction */
	enet_qos_dma_config_init(base);

	/*
	 * MTL = MAC Translation Layer.
	 * The MTL is an asynchronous circuit needed because the MAC transmitter/receiver
	 * and the DMA interface are on different clock domains; the MTL bridges the two.
	 */
	enet_qos_mtl_config_init(base);

	/* Configuration of the actual MAC hardware */
	enet_qos_mac_config_init(base, data, clk_rate);

	/* Current use of the TX descriptors in the driver is such that
	 * one packet is sent at a time, and the descriptors are used
	 * to collect its fragments from the networking stack
	 * and send them with a zero-copy implementation.
	 */
	enet_qos_tx_desc_init(base, &data->tx);

	/* Current use of the RX descriptors in the driver is such that
	 * each RX descriptor corresponds to a reserved fragment, which will
	 * hold the entire contents of a packet. These fragments are recycled
	 * in and out of the RX pkt buf pool to achieve a zero-copy implementation.
	 */
	ret = enet_qos_rx_desc_init(base, &data->rx);
	if (ret) {
		return ret;
	}

	/* Finally, set the cogs in motion. */
	enet_qos_start(base);

	/* The TX sem is taken in the ethernet send function and given when the
	 * DMA transmission has finished, i.e. send calls block until the DMA is
	 * available again. This is therefore a simple but naive implementation.
	 */
	k_sem_init(&data->tx.tx_sem, 1, 1);

	/* Work upon a reception of a packet to a buffer */
	k_work_init(&data->rx.rx_work, eth_nxp_enet_qos_rx);

	/* Work upon a complete transmission by a channel's TX DMA */
	k_work_init(&data->tx.tx_done_work, tx_dma_done);

	return ret;
}

static const struct device *eth_nxp_enet_qos_get_phy(const struct device *dev)
{
	const struct nxp_enet_qos_mac_config *config = dev->config;

	return config->phy_dev;
}

static int eth_nxp_enet_qos_set_config(const struct device *dev,
				       enum ethernet_config_type type,
				       const struct ethernet_config *cfg)
{
	const struct nxp_enet_qos_mac_config *config = dev->config;
	struct nxp_enet_qos_mac_data *data = dev->data;
	struct nxp_enet_qos_config *module_cfg = ENET_QOS_MODULE_CFG(config->enet_dev);
	enet_qos_t *base = module_cfg->base;

	switch (type) {
	case ETHERNET_CONFIG_TYPE_MAC_ADDRESS:
		memcpy(data->mac_addr.addr,
		       cfg->mac_address.addr,
		       sizeof(data->mac_addr.addr));
		/* Set MAC address */
		base->MAC_ADDRESS0_HIGH =
			ENET_QOS_REG_PREP(MAC_ADDRESS0_HIGH, ADDRHI,
					  data->mac_addr.addr[5] << 8 |
					  data->mac_addr.addr[4]);
		base->MAC_ADDRESS0_LOW =
			ENET_QOS_REG_PREP(MAC_ADDRESS0_LOW, ADDRLO,
					  data->mac_addr.addr[3] << 24 |
					  data->mac_addr.addr[2] << 16 |
					  data->mac_addr.addr[1] << 8 |
					  data->mac_addr.addr[0]);
		net_if_set_link_addr(data->iface, data->mac_addr.addr,
				     sizeof(data->mac_addr.addr),
				     NET_LINK_ETHERNET);
		LOG_DBG("%s MAC set to %02x:%02x:%02x:%02x:%02x:%02x",
			dev->name,
			data->mac_addr.addr[0], data->mac_addr.addr[1],
			data->mac_addr.addr[2], data->mac_addr.addr[3],
			data->mac_addr.addr[4], data->mac_addr.addr[5]);
		return 0;
	default:
		break;
	}

	return -ENOTSUP;
}

static const struct ethernet_api api_funcs = {
	.iface_api.init = eth_nxp_enet_qos_iface_init,
	.send = eth_nxp_enet_qos_tx,
	.get_capabilities = eth_nxp_enet_qos_get_capabilities,
	.get_phy = eth_nxp_enet_qos_get_phy,
	.set_config = eth_nxp_enet_qos_set_config,
};

#define NXP_ENET_QOS_NODE_HAS_MAC_ADDR_CHECK(n) \
	BUILD_ASSERT(NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(n)) || \
			DT_INST_PROP(n, zephyr_random_mac_address) || \
			DT_INST_PROP(n, nxp_unique_mac), \
			"MAC address not specified on ENET QOS DT node");

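/* MAC address source priority: devicetree local-mac-address, then random, then hardware-unique */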
#define NXP_ENET_QOS_MAC_ADDR_SOURCE(n) \
	COND_CODE_1(DT_NODE_HAS_PROP(DT_DRV_INST(n), local_mac_address), \
		(NXP_ENET_QOS_MAC_ADDR_SOURCE_LOCAL), \
		(COND_CODE_1(DT_INST_PROP(n, zephyr_random_mac_address), \
			(NXP_ENET_QOS_MAC_ADDR_SOURCE_RANDOM), \
			(COND_CODE_1(DT_INST_PROP(n, nxp_unique_mac), \
				(NXP_ENET_QOS_MAC_ADDR_SOURCE_UNIQUE), \
				(NXP_ENET_QOS_MAC_ADDR_SOURCE_INVALID))))))

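/* Connect and enable every interrupt listed for the instance in the devicetree */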
#define NXP_ENET_QOS_CONNECT_IRQS(node_id, prop, idx) \
	do { \
		IRQ_CONNECT(DT_IRQN_BY_IDX(node_id, idx), DT_IRQ_BY_IDX(node_id, idx, priority), \
			    eth_nxp_enet_qos_mac_isr, DEVICE_DT_GET(node_id), 0); \
		irq_enable(DT_IRQN_BY_IDX(node_id, idx)); \
	} while (false);

#define NXP_ENET_QOS_IRQ_CONFIG_FUNC(n) \
	static void nxp_enet_qos_##n##_irq_config_func(void) \
	{ \
		DT_FOREACH_PROP_ELEM(DT_DRV_INST(n), interrupt_names, NXP_ENET_QOS_CONNECT_IRQS) \
	}

#define NXP_ENET_QOS_DRIVER_STRUCTS_INIT(n) \
	static const struct nxp_enet_qos_mac_config enet_qos_##n##_mac_config = { \
		.enet_dev = DEVICE_DT_GET(DT_INST_PARENT(n)), \
		.phy_dev = DEVICE_DT_GET(DT_INST_PHANDLE(n, phy_handle)), \
		.base = (enet_qos_t *)DT_REG_ADDR(DT_INST_PARENT(n)), \
		.hw_info = \
			{ \
				.max_frame_len = ENET_QOS_MAX_NORMAL_FRAME_LEN, \
			}, \
		.irq_config_func = nxp_enet_qos_##n##_irq_config_func, \
		.mac_addr_source = NXP_ENET_QOS_MAC_ADDR_SOURCE(n), \
	}; \
	static struct nxp_enet_qos_mac_data enet_qos_##n##_mac_data = { \
		.mac_addr.addr = DT_INST_PROP_OR(n, local_mac_address, {0}), \
	};

#define NXP_ENET_QOS_DRIVER_INIT(n) \
	NXP_ENET_QOS_NODE_HAS_MAC_ADDR_CHECK(n) \
	NXP_ENET_QOS_IRQ_CONFIG_FUNC(n) \
	NXP_ENET_QOS_DRIVER_STRUCTS_INIT(n)

DT_INST_FOREACH_STATUS_OKAY(NXP_ENET_QOS_DRIVER_INIT)

#define NXP_ENET_QOS_MAC_DEVICE_DEFINE(n) \
	ETH_NET_DEVICE_DT_INST_DEFINE(n, eth_nxp_enet_qos_mac_init, NULL, \
				      &enet_qos_##n##_mac_data, &enet_qos_##n##_mac_config, \
				      CONFIG_ETH_INIT_PRIORITY, &api_funcs, NET_ETH_MTU);

DT_INST_FOREACH_STATUS_OKAY(NXP_ENET_QOS_MAC_DEVICE_DEFINE)