1 /*
2 * Copyright (c) 2016 Piotr Mienkowski
3 * Copyright (c) 2018 Antmicro Ltd
4 * Copyright (c) 2023 Gerson Fernando Budke
5 *
6 * SPDX-License-Identifier: Apache-2.0
7 */
8
9 /** @file
10 * @brief Atmel SAM MCU family Ethernet MAC (GMAC) driver.
11 *
* This is a zero-copy networking implementation of an Ethernet driver. To
* prepare for incoming frames, the driver permanently reserves a defined
* number of RX data net buffers when the interface is brought up, which
* reduces the total number of RX data net buffers available to the
* application.
16 *
17 * Limitations:
18 * - one shot PHY setup, no support for PHY disconnect/reconnect
19 * - no statistics collection
20 */
21
22 #if defined(CONFIG_SOC_FAMILY_SAM)
23 #define DT_DRV_COMPAT atmel_sam_gmac
24 #else
25 #define DT_DRV_COMPAT atmel_sam0_gmac
26 #endif
27
28 #define LOG_MODULE_NAME eth_sam
29 #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL
30
31 #include <zephyr/logging/log.h>
32 LOG_MODULE_REGISTER(LOG_MODULE_NAME);
33
34 #include <zephyr/kernel.h>
35 #include <zephyr/device.h>
36 #include <zephyr/sys/__assert.h>
37 #include <zephyr/sys/barrier.h>
38 #include <zephyr/sys/util.h>
39 #include <errno.h>
40 #include <stdbool.h>
41 #include <zephyr/net/phy.h>
42 #include <zephyr/net/net_pkt.h>
43 #include <zephyr/net/net_if.h>
44 #include <zephyr/net/ethernet.h>
45 #include <ethernet/eth_stats.h>
46 #include <zephyr/drivers/i2c.h>
47 #include <zephyr/drivers/pinctrl.h>
48 #include <zephyr/drivers/clock_control/atmel_sam_pmc.h>
49 #include <soc.h>
50 #include "eth_sam_gmac_priv.h"
51
52 #include "eth.h"
53
54 #ifdef CONFIG_SOC_FAMILY_SAM0
55 #include "eth_sam0_gmac.h"
56 #endif
57
58 #include <zephyr/drivers/ptp_clock.h>
59 #include <zephyr/net/gptp.h>
60 #include <zephyr/irq.h>
61
62 #ifdef __DCACHE_PRESENT
63 static bool dcache_enabled;
64
static inline void dcache_is_enabled(void)
66 {
67 dcache_enabled = (SCB->CCR & SCB_CCR_DC_Msk);
68 }
static inline void dcache_invalidate(uint32_t addr, uint32_t size)
70 {
71 if (!dcache_enabled) {
72 return;
73 }
74
75 /* Make sure it is aligned to 32B */
76 uint32_t start_addr = addr & (uint32_t)~(GMAC_DCACHE_ALIGNMENT - 1);
77 uint32_t size_full = size + addr - start_addr;
78
79 SCB_InvalidateDCache_by_Addr((uint32_t *)start_addr, size_full);
80 }
81
static inline void dcache_clean(uint32_t addr, uint32_t size)
83 {
84 if (!dcache_enabled) {
85 return;
86 }
87
88 /* Make sure it is aligned to 32B */
89 uint32_t start_addr = addr & (uint32_t)~(GMAC_DCACHE_ALIGNMENT - 1);
90 uint32_t size_full = size + addr - start_addr;
91
92 SCB_CleanDCache_by_Addr((uint32_t *)start_addr, size_full);
93 }
94 #else
95 #define dcache_is_enabled()
96 #define dcache_invalidate(addr, size)
97 #define dcache_clean(addr, size)
98 #endif
99
100 #ifdef CONFIG_SOC_FAMILY_SAM0
101 #define MCK_FREQ_HZ SOC_ATMEL_SAM0_MCK_FREQ_HZ
102 #elif CONFIG_SOC_FAMILY_SAM
103 #define MCK_FREQ_HZ SOC_ATMEL_SAM_MCK_FREQ_HZ
104 #else
105 #error Unsupported SoC family
106 #endif
107
108 /*
109 * Verify Kconfig configuration
110 */
111 /* No need to verify things for unit tests */
112 #if !defined(CONFIG_NET_TEST)
113 #if CONFIG_NET_BUF_DATA_SIZE * CONFIG_ETH_SAM_GMAC_BUF_RX_COUNT \
114 < GMAC_FRAME_SIZE_MAX
115 #error CONFIG_NET_BUF_DATA_SIZE * CONFIG_ETH_SAM_GMAC_BUF_RX_COUNT is \
116 not large enough to hold a full frame
117 #endif
118
119 #if CONFIG_NET_BUF_DATA_SIZE * (CONFIG_NET_BUF_RX_COUNT - \
120 CONFIG_ETH_SAM_GMAC_BUF_RX_COUNT) < GMAC_FRAME_SIZE_MAX
121 #error (CONFIG_NET_BUF_RX_COUNT - CONFIG_ETH_SAM_GMAC_BUF_RX_COUNT) * \
CONFIG_NET_BUF_DATA_SIZE is not large enough to hold a full frame
123 #endif
124
125 #if CONFIG_NET_BUF_DATA_SIZE & 0x3F
126 #pragma message "CONFIG_NET_BUF_DATA_SIZE should be a multiple of 64 bytes " \
127 "due to the granularity of RX DMA"
128 #endif
129
130 #if (CONFIG_ETH_SAM_GMAC_BUF_RX_COUNT + 1) * GMAC_ACTIVE_QUEUE_NUM \
131 > CONFIG_NET_BUF_RX_COUNT
132 #error Not enough RX buffers to allocate descriptors for each HW queue
133 #endif
134 #endif /* !CONFIG_NET_TEST */
135
136 BUILD_ASSERT(DT_INST_ENUM_IDX(0, phy_connection_type) <= 1, "Invalid PHY connection");
137
138 /* RX descriptors list */
139 static struct gmac_desc rx_desc_que0[MAIN_QUEUE_RX_DESC_COUNT]
140 __nocache __aligned(GMAC_DESC_ALIGNMENT);
141 #if GMAC_PRIORITY_QUEUE_NUM >= 1
142 static struct gmac_desc rx_desc_que1[PRIORITY_QUEUE1_RX_DESC_COUNT]
143 __nocache __aligned(GMAC_DESC_ALIGNMENT);
144 #endif
145 #if GMAC_PRIORITY_QUEUE_NUM >= 2
146 static struct gmac_desc rx_desc_que2[PRIORITY_QUEUE2_RX_DESC_COUNT]
147 __nocache __aligned(GMAC_DESC_ALIGNMENT);
148 #endif
149 #if GMAC_PRIORITY_QUEUE_NUM >= 3
150 static struct gmac_desc rx_desc_que3[PRIORITY_QUEUE3_RX_DESC_COUNT]
151 __nocache __aligned(GMAC_DESC_ALIGNMENT);
152 #endif
153 #if GMAC_PRIORITY_QUEUE_NUM >= 4
154 static struct gmac_desc rx_desc_que4[PRIORITY_QUEUE4_RX_DESC_COUNT]
155 __nocache __aligned(GMAC_DESC_ALIGNMENT);
156 #endif
157 #if GMAC_PRIORITY_QUEUE_NUM >= 5
158 static struct gmac_desc rx_desc_que5[PRIORITY_QUEUE5_RX_DESC_COUNT]
159 __nocache __aligned(GMAC_DESC_ALIGNMENT);
160 #endif
161
162 /* TX descriptors list */
163 static struct gmac_desc tx_desc_que0[MAIN_QUEUE_TX_DESC_COUNT]
164 __nocache __aligned(GMAC_DESC_ALIGNMENT);
165 #if GMAC_PRIORITY_QUEUE_NUM >= 1
166 static struct gmac_desc tx_desc_que1[PRIORITY_QUEUE1_TX_DESC_COUNT]
167 __nocache __aligned(GMAC_DESC_ALIGNMENT);
168 #endif
169 #if GMAC_PRIORITY_QUEUE_NUM >= 2
170 static struct gmac_desc tx_desc_que2[PRIORITY_QUEUE2_TX_DESC_COUNT]
171 __nocache __aligned(GMAC_DESC_ALIGNMENT);
172 #endif
173 #if GMAC_PRIORITY_QUEUE_NUM >= 3
174 static struct gmac_desc tx_desc_que3[PRIORITY_QUEUE3_TX_DESC_COUNT]
175 __nocache __aligned(GMAC_DESC_ALIGNMENT);
176 #endif
177 #if GMAC_PRIORITY_QUEUE_NUM >= 4
178 static struct gmac_desc tx_desc_que4[PRIORITY_QUEUE4_TX_DESC_COUNT]
179 __nocache __aligned(GMAC_DESC_ALIGNMENT);
180 #endif
181 #if GMAC_PRIORITY_QUEUE_NUM >= 5
182 static struct gmac_desc tx_desc_que5[PRIORITY_QUEUE5_TX_DESC_COUNT]
183 __nocache __aligned(GMAC_DESC_ALIGNMENT);
184 #endif
185
186 /* RX buffer accounting list */
187 static struct net_buf *rx_frag_list_que0[MAIN_QUEUE_RX_DESC_COUNT];
188 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
189 static struct net_buf *rx_frag_list_que1[PRIORITY_QUEUE1_RX_DESC_COUNT];
190 #endif
191 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 2
192 static struct net_buf *rx_frag_list_que2[PRIORITY_QUEUE2_RX_DESC_COUNT];
193 #endif
194 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 3
195 static struct net_buf *rx_frag_list_que3[PRIORITY_QUEUE3_RX_DESC_COUNT];
196 #endif
197 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 4
198 static struct net_buf *rx_frag_list_que4[PRIORITY_QUEUE4_RX_DESC_COUNT];
199 #endif
200 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 5
201 static struct net_buf *rx_frag_list_que5[PRIORITY_QUEUE5_RX_DESC_COUNT];
202 #endif
203
204 #if GMAC_MULTIPLE_TX_PACKETS == 1
205 /* TX buffer accounting list */
206 static struct net_buf *tx_frag_list_que0[MAIN_QUEUE_TX_DESC_COUNT];
207 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
208 static struct net_buf *tx_frag_list_que1[PRIORITY_QUEUE1_TX_DESC_COUNT];
209 #endif
210 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 2
211 static struct net_buf *tx_frag_list_que2[PRIORITY_QUEUE2_TX_DESC_COUNT];
212 #endif
213 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 3
214 static struct net_buf *tx_frag_list_que3[PRIORITY_QUEUE3_TX_DESC_COUNT];
215 #endif
216 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 4
217 static struct net_buf *tx_frag_list_que4[PRIORITY_QUEUE4_TX_DESC_COUNT];
218 #endif
219 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 5
220 static struct net_buf *tx_frag_list_que5[PRIORITY_QUEUE5_TX_DESC_COUNT];
221 #endif
222
223 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
224 /* TX frames accounting list */
225 static struct net_pkt *tx_frame_list_que0[CONFIG_NET_PKT_TX_COUNT + 1];
226 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
227 static struct net_pkt *tx_frame_list_que1[CONFIG_NET_PKT_TX_COUNT + 1];
228 #endif
229 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 2
230 static struct net_pkt *tx_frame_list_que2[CONFIG_NET_PKT_TX_COUNT + 1];
231 #endif
232 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 3
233 static struct net_pkt *tx_frame_list_que3[CONFIG_NET_PKT_TX_COUNT + 1];
234 #endif
235 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 4
236 static struct net_pkt *tx_frame_list_que4[CONFIG_NET_PKT_TX_COUNT + 1];
237 #endif
238 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 5
239 static struct net_pkt *tx_frame_list_que5[CONFIG_NET_PKT_TX_COUNT + 1];
240 #endif
241 #endif
242 #endif
243
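/* Advance an index by one, wrapping around to 0 at max */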
244 #define MODULO_INC(val, max) {val = (++val < max) ? val : 0; }
245
246 static int rx_descriptors_init(Gmac *gmac, struct gmac_queue *queue);
247 static void tx_descriptors_init(Gmac *gmac, struct gmac_queue *queue);
248 static int nonpriority_queue_init(Gmac *gmac, struct gmac_queue *queue);
249
250 #if GMAC_PRIORITY_QUEUE_NUM >= 1
static inline void set_receive_buf_queue_pointer(Gmac *gmac,
252 struct gmac_queue *queue)
253 {
254 /* Set Receive Buffer Queue Pointer Register */
255 if (queue->que_idx == GMAC_QUE_0) {
256 gmac->GMAC_RBQB = (uint32_t)queue->rx_desc_list.buf;
257 } else {
258 gmac->GMAC_RBQBAPQ[queue->que_idx - 1] =
259 (uint32_t)queue->rx_desc_list.buf;
260 }
261 }
262
static inline void disable_all_priority_queue_interrupt(Gmac *gmac)
264 {
265 uint32_t idx;
266
267 for (idx = 0; idx < GMAC_PRIORITY_QUEUE_NUM; idx++) {
268 gmac->GMAC_IDRPQ[idx] = UINT32_MAX;
269 (void)gmac->GMAC_ISRPQ[idx];
270 }
271 }
272
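/*
 * Initialize an active hardware priority queue: set up its RX/TX descriptor
 * lists, TX semaphore, DMA buffer size and per-queue interrupts.
 */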
static int priority_queue_init(Gmac *gmac, struct gmac_queue *queue)
274 {
275 int result;
276 int queue_index;
277
278 __ASSERT_NO_MSG(queue->rx_desc_list.len > 0);
279 __ASSERT_NO_MSG(queue->tx_desc_list.len > 0);
280 __ASSERT(!((uint32_t)queue->rx_desc_list.buf & ~GMAC_RBQB_ADDR_Msk),
281 "RX descriptors have to be word aligned");
282 __ASSERT(!((uint32_t)queue->tx_desc_list.buf & ~GMAC_TBQB_ADDR_Msk),
283 "TX descriptors have to be word aligned");
284
285 /* Extract queue index for easier referencing */
286 queue_index = queue->que_idx - 1;
287
288 /* Setup descriptor lists */
289 result = rx_descriptors_init(gmac, queue);
290 if (result < 0) {
291 return result;
292 }
293
294 tx_descriptors_init(gmac, queue);
295
296 #if GMAC_MULTIPLE_TX_PACKETS == 0
297 k_sem_init(&queue->tx_sem, 0, 1);
298 #else
299 k_sem_init(&queue->tx_desc_sem, queue->tx_desc_list.len - 1,
300 queue->tx_desc_list.len - 1);
301 #endif
302
303 /* Setup RX buffer size for DMA */
304 gmac->GMAC_RBSRPQ[queue_index] =
305 GMAC_RBSRPQ_RBS(CONFIG_NET_BUF_DATA_SIZE >> 6);
306
307 /* Set Receive Buffer Queue Pointer Register */
308 gmac->GMAC_RBQBAPQ[queue_index] = (uint32_t)queue->rx_desc_list.buf;
309 /* Set Transmit Buffer Queue Pointer Register */
310 gmac->GMAC_TBQBAPQ[queue_index] = (uint32_t)queue->tx_desc_list.buf;
311
312 /* Enable RX/TX completion and error interrupts */
313 gmac->GMAC_IERPQ[queue_index] = GMAC_INTPQ_EN_FLAGS;
314
315 queue->err_rx_frames_dropped = 0U;
316 queue->err_rx_flushed_count = 0U;
317 queue->err_tx_flushed_count = 0U;
318
319 LOG_INF("Queue %d activated", queue->que_idx);
320
321 return 0;
322 }
323
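/*
 * Park an unused priority queue on a single, permanently owned RX/TX
 * descriptor pair so that the controller still sees valid queue pointers.
 */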
static int priority_queue_init_as_idle(Gmac *gmac, struct gmac_queue *queue)
325 {
326 struct gmac_desc_list *rx_desc_list = &queue->rx_desc_list;
327 struct gmac_desc_list *tx_desc_list = &queue->tx_desc_list;
328
329 __ASSERT(!((uint32_t)rx_desc_list->buf & ~GMAC_RBQB_ADDR_Msk),
330 "RX descriptors have to be word aligned");
331 __ASSERT(!((uint32_t)tx_desc_list->buf & ~GMAC_TBQB_ADDR_Msk),
332 "TX descriptors have to be word aligned");
333 __ASSERT((rx_desc_list->len == 1U) && (tx_desc_list->len == 1U),
334 "Priority queues are currently not supported, descriptor "
335 "list has to have a single entry");
336
337 /* Setup RX descriptor lists */
338 /* Take ownership from GMAC and set the wrap bit */
339 rx_desc_list->buf[0].w0 = GMAC_RXW0_WRAP;
340 rx_desc_list->buf[0].w1 = 0U;
341 /* Setup TX descriptor lists */
342 tx_desc_list->buf[0].w0 = 0U;
343 /* Take ownership from GMAC and set the wrap bit */
344 tx_desc_list->buf[0].w1 = GMAC_TXW1_USED | GMAC_TXW1_WRAP;
345
346 /* Set Receive Buffer Queue Pointer Register */
347 gmac->GMAC_RBQBAPQ[queue->que_idx - 1] = (uint32_t)rx_desc_list->buf;
348 /* Set Transmit Buffer Queue Pointer Register */
349 gmac->GMAC_TBQBAPQ[queue->que_idx - 1] = (uint32_t)tx_desc_list->buf;
350
351 LOG_INF("Queue %d set to idle", queue->que_idx);
352
353 return 0;
354 }
355
static int queue_init(Gmac *gmac, struct gmac_queue *queue)
357 {
358 if (queue->que_idx == GMAC_QUE_0) {
359 return nonpriority_queue_init(gmac, queue);
360 } else if (queue->que_idx <= GMAC_ACTIVE_PRIORITY_QUEUE_NUM) {
361 return priority_queue_init(gmac, queue);
362 } else {
363 return priority_queue_init_as_idle(gmac, queue);
364 }
365 }
366
367 #else
368
static inline void set_receive_buf_queue_pointer(Gmac *gmac,
370 struct gmac_queue *queue)
371 {
372 gmac->GMAC_RBQB = (uint32_t)queue->rx_desc_list.buf;
373 }
374
static int queue_init(Gmac *gmac, struct gmac_queue *queue)
376 {
377 return nonpriority_queue_init(gmac, queue);
378 }
379
380 #define disable_all_priority_queue_interrupt(gmac)
381
382 #endif
383
384 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
385 static int eth_sam_gmac_setup_qav(Gmac *gmac, int queue_id, bool enable);
386
static inline void eth_sam_gmac_init_qav(Gmac *gmac)
388 {
389 uint32_t idx;
390
391 for (idx = GMAC_QUE_1; idx <= GMAC_ACTIVE_PRIORITY_QUEUE_NUM; idx++) {
392 eth_sam_gmac_setup_qav(gmac, idx, true);
393 }
394 }
395
396 #else
397
398 #define eth_sam_gmac_init_qav(gmac)
399
400 #endif
401
402 #if GMAC_MULTIPLE_TX_PACKETS == 1
403 /*
404 * Reset ring buffer
405 */
static void ring_buf_reset(struct ring_buf *rb)
407 {
408 rb->head = 0U;
409 rb->tail = 0U;
410 }
411
412 /*
413 * Get one 32 bit item from the ring buffer
414 */
static uint32_t ring_buf_get(struct ring_buf *rb)
416 {
417 uint32_t val;
418
419 __ASSERT(rb->tail != rb->head,
420 "retrieving data from empty ring buffer");
421
422 val = rb->buf[rb->tail];
423 MODULO_INC(rb->tail, rb->len);
424
425 return val;
426 }
427
428 /*
429 * Put one 32 bit item into the ring buffer
430 */
static void ring_buf_put(struct ring_buf *rb, uint32_t val)
432 {
433 rb->buf[rb->head] = val;
434 MODULO_INC(rb->head, rb->len);
435
436 __ASSERT(rb->tail != rb->head,
437 "ring buffer overflow");
438 }
439 #endif
440
441 /*
442 * Free pre-reserved RX buffers
443 */
static void free_rx_bufs(struct net_buf **rx_frag_list, uint16_t len)
445 {
446 for (int i = 0; i < len; i++) {
447 if (rx_frag_list[i]) {
448 net_buf_unref(rx_frag_list[i]);
449 rx_frag_list[i] = NULL;
450 }
451 }
452 }
453
454 /*
455 * Set MAC Address for frame filtering logic
456 */
static void mac_addr_set(Gmac *gmac, uint8_t index,
458 uint8_t mac_addr[6])
459 {
460 __ASSERT(index < 4, "index has to be in the range 0..3");
461
462 gmac->GMAC_SA[index].GMAC_SAB = (mac_addr[3] << 24)
463 | (mac_addr[2] << 16)
464 | (mac_addr[1] << 8)
465 | (mac_addr[0]);
466 gmac->GMAC_SA[index].GMAC_SAT = (mac_addr[5] << 8)
467 | (mac_addr[4]);
468 }
469
470 /*
471 * Initialize RX descriptor list
472 */
static int rx_descriptors_init(Gmac *gmac, struct gmac_queue *queue)
474 {
475 struct gmac_desc_list *rx_desc_list = &queue->rx_desc_list;
476 struct net_buf **rx_frag_list = queue->rx_frag_list;
477 struct net_buf *rx_buf;
478 uint8_t *rx_buf_addr;
479
480 __ASSERT_NO_MSG(rx_frag_list);
481
482 rx_desc_list->tail = 0U;
483
484 for (int i = 0; i < rx_desc_list->len; i++) {
485 rx_buf = net_pkt_get_reserve_rx_data(CONFIG_NET_BUF_DATA_SIZE,
486 K_NO_WAIT);
487 if (rx_buf == NULL) {
488 free_rx_bufs(rx_frag_list, rx_desc_list->len);
489 LOG_ERR("Failed to reserve data net buffers");
490 return -ENOBUFS;
491 }
492
493 rx_frag_list[i] = rx_buf;
494
495 rx_buf_addr = rx_buf->data;
496 __ASSERT(!((uint32_t)rx_buf_addr & ~GMAC_RXW0_ADDR),
497 "Misaligned RX buffer address");
498 __ASSERT(rx_buf->size == CONFIG_NET_BUF_DATA_SIZE,
499 "Incorrect length of RX data buffer");
500 /* Give ownership to GMAC and remove the wrap bit */
501 rx_desc_list->buf[i].w0 = (uint32_t)rx_buf_addr & GMAC_RXW0_ADDR;
502 rx_desc_list->buf[i].w1 = 0U;
503 }
504
505 /* Set the wrap bit on the last descriptor */
506 rx_desc_list->buf[rx_desc_list->len - 1U].w0 |= GMAC_RXW0_WRAP;
507
508 return 0;
509 }
510
511 /*
512 * Initialize TX descriptor list
513 */
static void tx_descriptors_init(Gmac *gmac, struct gmac_queue *queue)
515 {
516 struct gmac_desc_list *tx_desc_list = &queue->tx_desc_list;
517
518 tx_desc_list->head = 0U;
519 tx_desc_list->tail = 0U;
520
521 for (int i = 0; i < tx_desc_list->len; i++) {
522 tx_desc_list->buf[i].w0 = 0U;
523 tx_desc_list->buf[i].w1 = GMAC_TXW1_USED;
524 }
525
526 /* Set the wrap bit on the last descriptor */
527 tx_desc_list->buf[tx_desc_list->len - 1U].w1 |= GMAC_TXW1_WRAP;
528
529 #if GMAC_MULTIPLE_TX_PACKETS == 1
530 /* Reset TX frame list */
531 ring_buf_reset(&queue->tx_frag_list);
532 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
533 ring_buf_reset(&queue->tx_frames);
534 #endif
535 #endif
536 }
537
538 #if defined(CONFIG_NET_GPTP)
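/*
 * Return a pointer to the gPTP header if the packet carries the PTP
 * Ethertype (with or without a VLAN tag), otherwise NULL.
 */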
static struct gptp_hdr *check_gptp_msg(struct net_if *iface,
540 struct net_pkt *pkt,
541 bool is_tx)
542 {
543 uint8_t *msg_start = net_pkt_data(pkt);
544 struct ethernet_context *eth_ctx;
545 struct gptp_hdr *gptp_hdr;
546 int eth_hlen;
547
548 #if defined(CONFIG_NET_VLAN)
549 eth_ctx = net_if_l2_data(iface);
550 if (net_eth_is_vlan_enabled(eth_ctx, iface)) {
551 struct net_eth_vlan_hdr *hdr_vlan;
552
553 hdr_vlan = (struct net_eth_vlan_hdr *)msg_start;
554 if (ntohs(hdr_vlan->type) != NET_ETH_PTYPE_PTP) {
555 return NULL;
556 }
557
558 eth_hlen = sizeof(struct net_eth_vlan_hdr);
559 } else
560 #else
561 ARG_UNUSED(eth_ctx);
562 #endif
563 {
564 struct net_eth_hdr *hdr;
565
566 hdr = (struct net_eth_hdr *)msg_start;
567 if (ntohs(hdr->type) != NET_ETH_PTYPE_PTP) {
568 return NULL;
569 }
570
571 eth_hlen = sizeof(struct net_eth_hdr);
572 }
573
574 /* In TX, the first net_buf contains the Ethernet header
575 * and the actual gPTP header is in the second net_buf.
576 * In RX, the Ethernet header + other headers are in the
577 * first net_buf.
578 */
579 if (is_tx) {
580 if (pkt->frags->frags == NULL) {
return NULL;
582 }
583
584 gptp_hdr = (struct gptp_hdr *)pkt->frags->frags->data;
585 } else {
586 gptp_hdr = (struct gptp_hdr *)(pkt->frags->data + eth_hlen);
587 }
588
589 return gptp_hdr;
590 }
591
static bool need_timestamping(struct gptp_hdr *hdr)
593 {
594 switch (hdr->message_type) {
595 case GPTP_SYNC_MESSAGE:
596 case GPTP_PATH_DELAY_RESP_MESSAGE:
597 return true;
598 default:
599 return false;
600 }
601 }
602
static void update_pkt_priority(struct gptp_hdr *hdr, struct net_pkt *pkt)
604 {
605 if (GPTP_IS_EVENT_MSG(hdr->message_type)) {
606 net_pkt_set_priority(pkt, NET_PRIORITY_CA);
607 } else {
608 net_pkt_set_priority(pkt, NET_PRIORITY_IC);
609 }
610 }
611
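/* Helpers reading the GMAC (peer) event timestamp capture registers */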
static inline struct net_ptp_time get_ptp_event_rx_ts(Gmac *gmac)
613 {
614 struct net_ptp_time ts;
615
616 ts.second = ((uint64_t)(gmac->GMAC_EFRSH & 0xffff) << 32)
617 | gmac->GMAC_EFRSL;
618 ts.nanosecond = gmac->GMAC_EFRN;
619
620 return ts;
621 }
622
static inline struct net_ptp_time get_ptp_peer_event_rx_ts(Gmac *gmac)
624 {
625 struct net_ptp_time ts;
626
627 ts.second = ((uint64_t)(gmac->GMAC_PEFRSH & 0xffff) << 32)
628 | gmac->GMAC_PEFRSL;
629 ts.nanosecond = gmac->GMAC_PEFRN;
630
631 return ts;
632 }
633
static inline struct net_ptp_time get_ptp_event_tx_ts(Gmac *gmac)
635 {
636 struct net_ptp_time ts;
637
638 ts.second = ((uint64_t)(gmac->GMAC_EFTSH & 0xffff) << 32)
639 | gmac->GMAC_EFTSL;
640 ts.nanosecond = gmac->GMAC_EFTN;
641
642 return ts;
643 }
644
static inline struct net_ptp_time get_ptp_peer_event_tx_ts(Gmac *gmac)
646 {
647 struct net_ptp_time ts;
648
649 ts.second = ((uint64_t)(gmac->GMAC_PEFTSH & 0xffff) << 32)
650 | gmac->GMAC_PEFTSL;
651 ts.nanosecond = gmac->GMAC_PEFTN;
652
653 return ts;
654 }
655
static inline struct net_ptp_time get_current_ts(Gmac *gmac)
657 {
658 struct net_ptp_time ts;
659
660 ts.second = ((uint64_t)(gmac->GMAC_TSH & 0xffff) << 32) | gmac->GMAC_TSL;
661 ts.nanosecond = gmac->GMAC_TN;
662
663 return ts;
664 }
665
666
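/*
 * Attach a TX timestamp to the packet, selecting the event or peer event
 * capture register based on the gPTP message type.
 */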
static inline void timestamp_tx_pkt(Gmac *gmac, struct gptp_hdr *hdr,
668 struct net_pkt *pkt)
669 {
670 struct net_ptp_time timestamp;
671
672 if (hdr) {
673 switch (hdr->message_type) {
674 case GPTP_SYNC_MESSAGE:
675 timestamp = get_ptp_event_tx_ts(gmac);
676 break;
677 default:
678 timestamp = get_ptp_peer_event_tx_ts(gmac);
679 }
680 } else {
681 timestamp = get_current_ts(gmac);
682 }
683
net_pkt_set_timestamp(pkt, &timestamp);
685 }
686
static inline void timestamp_rx_pkt(Gmac *gmac, struct gptp_hdr *hdr,
688 struct net_pkt *pkt)
689 {
690 struct net_ptp_time timestamp;
691
692 if (hdr) {
693 switch (hdr->message_type) {
694 case GPTP_SYNC_MESSAGE:
695 timestamp = get_ptp_event_rx_ts(gmac);
696 break;
697 default:
698 timestamp = get_ptp_peer_event_rx_ts(gmac);
699 }
700 } else {
701 timestamp = get_current_ts(gmac);
702 }
703
net_pkt_set_timestamp(pkt, &timestamp);
705 }
706
707 #endif
708
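/* Resolve the interface for a VLAN tag, falling back to the main interface */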
static inline struct net_if *get_iface(struct eth_sam_dev_data *ctx,
710 uint16_t vlan_tag)
711 {
712 #if defined(CONFIG_NET_VLAN)
713 struct net_if *iface;
714
715 iface = net_eth_get_vlan_iface(ctx->iface, vlan_tag);
716 if (!iface) {
717 return ctx->iface;
718 }
719
720 return iface;
721 #else
722 ARG_UNUSED(vlan_tag);
723
724 return ctx->iface;
725 #endif
726 }
727
728 /*
729 * Process successfully sent packets
730 */
static void tx_completed(Gmac *gmac, struct gmac_queue *queue)
732 {
733 #if GMAC_MULTIPLE_TX_PACKETS == 0
734 k_sem_give(&queue->tx_sem);
735 #else
736 struct gmac_desc_list *tx_desc_list = &queue->tx_desc_list;
737 struct gmac_desc *tx_desc;
738 struct net_buf *frag;
739 #if defined(CONFIG_NET_GPTP)
740 struct net_pkt *pkt;
741 uint16_t vlan_tag = NET_VLAN_TAG_UNSPEC;
742 struct gptp_hdr *hdr;
743 struct eth_sam_dev_data *dev_data =
744 CONTAINER_OF(queue, struct eth_sam_dev_data,
745 queue_list[queue->que_idx]);
746 #endif
747
748 __ASSERT(tx_desc_list->buf[tx_desc_list->tail].w1 & GMAC_TXW1_USED,
"first buffer of a frame is not marked as owned by GMAC");
750
751 while (tx_desc_list->tail != tx_desc_list->head) {
752
753 tx_desc = &tx_desc_list->buf[tx_desc_list->tail];
754 MODULO_INC(tx_desc_list->tail, tx_desc_list->len);
755 k_sem_give(&queue->tx_desc_sem);
756
757 /* Release net buffer to the buffer pool */
758 frag = UINT_TO_POINTER(ring_buf_get(&queue->tx_frag_list));
759 net_pkt_frag_unref(frag);
760 LOG_DBG("Dropping frag %p", frag);
761
762 if (tx_desc->w1 & GMAC_TXW1_LASTBUFFER) {
763 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
764 /* Release net packet to the packet pool */
765 pkt = UINT_TO_POINTER(ring_buf_get(&queue->tx_frames));
766 #if defined(CONFIG_NET_VLAN)
767 struct net_eth_hdr *eth_hdr = NET_ETH_HDR(pkt);
768
769 if (ntohs(eth_hdr->type) == NET_ETH_PTYPE_VLAN) {
770 vlan_tag = net_pkt_vlan_tag(pkt);
771 }
772 #endif
773 #if defined(CONFIG_NET_GPTP)
774 hdr = check_gptp_msg(get_iface(dev_data, vlan_tag),
775 pkt, true);
776
777 timestamp_tx_pkt(gmac, hdr, pkt);
778
779 if (hdr && need_timestamping(hdr)) {
780 net_if_add_tx_timestamp(pkt);
781 }
782 #endif
783 net_pkt_unref(pkt);
784 LOG_DBG("Dropping pkt %p", pkt);
785 #endif
786 break;
787 }
788 }
789 #endif
790 }
791
792 /*
793 * Reset TX queue when errors are detected
794 */
static void tx_error_handler(Gmac *gmac, struct gmac_queue *queue)
796 {
797 #if GMAC_MULTIPLE_TX_PACKETS == 1
798 struct net_buf *frag;
799 struct ring_buf *tx_frag_list = &queue->tx_frag_list;
800 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
801 struct net_pkt *pkt;
802 struct ring_buf *tx_frames = &queue->tx_frames;
803 #endif
804 #endif
805
806 queue->err_tx_flushed_count++;
807
808 /* Stop transmission, clean transmit pipeline and control registers */
809 gmac->GMAC_NCR &= ~GMAC_NCR_TXEN;
810
811 #if GMAC_MULTIPLE_TX_PACKETS == 1
812 /* Free all frag resources in the TX path */
813 while (tx_frag_list->tail != tx_frag_list->head) {
814 /* Release net buffer to the buffer pool */
815 frag = UINT_TO_POINTER(tx_frag_list->buf[tx_frag_list->tail]);
816 net_pkt_frag_unref(frag);
817 LOG_DBG("Dropping frag %p", frag);
818 MODULO_INC(tx_frag_list->tail, tx_frag_list->len);
819 }
820
821 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
822 /* Free all pkt resources in the TX path */
823 while (tx_frames->tail != tx_frames->head) {
824 /* Release net packet to the packet pool */
825 pkt = UINT_TO_POINTER(tx_frames->buf[tx_frames->tail]);
826 net_pkt_unref(pkt);
827 LOG_DBG("Dropping pkt %p", pkt);
828 MODULO_INC(tx_frames->tail, tx_frames->len);
829 }
830 #endif
831
832 /* Reinitialize TX descriptor list */
833 k_sem_reset(&queue->tx_desc_sem);
834 for (int i = 0; i < queue->tx_desc_list.len - 1; i++) {
835 k_sem_give(&queue->tx_desc_sem);
836 }
837 #endif
838 tx_descriptors_init(gmac, queue);
839
840 #if GMAC_MULTIPLE_TX_PACKETS == 0
841 /* Reinitialize TX mutex */
842 k_sem_give(&queue->tx_sem);
843 #endif
844
845 /* Restart transmission */
846 gmac->GMAC_NCR |= GMAC_NCR_TXEN;
847 }
848
849 /*
* Clean the RX queue; any received data still stored in the buffers is abandoned.
851 */
static void rx_error_handler(Gmac *gmac, struct gmac_queue *queue)
853 {
854 queue->err_rx_flushed_count++;
855
856 /* Stop reception */
857 gmac->GMAC_NCR &= ~GMAC_NCR_RXEN;
858
859 queue->rx_desc_list.tail = 0U;
860
861 for (int i = 0; i < queue->rx_desc_list.len; i++) {
862 queue->rx_desc_list.buf[i].w1 = 0U;
863 queue->rx_desc_list.buf[i].w0 &= ~GMAC_RXW0_OWNERSHIP;
864 }
865
866 set_receive_buf_queue_pointer(gmac, queue);
867
868 /* Restart reception */
869 gmac->GMAC_NCR |= GMAC_NCR_RXEN;
870 }
871
872 /*
* Set the MCK-to-MDC clock divisor.
*
* According to 802.3, MDC should be less than 2.5 MHz.
876 */
static int get_mck_clock_divisor(uint32_t mck)
878 {
879 uint32_t mck_divisor;
880
881 if (mck <= 20000000U) {
882 mck_divisor = GMAC_NCFGR_CLK_MCK_8;
883 } else if (mck <= 40000000U) {
884 mck_divisor = GMAC_NCFGR_CLK_MCK_16;
885 } else if (mck <= 80000000U) {
886 mck_divisor = GMAC_NCFGR_CLK_MCK_32;
887 } else if (mck <= 120000000U) {
888 mck_divisor = GMAC_NCFGR_CLK_MCK_48;
889 } else if (mck <= 160000000U) {
890 mck_divisor = GMAC_NCFGR_CLK_MCK_64;
891 } else if (mck <= 240000000U) {
892 mck_divisor = GMAC_NCFGR_CLK_MCK_96;
893 } else {
894 LOG_ERR("No valid MDC clock");
895 mck_divisor = -ENOTSUP;
896 }
897
898 return mck_divisor;
899 }
900
901 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
static int eth_sam_gmac_setup_qav(Gmac *gmac, int queue_id, bool enable)
903 {
904 /* Verify queue id */
905 if (queue_id < GMAC_QUE_1 || queue_id > GMAC_ACTIVE_PRIORITY_QUEUE_NUM) {
906 return -EINVAL;
907 }
908
909 if (queue_id == GMAC_QUE_2) {
910 if (enable) {
911 gmac->GMAC_CBSCR |= GMAC_CBSCR_QAE;
912 } else {
913 gmac->GMAC_CBSCR &= ~GMAC_CBSCR_QAE;
914 }
915 } else {
916 if (enable) {
917 gmac->GMAC_CBSCR |= GMAC_CBSCR_QBE;
918 } else {
919 gmac->GMAC_CBSCR &= ~GMAC_CBSCR_QBE;
920 }
921 }
922
923 return 0;
924 }
925
static int eth_sam_gmac_get_qav_status(Gmac *gmac, int queue_id, bool *enabled)
927 {
928 /* Verify queue id */
929 if (queue_id < GMAC_QUE_1 || queue_id > GMAC_ACTIVE_PRIORITY_QUEUE_NUM) {
930 return -EINVAL;
931 }
932
933 if (queue_id == GMAC_QUE_2) {
934 *enabled = gmac->GMAC_CBSCR & GMAC_CBSCR_QAE;
935 } else {
936 *enabled = gmac->GMAC_CBSCR & GMAC_CBSCR_QBE;
937 }
938
939 return 0;
940 }
941
static int eth_sam_gmac_setup_qav_idle_slope(Gmac *gmac, int queue_id,
943 unsigned int idle_slope)
944 {
945 uint32_t cbscr_val;
946
947 /* Verify queue id */
948 if (queue_id < GMAC_QUE_1 || queue_id > GMAC_ACTIVE_PRIORITY_QUEUE_NUM) {
949 return -EINVAL;
950 }
951
952 cbscr_val = gmac->GMAC_CBSISQA;
953
954 if (queue_id == GMAC_QUE_2) {
955 gmac->GMAC_CBSCR &= ~GMAC_CBSCR_QAE;
956 gmac->GMAC_CBSISQA = idle_slope;
957 } else {
958 gmac->GMAC_CBSCR &= ~GMAC_CBSCR_QBE;
959 gmac->GMAC_CBSISQB = idle_slope;
960 }
961
962 gmac->GMAC_CBSCR = cbscr_val;
963
964 return 0;
965 }
966
static uint32_t eth_sam_gmac_get_bandwidth(Gmac *gmac)
968 {
969 uint32_t bandwidth;
970
/* Check whether we operate in 10Mbps or 100Mbps mode.
972 * Note: according to the manual, portTransmitRate is 0x07735940 for
973 * 1Gbps - therefore we cannot use the KB/MB macros - we have to
974 * multiply it by a round 1000 to get it right.
975 */
976 if (gmac->GMAC_NCFGR & GMAC_NCFGR_SPD) {
977 /* 100Mbps */
978 bandwidth = (100 * 1000 * 1000) / 8;
979 } else {
980 /* 10Mbps */
981 bandwidth = (10 * 1000 * 1000) / 8;
982 }
983
984 return bandwidth;
985 }
986
static int eth_sam_gmac_get_qav_idle_slope(Gmac *gmac, int queue_id,
988 unsigned int *idle_slope)
989 {
990 /* Verify queue id */
991 if (queue_id < GMAC_QUE_1 || queue_id > GMAC_ACTIVE_PRIORITY_QUEUE_NUM) {
992 return -EINVAL;
993 }
994
995 if (queue_id == GMAC_QUE_2) {
996 *idle_slope = gmac->GMAC_CBSISQA;
997 } else {
998 *idle_slope = gmac->GMAC_CBSISQB;
999 }
1000
1001 /* Convert to bps as expected by upper layer */
1002 *idle_slope *= 8U;
1003
1004 return 0;
1005 }
1006
static int eth_sam_gmac_get_qav_delta_bandwidth(Gmac *gmac, int queue_id,
1008 unsigned int *delta_bandwidth)
1009 {
1010 uint32_t bandwidth;
1011 unsigned int idle_slope;
1012 int ret;
1013
1014 ret = eth_sam_gmac_get_qav_idle_slope(gmac, queue_id, &idle_slope);
1015 if (ret) {
1016 return ret;
1017 }
1018
1019 /* Calculate in Bps */
1020 idle_slope /= 8U;
1021
1022 /* Get bandwidth and convert to bps */
1023 bandwidth = eth_sam_gmac_get_bandwidth(gmac);
1024
1025 /* Calculate percentage - instead of multiplying idle_slope by 100,
1026 * divide bandwidth - these numbers are so large that it should not
1027 * influence the outcome and saves us from employing larger data types.
1028 */
1029 *delta_bandwidth = idle_slope / (bandwidth / 100U);
1030
1031 return 0;
1032 }
1033
static int eth_sam_gmac_setup_qav_delta_bandwidth(Gmac *gmac, int queue_id,
1035 int queue_share)
1036 {
1037 uint32_t bandwidth;
1038 uint32_t idle_slope;
1039
1040 /* Verify queue id */
1041 if (queue_id < GMAC_QUE_1 || queue_id > GMAC_ACTIVE_PRIORITY_QUEUE_NUM) {
1042 return -EINVAL;
1043 }
1044
1045 bandwidth = eth_sam_gmac_get_bandwidth(gmac);
1046
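/* e.g. at 100 Mbps (bandwidth = 12500000 Bps) a 75% share yields idle_slope = 9375000 */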
1047 idle_slope = (bandwidth * queue_share) / 100U;
1048
1049 return eth_sam_gmac_setup_qav_idle_slope(gmac, queue_id, idle_slope);
1050 }
1051 #endif
1052
1053 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
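/*
 * Derive the PTP timer increment settings (NIT/CNS/ACNS) from the MCK
 * frequency so that the 1588 clock advances at its nominal rate.
 */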
static void gmac_setup_ptp_clock_divisors(Gmac *gmac)
1055 {
1056 int mck_divs[] = {10, 5, 2};
1057 double min_cycles;
1058 double min_period;
1059 int div;
1060 int i;
1061
1062 uint8_t cns, acns, nit;
1063
1064 min_cycles = MCK_FREQ_HZ;
1065 min_period = NSEC_PER_SEC;
1066
1067 for (i = 0; i < ARRAY_SIZE(mck_divs); ++i) {
1068 div = mck_divs[i];
1069 while ((double)(min_cycles / div) == (int)(min_cycles / div) &&
1070 (double)(min_period / div) == (int)(min_period / div)) {
1071 min_cycles /= div;
1072 min_period /= div;
1073 }
1074 }
1075
1076 nit = min_cycles - 1;
1077 cns = 0U;
1078 acns = 0U;
1079
1080 while ((cns + 2) * nit < min_period) {
1081 cns++;
1082 }
1083
1084 acns = min_period - (nit * cns);
1085
1086 gmac->GMAC_TI =
1087 GMAC_TI_CNS(cns) | GMAC_TI_ACNS(acns) | GMAC_TI_NIT(nit);
1088 gmac->GMAC_TISUBN = 0;
1089 }
1090 #endif
1091
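/*
 * One-time GMAC block setup: program the MDC divisor, disable and clear all
 * interrupts, enable multicast hash reception, select the MII/RMII interface
 * and configure the PTP clock and Qav defaults.
 */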
static int gmac_init(Gmac *gmac, uint32_t gmac_ncfgr_val)
1093 {
1094 int mck_divisor;
1095
1096 mck_divisor = get_mck_clock_divisor(MCK_FREQ_HZ);
1097 if (mck_divisor < 0) {
1098 return mck_divisor;
1099 }
1100
1101 /* Set Network Control Register to its default value, clear stats. */
1102 gmac->GMAC_NCR = GMAC_NCR_CLRSTAT | GMAC_NCR_MPE;
1103
1104 /* Disable all interrupts */
1105 gmac->GMAC_IDR = UINT32_MAX;
1106 /* Clear all interrupts */
1107 (void)gmac->GMAC_ISR;
1108 disable_all_priority_queue_interrupt(gmac);
1109
1110 /* Setup Hash Registers - enable reception of all multicast frames when
1111 * GMAC_NCFGR_MTIHEN is set.
1112 */
1113 gmac->GMAC_HRB = UINT32_MAX;
1114 gmac->GMAC_HRT = UINT32_MAX;
1115 /* Setup Network Configuration Register */
1116 gmac->GMAC_NCFGR = gmac_ncfgr_val | mck_divisor;
1117
/* Default (RMII) is defined in the atmel,gmac-common.yaml file */
1119 switch (DT_INST_ENUM_IDX(0, phy_connection_type)) {
1120 case 0: /* mii */
1121 gmac->GMAC_UR = 0x1;
1122 break;
1123 case 1: /* rmii */
1124 gmac->GMAC_UR = 0x0;
1125 break;
1126 default:
1127 /* Build assert at top of file should catch this case */
1128 LOG_ERR("The phy connection type is invalid");
1129
1130 return -EINVAL;
1131 }
1132
1133 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
1134 /* Initialize PTP Clock Registers */
1135 gmac_setup_ptp_clock_divisors(gmac);
1136
1137 gmac->GMAC_TN = 0;
1138 gmac->GMAC_TSH = 0;
1139 gmac->GMAC_TSL = 0;
1140 #endif
1141
1142 /* Enable Qav if priority queues are used, and setup the default delta
1143 * bandwidth according to IEEE802.1Qav (34.3.1)
1144 */
1145 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM == 1
1146 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 1, 75);
1147 #elif GMAC_ACTIVE_PRIORITY_QUEUE_NUM == 2
1148 /* For multiple priority queues, 802.1Qav suggests using 75% for the
1149 * highest priority queue, and 0% for the lower priority queues.
1150 * This is because the lower priority queues are supposed to be using
1151 * the bandwidth available from the higher priority queues AND its own
1152 * available bandwidth (see 802.1Q 34.3.1 for more details).
1153 * This does not work like that in SAM GMAC - the lower priority queues
1154 * are not using the bandwidth reserved for the higher priority queues
1155 * at all. Thus we still set the default to a total of the recommended
1156 * 75%, but split the bandwidth between them manually.
1157 */
1158 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 1, 25);
1159 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 2, 50);
1160 #elif GMAC_ACTIVE_PRIORITY_QUEUE_NUM == 3
1161 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 1, 25);
1162 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 2, 25);
1163 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 3, 25);
1164 #elif GMAC_ACTIVE_PRIORITY_QUEUE_NUM == 4
1165 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 1, 21);
1166 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 2, 18);
1167 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 3, 18);
1168 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 4, 18);
1169 #elif GMAC_ACTIVE_PRIORITY_QUEUE_NUM == 5
1170 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 1, 15);
1171 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 2, 15);
1172 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 3, 15);
1173 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 4, 15);
1174 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 5, 15);
1175 #endif
1176
1177 eth_sam_gmac_init_qav(gmac);
1178
1179 return 0;
1180 }
1181
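/* Apply the duplex/speed settings negotiated by the PHY and enable RX/TX */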
static void link_configure(Gmac *gmac, bool full_duplex, bool speed_100M)
1183 {
1184 uint32_t val;
1185
1186 val = gmac->GMAC_NCFGR;
1187
1188 val &= ~(GMAC_NCFGR_FD | GMAC_NCFGR_SPD);
1189 val |= (full_duplex) ? GMAC_NCFGR_FD : 0;
1190 val |= (speed_100M) ? GMAC_NCFGR_SPD : 0;
1191
1192 gmac->GMAC_NCFGR = val;
1193
1194 gmac->GMAC_NCR |= (GMAC_NCR_RXEN | GMAC_NCR_TXEN);
1195 }
1196
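/*
 * Set up descriptor lists, DMA configuration and interrupts for the main
 * (non-priority) queue.
 */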
static int nonpriority_queue_init(Gmac *gmac, struct gmac_queue *queue)
1198 {
1199 int result;
1200
1201 __ASSERT_NO_MSG(queue->rx_desc_list.len > 0);
1202 __ASSERT_NO_MSG(queue->tx_desc_list.len > 0);
1203 __ASSERT(!((uint32_t)queue->rx_desc_list.buf & ~GMAC_RBQB_ADDR_Msk),
1204 "RX descriptors have to be word aligned");
1205 __ASSERT(!((uint32_t)queue->tx_desc_list.buf & ~GMAC_TBQB_ADDR_Msk),
1206 "TX descriptors have to be word aligned");
1207
1208 /* Setup descriptor lists */
1209 result = rx_descriptors_init(gmac, queue);
1210 if (result < 0) {
1211 return result;
1212 }
1213
1214 tx_descriptors_init(gmac, queue);
1215
1216 #if GMAC_MULTIPLE_TX_PACKETS == 0
1217 /* Initialize TX semaphore. This semaphore is used to wait until the TX
1218 * data has been sent.
1219 */
1220 k_sem_init(&queue->tx_sem, 0, 1);
1221 #else
1222 /* Initialize TX descriptors semaphore. The semaphore is required as the
1223 * size of the TX descriptor list is limited while the number of TX data
1224 * buffers is not.
1225 */
1226 k_sem_init(&queue->tx_desc_sem, queue->tx_desc_list.len - 1,
1227 queue->tx_desc_list.len - 1);
1228 #endif
1229
1230 /* Set Receive Buffer Queue Pointer Register */
1231 gmac->GMAC_RBQB = (uint32_t)queue->rx_desc_list.buf;
1232 /* Set Transmit Buffer Queue Pointer Register */
1233 gmac->GMAC_TBQB = (uint32_t)queue->tx_desc_list.buf;
1234
1235 /* Configure GMAC DMA transfer */
1236 gmac->GMAC_DCFGR =
1237 /* Receive Buffer Size (defined in multiples of 64 bytes) */
1238 GMAC_DCFGR_DRBS(CONFIG_NET_BUF_DATA_SIZE >> 6) |
1239 #if defined(GMAC_DCFGR_RXBMS)
1240 /* Use full receive buffer size on parts where this is selectable */
1241 GMAC_DCFGR_RXBMS(3) |
1242 #endif
1243 /* Attempt to use INCR4 AHB bursts (Default) */
1244 GMAC_DCFGR_FBLDO_INCR4 |
1245 /* DMA Queue Flags */
1246 GMAC_DMA_QUEUE_FLAGS;
1247
1248 /* Setup RX/TX completion and error interrupts */
1249 gmac->GMAC_IER = GMAC_INT_EN_FLAGS;
1250
1251 queue->err_rx_frames_dropped = 0U;
1252 queue->err_rx_flushed_count = 0U;
1253 queue->err_tx_flushed_count = 0U;
1254
1255 LOG_INF("Queue %d activated", queue->que_idx);
1256
1257 return 0;
1258 }
1259
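/*
 * Collect one complete frame from the RX descriptor list into a net_pkt.
 * Consumed fragments are replaced with freshly allocated buffers so that the
 * descriptors can immediately be reused by the GMAC.
 */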
static struct net_pkt *frame_get(struct gmac_queue *queue)
1261 {
1262 struct gmac_desc_list *rx_desc_list = &queue->rx_desc_list;
1263 struct gmac_desc *rx_desc;
1264 struct net_buf **rx_frag_list = queue->rx_frag_list;
1265 struct net_pkt *rx_frame;
1266 bool frame_is_complete;
1267 struct net_buf *frag;
1268 struct net_buf *new_frag;
1269 struct net_buf *last_frag = NULL;
1270 uint8_t *frag_data;
1271 uint32_t frag_len;
1272 uint32_t frame_len = 0U;
1273 uint16_t tail;
1274 uint8_t wrap;
1275
1276 /* Check if there exists a complete frame in RX descriptor list */
1277 tail = rx_desc_list->tail;
1278 rx_desc = &rx_desc_list->buf[tail];
1279 frame_is_complete = false;
1280 while ((rx_desc->w0 & GMAC_RXW0_OWNERSHIP)
1281 && !frame_is_complete) {
1282 frame_is_complete = (bool)(rx_desc->w1
1283 & GMAC_RXW1_EOF);
1284 MODULO_INC(tail, rx_desc_list->len);
1285 rx_desc = &rx_desc_list->buf[tail];
1286 }
1287 /* Frame which is not complete can be dropped by GMAC. Do not process
1288 * it, even partially.
1289 */
1290 if (!frame_is_complete) {
1291 return NULL;
1292 }
1293
1294 rx_frame = net_pkt_rx_alloc(K_NO_WAIT);
1295
1296 /* Process a frame */
1297 tail = rx_desc_list->tail;
1298 rx_desc = &rx_desc_list->buf[tail];
1299 frame_is_complete = false;
1300
1301 /* TODO: Don't assume first RX fragment will have SOF (Start of frame)
1302 * bit set. If SOF bit is missing recover gracefully by dropping
1303 * invalid frame.
1304 */
1305 __ASSERT(rx_desc->w1 & GMAC_RXW1_SOF,
1306 "First RX fragment is missing SOF bit");
1307
1308 /* TODO: We know already tail and head indexes of fragments containing
1309 * complete frame. Loop over those indexes, don't search for them
1310 * again.
1311 */
1312 while ((rx_desc->w0 & GMAC_RXW0_OWNERSHIP)
1313 && !frame_is_complete) {
1314 frag = rx_frag_list[tail];
1315 frag_data =
1316 (uint8_t *)(rx_desc->w0 & GMAC_RXW0_ADDR);
1317 __ASSERT(frag->data == frag_data,
1318 "RX descriptor and buffer list desynchronized");
1319 frame_is_complete = (bool)(rx_desc->w1 & GMAC_RXW1_EOF);
1320 if (frame_is_complete) {
1321 frag_len = (rx_desc->w1 & GMAC_RXW1_LEN) - frame_len;
1322 } else {
1323 frag_len = CONFIG_NET_BUF_DATA_SIZE;
1324 }
1325
1326 frame_len += frag_len;
1327
1328 /* Link frame fragments only if RX net buffer is valid */
1329 if (rx_frame != NULL) {
1330 /* Assure cache coherency after DMA write operation */
1331 dcache_invalidate((uint32_t)frag_data, frag->size);
1332
1333 /* Get a new data net buffer from the buffer pool */
1334 new_frag = net_pkt_get_frag(rx_frame, CONFIG_NET_BUF_DATA_SIZE, K_NO_WAIT);
1335 if (new_frag == NULL) {
1336 queue->err_rx_frames_dropped++;
1337 net_pkt_unref(rx_frame);
1338 rx_frame = NULL;
1339 } else {
1340 net_buf_add(frag, frag_len);
1341 if (!last_frag) {
1342 net_pkt_frag_insert(rx_frame, frag);
1343 } else {
1344 net_buf_frag_insert(last_frag, frag);
1345 }
1346 last_frag = frag;
1347 frag = new_frag;
1348 rx_frag_list[tail] = frag;
1349 }
1350 }
1351
1352 /* Update buffer descriptor status word */
1353 rx_desc->w1 = 0U;
1354 /* Guarantee that status word is written before the address
1355 * word to avoid race condition.
1356 */
1357 barrier_dmem_fence_full();
1358 /* Update buffer descriptor address word */
1359 wrap = (tail == rx_desc_list->len-1U ? GMAC_RXW0_WRAP : 0);
1360 rx_desc->w0 = ((uint32_t)frag->data & GMAC_RXW0_ADDR) | wrap;
1361
1362 MODULO_INC(tail, rx_desc_list->len);
1363 rx_desc = &rx_desc_list->buf[tail];
1364 }
1365
1366 rx_desc_list->tail = tail;
1367 LOG_DBG("Frame complete: rx=%p, tail=%d", rx_frame, tail);
1368 __ASSERT_NO_MSG(frame_is_complete);
1369
1370 return rx_frame;
1371 }
1372
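/*
 * Pass all complete frames currently held in the queue's RX descriptor list
 * up the network stack, adding VLAN and gPTP metadata where applicable.
 */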
static void eth_rx(struct gmac_queue *queue)
1374 {
1375 struct eth_sam_dev_data *dev_data =
1376 CONTAINER_OF(queue, struct eth_sam_dev_data,
1377 queue_list[queue->que_idx]);
1378 uint16_t vlan_tag = NET_VLAN_TAG_UNSPEC;
1379 struct net_pkt *rx_frame;
1380 #if defined(CONFIG_NET_GPTP)
1381 const struct device *const dev = net_if_get_device(dev_data->iface);
1382 const struct eth_sam_dev_cfg *const cfg = dev->config;
1383 Gmac *gmac = cfg->regs;
1384 struct gptp_hdr *hdr;
1385 #endif
1386
1387 /* More than one frame could have been received by GMAC, get all
1388 * complete frames stored in the GMAC RX descriptor list.
1389 */
1390 rx_frame = frame_get(queue);
1391 while (rx_frame) {
1392 LOG_DBG("ETH rx");
1393
1394 #if defined(CONFIG_NET_VLAN)
1395 /* FIXME: Instead of this, use the GMAC register to get
1396 * the used VLAN tag.
1397 */
1398 {
1399 struct net_eth_hdr *p_hdr = NET_ETH_HDR(rx_frame);
1400
1401 if (ntohs(p_hdr->type) == NET_ETH_PTYPE_VLAN) {
1402 struct net_eth_vlan_hdr *hdr_vlan =
1403 (struct net_eth_vlan_hdr *)
1404 NET_ETH_HDR(rx_frame);
1405
1406 net_pkt_set_vlan_tci(rx_frame,
1407 ntohs(hdr_vlan->vlan.tci));
1408 vlan_tag = net_pkt_vlan_tag(rx_frame);
1409
1410 #if CONFIG_NET_TC_RX_COUNT > 1
1411 {
1412 enum net_priority prio;
1413
1414 prio = net_vlan2priority(
1415 net_pkt_vlan_priority(rx_frame));
1416 net_pkt_set_priority(rx_frame, prio);
1417 }
1418 #endif
1419 }
1420 }
1421 #endif
1422 #if defined(CONFIG_NET_GPTP)
1423 hdr = check_gptp_msg(get_iface(dev_data, vlan_tag), rx_frame,
1424 false);
1425
1426 timestamp_rx_pkt(gmac, hdr, rx_frame);
1427
1428 if (hdr) {
1429 update_pkt_priority(hdr, rx_frame);
1430 }
1431 #endif /* CONFIG_NET_GPTP */
1432
1433 if (net_recv_data(get_iface(dev_data, vlan_tag),
1434 rx_frame) < 0) {
1435 eth_stats_update_errors_rx(get_iface(dev_data,
1436 vlan_tag));
1437 net_pkt_unref(rx_frame);
1438 }
1439
1440 rx_frame = frame_get(queue);
1441 }
1442 }
1443
1444 #if !defined(CONFIG_ETH_SAM_GMAC_FORCE_QUEUE) && \
1445 ((GMAC_ACTIVE_QUEUE_NUM != NET_TC_TX_COUNT) || \
1446 ((NET_TC_TX_COUNT != NET_TC_RX_COUNT) && defined(CONFIG_NET_VLAN)))
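/* Map an 802.1Q priority (0..7) onto one of the active hardware queues */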
static int priority2queue(enum net_priority priority)
1448 {
1449 static const uint8_t queue_priority_map[] = {
1450 #if GMAC_ACTIVE_QUEUE_NUM == 1
1451 0, 0, 0, 0, 0, 0, 0, 0
1452 #endif
1453 #if GMAC_ACTIVE_QUEUE_NUM == 2
1454 0, 0, 0, 0, 1, 1, 1, 1
1455 #endif
1456 #if GMAC_ACTIVE_QUEUE_NUM == 3
1457 0, 0, 0, 0, 1, 1, 2, 2
1458 #endif
1459 #if GMAC_ACTIVE_QUEUE_NUM == 4
1460 0, 0, 0, 0, 1, 1, 2, 3
1461 #endif
1462 #if GMAC_ACTIVE_QUEUE_NUM == 5
1463 0, 0, 0, 0, 1, 2, 3, 4
1464 #endif
1465 #if GMAC_ACTIVE_QUEUE_NUM == 6
1466 0, 0, 0, 1, 2, 3, 4, 5
1467 #endif
1468 };
1469
1470 return queue_priority_map[priority];
1471 }
1472 #endif
1473
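/*
 * Transmit a packet: map each fragment onto a TX descriptor, hand ownership
 * to the GMAC and start transmission. When only a single TX packet is
 * supported the call blocks until the frame has been sent.
 */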
static int eth_tx(const struct device *dev, struct net_pkt *pkt)
1475 {
1476 const struct eth_sam_dev_cfg *const cfg = dev->config;
1477 struct eth_sam_dev_data *const dev_data = dev->data;
1478 Gmac *gmac = cfg->regs;
1479 struct gmac_queue *queue;
1480 struct gmac_desc_list *tx_desc_list;
1481 struct gmac_desc *tx_desc;
1482 struct gmac_desc *tx_first_desc;
1483 struct net_buf *frag;
1484 uint8_t *frag_data;
1485 uint16_t frag_len;
1486 uint32_t err_tx_flushed_count_at_entry;
1487 #if GMAC_MULTIPLE_TX_PACKETS == 1
1488 unsigned int key;
1489 #endif
1490 uint8_t pkt_prio;
1491 #if GMAC_MULTIPLE_TX_PACKETS == 0
1492 #if defined(CONFIG_NET_GPTP)
1493 uint16_t vlan_tag = NET_VLAN_TAG_UNSPEC;
1494 struct gptp_hdr *hdr;
1495 #if defined(CONFIG_NET_VLAN)
1496 struct net_eth_hdr *eth_hdr;
1497 #endif
1498 #endif
1499 #endif
1500
1501 __ASSERT(pkt, "buf pointer is NULL");
1502 __ASSERT(pkt->frags, "Frame data missing");
1503
1504 LOG_DBG("ETH tx");
1505
1506 /* Decide which queue should be used */
1507 pkt_prio = net_pkt_priority(pkt);
1508
1509 #if defined(CONFIG_ETH_SAM_GMAC_FORCE_QUEUE)
/* Route everything to the forced queue */
1511 queue = &dev_data->queue_list[CONFIG_ETH_SAM_GMAC_FORCED_QUEUE];
1512 #elif GMAC_ACTIVE_QUEUE_NUM == CONFIG_NET_TC_TX_COUNT
/* Prefer to choose the queue based on its traffic class */
1514 queue = &dev_data->queue_list[net_tx_priority2tc(pkt_prio)];
1515 #else
1516 /* If that's not possible due to config - use builtin mapping */
1517 queue = &dev_data->queue_list[priority2queue(pkt_prio)];
1518 #endif
1519
1520 tx_desc_list = &queue->tx_desc_list;
1521 err_tx_flushed_count_at_entry = queue->err_tx_flushed_count;
1522
1523 frag = pkt->frags;
1524
1525 /* Keep reference to the descriptor */
1526 tx_first_desc = &tx_desc_list->buf[tx_desc_list->head];
1527
1528 while (frag) {
1529 frag_data = frag->data;
1530 frag_len = frag->len;
1531
1532 /* Assure cache coherency before DMA read operation */
1533 dcache_clean((uint32_t)frag_data, frag->size);
1534
1535 #if GMAC_MULTIPLE_TX_PACKETS == 1
1536 k_sem_take(&queue->tx_desc_sem, K_FOREVER);
1537
1538 /* The following section becomes critical and requires IRQ lock
1539 * / unlock protection only due to the possibility of executing
1540 * tx_error_handler() function.
1541 */
1542 key = irq_lock();
1543
1544 /* Check if tx_error_handler() function was executed */
1545 if (queue->err_tx_flushed_count !=
1546 err_tx_flushed_count_at_entry) {
1547 irq_unlock(key);
1548 return -EIO;
1549 }
1550 #endif
1551
1552 tx_desc = &tx_desc_list->buf[tx_desc_list->head];
1553
1554 /* Update buffer descriptor address word */
1555 tx_desc->w0 = (uint32_t)frag_data;
1556
1557 /* Update buffer descriptor status word (clear used bit except
1558 * for the first frag).
1559 */
1560 tx_desc->w1 = (frag_len & GMAC_TXW1_LEN)
1561 | (!frag->frags ? GMAC_TXW1_LASTBUFFER : 0)
1562 | (tx_desc_list->head == tx_desc_list->len - 1U
1563 ? GMAC_TXW1_WRAP : 0)
1564 | (tx_desc == tx_first_desc ? GMAC_TXW1_USED : 0);
1565
1566 /* Update descriptor position */
1567 MODULO_INC(tx_desc_list->head, tx_desc_list->len);
1568
1569 #if GMAC_MULTIPLE_TX_PACKETS == 1
1570 __ASSERT(tx_desc_list->head != tx_desc_list->tail,
1571 "tx_desc_list overflow");
1572
1573 /* Account for a sent frag */
1574 ring_buf_put(&queue->tx_frag_list, POINTER_TO_UINT(frag));
1575
/* frag is internally queued, so a reference to it must be held */
1577 net_pkt_frag_ref(frag);
1578
1579 irq_unlock(key);
1580 #endif
1581
1582 /* Continue with the rest of fragments (only data) */
1583 frag = frag->frags;
1584 }
1585
1586 #if GMAC_MULTIPLE_TX_PACKETS == 1
1587 key = irq_lock();
1588
1589 /* Check if tx_error_handler() function was executed */
1590 if (queue->err_tx_flushed_count != err_tx_flushed_count_at_entry) {
1591 irq_unlock(key);
1592 return -EIO;
1593 }
1594 #endif
1595
1596 /* Ensure the descriptor following the last one is marked as used */
1597 tx_desc_list->buf[tx_desc_list->head].w1 = GMAC_TXW1_USED;
1598
1599 /* Guarantee that all the fragments have been written before removing
1600 * the used bit to avoid race condition.
1601 */
1602 barrier_dmem_fence_full();
1603
1604 /* Remove the used bit of the first fragment to allow the controller
1605 * to process it and the following fragments.
1606 */
1607 tx_first_desc->w1 &= ~GMAC_TXW1_USED;
1608
1609 #if GMAC_MULTIPLE_TX_PACKETS == 1
1610 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
1611 /* Account for a sent frame */
1612 ring_buf_put(&queue->tx_frames, POINTER_TO_UINT(pkt));
1613
/* pkt is internally queued, so a reference to it must be held */
1615 net_pkt_ref(pkt);
1616 #endif
1617
1618 irq_unlock(key);
1619 #endif
1620
1621 /* Guarantee that the first fragment got its bit removed before starting
1622 * sending packets to avoid packets getting stuck.
1623 */
1624 barrier_dmem_fence_full();
1625
1626 /* Start transmission */
1627 gmac->GMAC_NCR |= GMAC_NCR_TSTART;
1628
1629 #if GMAC_MULTIPLE_TX_PACKETS == 0
1630 /* Wait until the packet is sent */
1631 k_sem_take(&queue->tx_sem, K_FOREVER);
1632
1633 /* Check if transmit successful or not */
1634 if (queue->err_tx_flushed_count != err_tx_flushed_count_at_entry) {
1635 return -EIO;
1636 }
1637 #if defined(CONFIG_NET_GPTP)
1638 #if defined(CONFIG_NET_VLAN)
1639 eth_hdr = NET_ETH_HDR(pkt);
1640 if (ntohs(eth_hdr->type) == NET_ETH_PTYPE_VLAN) {
1641 vlan_tag = net_pkt_vlan_tag(pkt);
1642 }
1643 #endif
1644 #if defined(CONFIG_NET_GPTP)
1645 hdr = check_gptp_msg(get_iface(dev_data, vlan_tag), pkt, true);
1646 timestamp_tx_pkt(gmac, hdr, pkt);
1647 if (hdr && need_timestamping(hdr)) {
1648 net_if_add_tx_timestamp(pkt);
1649 }
1650 #endif
1651 #endif
1652 #endif
1653
1654 return 0;
1655 }
1656
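/*
 * Interrupt handler for the non-priority queue: dispatch RX/TX completions
 * and recover from error conditions.
 */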
static void queue0_isr(const struct device *dev)
1658 {
1659 const struct eth_sam_dev_cfg *const cfg = dev->config;
1660 struct eth_sam_dev_data *const dev_data = dev->data;
1661 Gmac *gmac = cfg->regs;
1662 struct gmac_queue *queue;
1663 struct gmac_desc_list *rx_desc_list;
1664 struct gmac_desc_list *tx_desc_list;
1665 struct gmac_desc *tail_desc;
1666 uint32_t isr;
1667
1668 /* Interrupt Status Register is cleared on read */
1669 isr = gmac->GMAC_ISR;
1670 LOG_DBG("GMAC_ISR=0x%08x", isr);
1671
1672 queue = &dev_data->queue_list[0];
1673 rx_desc_list = &queue->rx_desc_list;
1674 tx_desc_list = &queue->tx_desc_list;
1675
1676 /* RX packet */
1677 if (isr & GMAC_INT_RX_ERR_BITS) {
1678 rx_error_handler(gmac, queue);
1679 } else if (isr & GMAC_ISR_RCOMP) {
1680 tail_desc = &rx_desc_list->buf[rx_desc_list->tail];
1681 LOG_DBG("rx.w1=0x%08x, tail=%d",
1682 tail_desc->w1,
1683 rx_desc_list->tail);
1684 eth_rx(queue);
1685 }
1686
1687 /* TX packet */
1688 if (isr & GMAC_INT_TX_ERR_BITS) {
1689 tx_error_handler(gmac, queue);
1690 } else if (isr & GMAC_ISR_TCOMP) {
1691 #if GMAC_MULTIPLE_TX_PACKETS == 1
1692 tail_desc = &tx_desc_list->buf[tx_desc_list->tail];
1693 LOG_DBG("tx.w1=0x%08x, tail=%d",
1694 tail_desc->w1,
1695 tx_desc_list->tail);
1696 #endif
1697
1698 tx_completed(gmac, queue);
1699 }
1700
1701 if (isr & GMAC_IER_HRESP) {
1702 LOG_DBG("IER HRESP");
1703 }
1704 }
1705
1706 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
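/* Shared interrupt body for the priority queues, mirroring queue0_isr() */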
static inline void priority_queue_isr(const struct device *dev,
1708 unsigned int queue_idx)
1709 {
1710 const struct eth_sam_dev_cfg *const cfg = dev->config;
1711 struct eth_sam_dev_data *const dev_data = dev->data;
1712 Gmac *gmac = cfg->regs;
1713 struct gmac_queue *queue;
1714 struct gmac_desc_list *rx_desc_list;
1715 struct gmac_desc_list *tx_desc_list;
1716 struct gmac_desc *tail_desc;
1717 uint32_t isrpq;
1718
1719 isrpq = gmac->GMAC_ISRPQ[queue_idx - 1];
1720 LOG_DBG("GMAC_ISRPQ%d=0x%08x", queue_idx - 1, isrpq);
1721
1722 queue = &dev_data->queue_list[queue_idx];
1723 rx_desc_list = &queue->rx_desc_list;
1724 tx_desc_list = &queue->tx_desc_list;
1725
1726 /* RX packet */
1727 if (isrpq & GMAC_INTPQ_RX_ERR_BITS) {
1728 rx_error_handler(gmac, queue);
1729 } else if (isrpq & GMAC_ISRPQ_RCOMP) {
1730 tail_desc = &rx_desc_list->buf[rx_desc_list->tail];
1731 LOG_DBG("rx.w1=0x%08x, tail=%d",
1732 tail_desc->w1,
1733 rx_desc_list->tail);
1734 eth_rx(queue);
1735 }
1736
1737 /* TX packet */
1738 if (isrpq & GMAC_INTPQ_TX_ERR_BITS) {
1739 tx_error_handler(gmac, queue);
1740 } else if (isrpq & GMAC_ISRPQ_TCOMP) {
1741 #if GMAC_MULTIPLE_TX_PACKETS == 1
1742 tail_desc = &tx_desc_list->buf[tx_desc_list->tail];
1743 LOG_DBG("tx.w1=0x%08x, tail=%d",
1744 tail_desc->w1,
1745 tx_desc_list->tail);
1746 #endif
1747
1748 tx_completed(gmac, queue);
1749 }
1750
1751 if (isrpq & GMAC_IERPQ_HRESP) {
1752 LOG_DBG("IERPQ%d HRESP", queue_idx - 1);
1753 }
1754 }
1755 #endif
1756
1757 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
static void queue1_isr(const struct device *dev)
1759 {
1760 priority_queue_isr(dev, 1);
1761 }
1762 #endif
1763
1764 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 2
static void queue2_isr(const struct device *dev)
1766 {
1767 priority_queue_isr(dev, 2);
1768 }
1769 #endif
1770
1771 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 3
static void queue3_isr(const struct device *dev)
1773 {
1774 priority_queue_isr(dev, 3);
1775 }
1776 #endif
1777
1778 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 4
static void queue4_isr(const struct device *dev)
1780 {
1781 priority_queue_isr(dev, 4);
1782 }
1783 #endif
1784
1785 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 5
static void queue5_isr(const struct device *dev)
1787 {
1788 priority_queue_isr(dev, 5);
1789 }
1790 #endif
1791
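/*
 * Early device init: run the board-specific config hook, enable the GMAC
 * clock and apply the default pin configuration.
 */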
static int eth_initialize(const struct device *dev)
1793 {
1794 const struct eth_sam_dev_cfg *const cfg = dev->config;
1795 int retval;
1796
1797 cfg->config_func();
1798
1799 #ifdef CONFIG_SOC_FAMILY_SAM
1800 /* Enable GMAC module's clock */
1801 (void)clock_control_on(SAM_DT_PMC_CONTROLLER,
1802 (clock_control_subsys_t)&cfg->clock_cfg);
1803 #else
1804 /* Enable MCLK clock on GMAC */
1805 MCLK->AHBMASK.reg |= MCLK_AHBMASK_GMAC;
1806 *MCLK_GMAC |= MCLK_GMAC_MASK;
1807 #endif
1808 /* Connect pins to the peripheral */
1809 retval = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
1810
1811 return retval;
1812 }
1813
1814 #if DT_INST_NODE_HAS_PROP(0, mac_eeprom)
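/* Read the 6-byte MAC address from the I2C EEPROM given by the mac-eeprom property */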
static void get_mac_addr_from_i2c_eeprom(uint8_t mac_addr[6])
1816 {
1817 uint32_t iaddr = CONFIG_ETH_SAM_GMAC_MAC_I2C_INT_ADDRESS;
1818 int ret;
1819 const struct i2c_dt_spec i2c = I2C_DT_SPEC_GET(DT_INST_PHANDLE(0, mac_eeprom));
1820
1821 if (!device_is_ready(i2c.bus)) {
1822 LOG_ERR("Bus device is not ready");
1823 return;
1824 }
1825
1826 ret = i2c_write_read_dt(&i2c,
1827 &iaddr, CONFIG_ETH_SAM_GMAC_MAC_I2C_INT_ADDRESS_SIZE,
1828 mac_addr, 6);
1829
1830 if (ret != 0) {
1831 LOG_ERR("I2C: failed to read MAC addr");
1832 return;
1833 }
1834 }
1835 #endif
1836
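/*
 * Resolve the MAC address. An attached EEPROM takes precedence, then a
 * randomly generated address using the Atmel OUI; if neither devicetree
 * property is set, mac_addr is left untouched so the local-mac-address
 * value used to initialize eth0_data remains in effect.
 */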
static void generate_mac(uint8_t mac_addr[6])
{
#if DT_INST_NODE_HAS_PROP(0, mac_eeprom)
	get_mac_addr_from_i2c_eeprom(mac_addr);
#elif DT_INST_PROP(0, zephyr_random_mac_address)
	gen_random_mac(mac_addr, ATMEL_OUI_B0, ATMEL_OUI_B1, ATMEL_OUI_B2);
#endif
}

static void phy_link_state_changed(const struct device *pdev,
				   struct phy_link_state *state,
				   void *user_data)
{
	const struct device *dev = (const struct device *) user_data;
	struct eth_sam_dev_data *const dev_data = dev->data;
	const struct eth_sam_dev_cfg *const cfg = dev->config;
	bool is_up;

	is_up = state->is_up;

	if (is_up && !dev_data->link_up) {
		LOG_INF("Link up");

		/* Announce link up status */
		dev_data->link_up = true;
		net_eth_carrier_on(dev_data->iface);

		/* Set up link */
		link_configure(cfg->regs,
			       PHY_LINK_IS_FULL_DUPLEX(state->speed),
			       PHY_LINK_IS_SPEED_100M(state->speed));
	} else if (!is_up && dev_data->link_up) {
		LOG_INF("Link down");

		/* Announce link down status */
		dev_data->link_up = false;
		net_eth_carrier_off(dev_data->iface);
	}
}

static void eth0_iface_init(struct net_if *iface)
{
	const struct device *dev = net_if_get_device(iface);
	struct eth_sam_dev_data *const dev_data = dev->data;
	const struct eth_sam_dev_cfg *const cfg = dev->config;
	static bool init_done;
	uint32_t gmac_ncfgr_val;
	int result;
	int i;

	/* For VLAN, this value is only used to get the correct L2 driver.
	 * The iface pointer in context should contain the main interface
	 * if the VLANs are enabled.
	 */
	if (dev_data->iface == NULL) {
		dev_data->iface = iface;
	}

	ethernet_init(iface);

	/* The rest of initialization should only be done once */
	if (init_done) {
		return;
	}

	/* Check the status of data caches */
	dcache_is_enabled();

	/* Initialize GMAC driver */
	gmac_ncfgr_val =
		  GMAC_NCFGR_MTIHEN  /* Multicast Hash Enable */
		| GMAC_NCFGR_LFERD   /* Length Field Error Frame Discard */
		| GMAC_NCFGR_RFCS    /* Remove Frame Check Sequence */
		| GMAC_NCFGR_RXCOEN  /* Receive Checksum Offload Enable */
		| GMAC_MAX_FRAME_SIZE;
	result = gmac_init(cfg->regs, gmac_ncfgr_val);
	if (result < 0) {
		LOG_ERR("Unable to initialize ETH driver");
		return;
	}

	generate_mac(dev_data->mac_addr);

	LOG_INF("MAC: %02x:%02x:%02x:%02x:%02x:%02x",
		dev_data->mac_addr[0], dev_data->mac_addr[1],
		dev_data->mac_addr[2], dev_data->mac_addr[3],
		dev_data->mac_addr[4], dev_data->mac_addr[5]);

	/* Set MAC Address for frame filtering logic */
	mac_addr_set(cfg->regs, 0, dev_data->mac_addr);

	/* Register Ethernet MAC Address with the upper layer */
	net_if_set_link_addr(iface, dev_data->mac_addr,
			     sizeof(dev_data->mac_addr),
			     NET_LINK_ETHERNET);

	/* Initialize GMAC queues */
	for (i = GMAC_QUE_0; i < GMAC_QUEUE_NUM; i++) {
		result = queue_init(cfg->regs, &dev_data->queue_list[i]);
		if (result < 0) {
			LOG_ERR("Unable to initialize ETH queue%d", i);
			return;
		}
	}

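	/* Route received frames to the priority queues. Depending on the
	 * configuration this is done either by forcing every traffic class
	 * to a single queue, by mapping traffic classes 1:1 onto queues, or
	 * by screening on the VLAN priority field.
	 */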
#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
#if defined(CONFIG_ETH_SAM_GMAC_FORCE_QUEUE)
	for (i = 0; i < CONFIG_NET_TC_RX_COUNT; ++i) {
		cfg->regs->GMAC_ST1RPQ[i] =
			GMAC_ST1RPQ_DSTCM(i) |
			GMAC_ST1RPQ_QNB(CONFIG_ETH_SAM_GMAC_FORCED_QUEUE);
	}
#elif GMAC_ACTIVE_QUEUE_NUM == NET_TC_RX_COUNT
	/* If TC configuration is compatible with HW configuration, setup the
	 * screening registers based on the DS/TC values.
	 * Map them 1:1 - TC 0 -> Queue 0, TC 1 -> Queue 1 etc.
	 */
	for (i = 0; i < CONFIG_NET_TC_RX_COUNT; ++i) {
		cfg->regs->GMAC_ST1RPQ[i] =
			GMAC_ST1RPQ_DSTCM(i) | GMAC_ST1RPQ_QNB(i);
	}
#elif defined(CONFIG_NET_VLAN)
	/* If VLAN is enabled, route packets according to VLAN priority */
	int j;

	i = 0;
	for (j = NET_PRIORITY_NC; j >= 0; --j) {
		if (priority2queue(j) == 0) {
			/* No point to set rules for the regular queue */
			continue;
		}

		if (i >= ARRAY_SIZE(cfg->regs->GMAC_ST2RPQ)) {
			/* No more screening registers available */
			break;
		}

		cfg->regs->GMAC_ST2RPQ[i++] =
			GMAC_ST2RPQ_QNB(priority2queue(j))
			| GMAC_ST2RPQ_VLANP(j)
			| GMAC_ST2RPQ_VLANE;
	}

#endif
#endif
	if (device_is_ready(cfg->phy_dev)) {
		phy_link_callback_set(cfg->phy_dev, &phy_link_state_changed,
				      (void *)dev);

	} else {
		LOG_ERR("PHY device not ready");
	}

	/* Do not start the interface until PHY link is up */
	if (!(dev_data->link_up)) {
		net_if_carrier_off(iface);
	}

	init_done = true;
}

static enum ethernet_hw_caps eth_sam_gmac_get_capabilities(const struct device *dev)
{
	ARG_UNUSED(dev);

	return ETHERNET_HW_VLAN | ETHERNET_LINK_10BASE_T |
#if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
		ETHERNET_PTP |
#endif
		ETHERNET_PRIORITY_QUEUES |
#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
		ETHERNET_QAV |
#endif
		ETHERNET_LINK_100BASE_T;
}

#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
static int eth_sam_gmac_set_qav_param(const struct device *dev,
				      enum ethernet_config_type type,
				      const struct ethernet_config *config)
{
	const struct eth_sam_dev_cfg *const cfg = dev->config;
	Gmac *gmac = cfg->regs;
	enum ethernet_qav_param_type qav_param_type;
	unsigned int delta_bandwidth;
	unsigned int idle_slope;
	int queue_id;
	bool enable;

	/* Priority queue IDs start from 1 for SAM GMAC */
	queue_id = config->qav_param.queue_id + 1;

	qav_param_type = config->qav_param.type;

	switch (qav_param_type) {
	case ETHERNET_QAV_PARAM_TYPE_STATUS:
		enable = config->qav_param.enabled;
		return eth_sam_gmac_setup_qav(gmac, queue_id, enable);
	case ETHERNET_QAV_PARAM_TYPE_DELTA_BANDWIDTH:
		delta_bandwidth = config->qav_param.delta_bandwidth;

		return eth_sam_gmac_setup_qav_delta_bandwidth(gmac, queue_id,
							      delta_bandwidth);
	case ETHERNET_QAV_PARAM_TYPE_IDLE_SLOPE:
		idle_slope = config->qav_param.idle_slope;

		/* The standard uses bps, SAM GMAC uses Bps - convert now */
		idle_slope /= 8U;

		return eth_sam_gmac_setup_qav_idle_slope(gmac, queue_id,
							 idle_slope);
	default:
		break;
	}

	return -ENOTSUP;
}
#endif

static int eth_sam_gmac_set_config(const struct device *dev,
				   enum ethernet_config_type type,
				   const struct ethernet_config *config)
{
	int result = 0;

	switch (type) {
#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
	case ETHERNET_CONFIG_TYPE_QAV_PARAM:
		return eth_sam_gmac_set_qav_param(dev, type, config);
#endif
	case ETHERNET_CONFIG_TYPE_MAC_ADDRESS:
	{
		struct eth_sam_dev_data *const dev_data = dev->data;
		const struct eth_sam_dev_cfg *const cfg = dev->config;

		memcpy(dev_data->mac_addr,
		       config->mac_address.addr,
		       sizeof(dev_data->mac_addr));

		/* Set MAC Address for frame filtering logic */
		mac_addr_set(cfg->regs, 0, dev_data->mac_addr);

		LOG_INF("%s MAC set to %02x:%02x:%02x:%02x:%02x:%02x",
			dev->name,
			dev_data->mac_addr[0], dev_data->mac_addr[1],
			dev_data->mac_addr[2], dev_data->mac_addr[3],
			dev_data->mac_addr[4], dev_data->mac_addr[5]);

		/* Register Ethernet MAC Address with the upper layer */
		net_if_set_link_addr(dev_data->iface, dev_data->mac_addr,
				     sizeof(dev_data->mac_addr),
				     NET_LINK_ETHERNET);
		break;
	}
	default:
		result = -ENOTSUP;
		break;
	}

	return result;
}

#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
static int eth_sam_gmac_get_qav_param(const struct device *dev,
				      enum ethernet_config_type type,
				      struct ethernet_config *config)
{
	const struct eth_sam_dev_cfg *const cfg = dev->config;
	Gmac *gmac = cfg->regs;
	enum ethernet_qav_param_type qav_param_type;
	int queue_id;
	bool *enabled;
	unsigned int *idle_slope;
	unsigned int *delta_bandwidth;

	/* Priority queue IDs start from 1 for SAM GMAC */
	queue_id = config->qav_param.queue_id + 1;

	qav_param_type = config->qav_param.type;

	switch (qav_param_type) {
	case ETHERNET_QAV_PARAM_TYPE_STATUS:
		enabled = &config->qav_param.enabled;
		return eth_sam_gmac_get_qav_status(gmac, queue_id, enabled);
	case ETHERNET_QAV_PARAM_TYPE_IDLE_SLOPE:
		idle_slope = &config->qav_param.idle_slope;
		return eth_sam_gmac_get_qav_idle_slope(gmac, queue_id,
						       idle_slope);
	case ETHERNET_QAV_PARAM_TYPE_OPER_IDLE_SLOPE:
		idle_slope = &config->qav_param.oper_idle_slope;
		return eth_sam_gmac_get_qav_idle_slope(gmac, queue_id,
						       idle_slope);
	case ETHERNET_QAV_PARAM_TYPE_DELTA_BANDWIDTH:
		delta_bandwidth = &config->qav_param.delta_bandwidth;
		return eth_sam_gmac_get_qav_delta_bandwidth(gmac, queue_id,
							    delta_bandwidth);
	case ETHERNET_QAV_PARAM_TYPE_TRAFFIC_CLASS:
#if GMAC_ACTIVE_QUEUE_NUM == NET_TC_TX_COUNT
		config->qav_param.traffic_class = queue_id;
		return 0;
#else
		/* Invalid configuration - no direct TC to queue mapping */
		return -ENOTSUP;
#endif
	default:
		break;
	}

	return -ENOTSUP;
}
#endif

static int eth_sam_gmac_get_config(const struct device *dev,
				   enum ethernet_config_type type,
				   struct ethernet_config *config)
{
	switch (type) {
	case ETHERNET_CONFIG_TYPE_PRIORITY_QUEUES_NUM:
		config->priority_queues_num = GMAC_ACTIVE_PRIORITY_QUEUE_NUM;
		return 0;
#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
	case ETHERNET_CONFIG_TYPE_QAV_PARAM:
		return eth_sam_gmac_get_qav_param(dev, type, config);
#endif
	default:
		break;
	}

	return -ENOTSUP;
}

#if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
static const struct device *eth_sam_gmac_get_ptp_clock(const struct device *dev)
{
	struct eth_sam_dev_data *const dev_data = dev->data;

	return dev_data->ptp_clock;
}
#endif

static const struct ethernet_api eth_api = {
	.iface_api.init = eth0_iface_init,

	.get_capabilities = eth_sam_gmac_get_capabilities,
	.set_config = eth_sam_gmac_set_config,
	.get_config = eth_sam_gmac_get_config,
	.send = eth_tx,

#if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
	.get_ptp_clock = eth_sam_gmac_get_ptp_clock,
#endif
};

static void eth0_irq_config(void)
{
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, gmac, irq),
		    DT_INST_IRQ_BY_NAME(0, gmac, priority),
		    queue0_isr, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_NAME(0, gmac, irq));

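	/* Each active priority queue has its own interrupt line, named
	 * q1..q5 in the devicetree node.
	 */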
#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, q1, irq),
		    DT_INST_IRQ_BY_NAME(0, q1, priority),
		    queue1_isr, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_NAME(0, q1, irq));
#endif

#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 2
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, q2, irq),
		    DT_INST_IRQ_BY_NAME(0, q2, priority),
		    queue2_isr, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_NAME(0, q2, irq));
#endif

#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 3
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, q3, irq),
		    DT_INST_IRQ_BY_NAME(0, q3, priority),
		    queue3_isr, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_NAME(0, q3, irq));
#endif

#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 4
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, q4, irq),
		    DT_INST_IRQ_BY_NAME(0, q4, priority),
		    queue4_isr, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_NAME(0, q4, irq));
#endif

#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 5
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, q5, irq),
		    DT_INST_IRQ_BY_NAME(0, q5, priority),
		    queue5_isr, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_NAME(0, q5, irq));
#endif
}

PINCTRL_DT_INST_DEFINE(0);

static const struct eth_sam_dev_cfg eth0_config = {
	.regs = (Gmac *)DT_INST_REG_ADDR(0),
	.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0),
#ifdef CONFIG_SOC_FAMILY_SAM
	.clock_cfg = SAM_DT_INST_CLOCK_PMC_CFG(0),
#endif
	.config_func = eth0_irq_config,
	.phy_dev = DEVICE_DT_GET(DT_INST_PHANDLE(0, phy_handle))
};

static struct eth_sam_dev_data eth0_data = {
#if NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(0))
	.mac_addr = DT_INST_PROP(0, local_mac_address),
#endif
	.queue_list = {
		{
			.que_idx = GMAC_QUE_0,
			.rx_desc_list = {
				.buf = rx_desc_que0,
				.len = ARRAY_SIZE(rx_desc_que0),
			},
			.tx_desc_list = {
				.buf = tx_desc_que0,
				.len = ARRAY_SIZE(tx_desc_que0),
			},
			.rx_frag_list = rx_frag_list_que0,
#if GMAC_MULTIPLE_TX_PACKETS == 1
			.tx_frag_list = {
				.buf = (uint32_t *)tx_frag_list_que0,
				.len = ARRAY_SIZE(tx_frag_list_que0),
			},
#if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
			.tx_frames = {
				.buf = (uint32_t *)tx_frame_list_que0,
				.len = ARRAY_SIZE(tx_frame_list_que0),
			},
#endif
#endif
#if GMAC_PRIORITY_QUEUE_NUM >= 1
		}, {
			.que_idx = GMAC_QUE_1,
			.rx_desc_list = {
				.buf = rx_desc_que1,
				.len = ARRAY_SIZE(rx_desc_que1),
			},
			.tx_desc_list = {
				.buf = tx_desc_que1,
				.len = ARRAY_SIZE(tx_desc_que1),
			},
#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
			.rx_frag_list = rx_frag_list_que1,
#if GMAC_MULTIPLE_TX_PACKETS == 1
			.tx_frag_list = {
				.buf = (uint32_t *)tx_frag_list_que1,
				.len = ARRAY_SIZE(tx_frag_list_que1),
			},
#if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
			.tx_frames = {
				.buf = (uint32_t *)tx_frame_list_que1,
				.len = ARRAY_SIZE(tx_frame_list_que1),
			}
#endif
#endif
#endif
#endif
#if GMAC_PRIORITY_QUEUE_NUM >= 2
		}, {
			.que_idx = GMAC_QUE_2,
			.rx_desc_list = {
				.buf = rx_desc_que2,
				.len = ARRAY_SIZE(rx_desc_que2),
			},
			.tx_desc_list = {
				.buf = tx_desc_que2,
				.len = ARRAY_SIZE(tx_desc_que2),
			},
#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 2
			.rx_frag_list = rx_frag_list_que2,
#if GMAC_MULTIPLE_TX_PACKETS == 1
			.tx_frag_list = {
				.buf = (uint32_t *)tx_frag_list_que2,
				.len = ARRAY_SIZE(tx_frag_list_que2),
			},
#if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
			.tx_frames = {
				.buf = (uint32_t *)tx_frame_list_que2,
				.len = ARRAY_SIZE(tx_frame_list_que2),
			}
#endif
#endif
#endif
#endif
#if GMAC_PRIORITY_QUEUE_NUM >= 3
		}, {
			.que_idx = GMAC_QUE_3,
			.rx_desc_list = {
				.buf = rx_desc_que3,
				.len = ARRAY_SIZE(rx_desc_que3),
			},
			.tx_desc_list = {
				.buf = tx_desc_que3,
				.len = ARRAY_SIZE(tx_desc_que3),
			},
#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 3
			.rx_frag_list = rx_frag_list_que3,
#if GMAC_MULTIPLE_TX_PACKETS == 1
			.tx_frag_list = {
				.buf = (uint32_t *)tx_frag_list_que3,
				.len = ARRAY_SIZE(tx_frag_list_que3),
			},
#if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
			.tx_frames = {
				.buf = (uint32_t *)tx_frame_list_que3,
				.len = ARRAY_SIZE(tx_frame_list_que3),
			}
#endif
#endif
#endif
#endif
#if GMAC_PRIORITY_QUEUE_NUM >= 4
		}, {
			.que_idx = GMAC_QUE_4,
			.rx_desc_list = {
				.buf = rx_desc_que4,
				.len = ARRAY_SIZE(rx_desc_que4),
			},
			.tx_desc_list = {
				.buf = tx_desc_que4,
				.len = ARRAY_SIZE(tx_desc_que4),
			},
#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 4
			.rx_frag_list = rx_frag_list_que4,
#if GMAC_MULTIPLE_TX_PACKETS == 1
			.tx_frag_list = {
				.buf = (uint32_t *)tx_frag_list_que4,
				.len = ARRAY_SIZE(tx_frag_list_que4),
			},
#if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
			.tx_frames = {
				.buf = (uint32_t *)tx_frame_list_que4,
				.len = ARRAY_SIZE(tx_frame_list_que4),
			}
#endif
#endif
#endif
#endif
#if GMAC_PRIORITY_QUEUE_NUM >= 5
		}, {
			.que_idx = GMAC_QUE_5,
			.rx_desc_list = {
				.buf = rx_desc_que5,
				.len = ARRAY_SIZE(rx_desc_que5),
			},
			.tx_desc_list = {
				.buf = tx_desc_que5,
				.len = ARRAY_SIZE(tx_desc_que5),
			},
#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 5
			.rx_frag_list = rx_frag_list_que5,
#if GMAC_MULTIPLE_TX_PACKETS == 1
			.tx_frag_list = {
				.buf = (uint32_t *)tx_frag_list_que5,
				.len = ARRAY_SIZE(tx_frag_list_que5),
			},
#if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
			.tx_frames = {
				.buf = (uint32_t *)tx_frame_list_que5,
				.len = ARRAY_SIZE(tx_frame_list_que5),
			}
#endif
#endif
#endif
#endif
		}
	},
};

ETH_NET_DEVICE_DT_INST_DEFINE(0,
			      eth_initialize, NULL, &eth0_data,
			      &eth0_config, CONFIG_ETH_INIT_PRIORITY, &eth_api,
			      GMAC_MTU);

#if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
struct ptp_context {
	const struct device *eth_dev;
};

static struct ptp_context ptp_gmac_0_context;

static int ptp_clock_sam_gmac_set(const struct device *dev,
				  struct net_ptp_time *tm)
{
	struct ptp_context *ptp_context = dev->data;
	const struct eth_sam_dev_cfg *const cfg = ptp_context->eth_dev->config;
	Gmac *gmac = cfg->regs;

	gmac->GMAC_TSH = tm->_sec.high & 0xffff;
	gmac->GMAC_TSL = tm->_sec.low & 0xffffffff;
	gmac->GMAC_TN = tm->nanosecond & 0xffffffff;

	return 0;
}

static int ptp_clock_sam_gmac_get(const struct device *dev,
				  struct net_ptp_time *tm)
{
	struct ptp_context *ptp_context = dev->data;
	const struct eth_sam_dev_cfg *const cfg = ptp_context->eth_dev->config;
	Gmac *gmac = cfg->regs;

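	/* The 1588 timer seconds value is split between the lower 16 bits of
	 * GMAC_TSH and the full 32 bits of GMAC_TSL, giving 48 bits in total.
	 */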
	tm->second = ((uint64_t)(gmac->GMAC_TSH & 0xffff) << 32) | gmac->GMAC_TSL;
	tm->nanosecond = gmac->GMAC_TN;

	return 0;
}

static int ptp_clock_sam_gmac_adjust(const struct device *dev, int increment)
{
	struct ptp_context *ptp_context = dev->data;
	const struct eth_sam_dev_cfg *const cfg = ptp_context->eth_dev->config;
	Gmac *gmac = cfg->regs;

	if ((increment <= -(int)NSEC_PER_SEC) || (increment >= (int)NSEC_PER_SEC)) {
		return -EINVAL;
	}

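	/* Program the timer adjustment: GMAC_TA_ITDT() carries the magnitude
	 * in nanoseconds and GMAC_TA_ADJ marks it as a subtraction.
	 */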
	if (increment < 0) {
		gmac->GMAC_TA = GMAC_TA_ADJ | GMAC_TA_ITDT(-increment);
	} else {
		gmac->GMAC_TA = GMAC_TA_ITDT(increment);
	}

	return 0;
}

static int ptp_clock_sam_gmac_rate_adjust(const struct device *dev,
					  double ratio)
{
	return -ENOTSUP;
}

static const struct ptp_clock_driver_api ptp_api = {
	.set = ptp_clock_sam_gmac_set,
	.get = ptp_clock_sam_gmac_get,
	.adjust = ptp_clock_sam_gmac_adjust,
	.rate_adjust = ptp_clock_sam_gmac_rate_adjust,
};

static int ptp_gmac_init(const struct device *port)
{
	const struct device *const eth_dev = DEVICE_DT_INST_GET(0);
	struct eth_sam_dev_data *dev_data = eth_dev->data;
	struct ptp_context *ptp_context = port->data;

	dev_data->ptp_clock = port;
	ptp_context->eth_dev = eth_dev;

	return 0;
}

DEVICE_DEFINE(gmac_ptp_clock_0, PTP_CLOCK_NAME, ptp_gmac_init,
	      NULL, &ptp_gmac_0_context, NULL, POST_KERNEL,
	      CONFIG_PTP_CLOCK_INIT_PRIORITY, &ptp_api);

#endif /* CONFIG_PTP_CLOCK_SAM_GMAC */