1 /*
2 * Copyright (c) 2016 Piotr Mienkowski
3 * Copyright (c) 2018 Antmicro Ltd
4 * Copyright (c) 2023 Gerson Fernando Budke
5 *
6 * SPDX-License-Identifier: Apache-2.0
7 */
8
9 /** @file
10 * @brief Atmel SAM MCU family Ethernet MAC (GMAC) driver.
11 *
12 * This is a zero-copy networking implementation of an Ethernet driver. To
13 * prepare for incoming frames, the driver permanently reserves a fixed number
14 * of RX data net buffers when the interface is brought up, which reduces the
15 * number of RX data net buffers available to the application.
16 *
17 * Limitations:
18 * - one-shot PHY setup; no support for PHY disconnect/reconnect
19 * - no statistics collection
20 */
21
22 #if defined(CONFIG_SOC_FAMILY_ATMEL_SAM)
23 #define DT_DRV_COMPAT atmel_sam_gmac
24 #else
25 #define DT_DRV_COMPAT atmel_sam0_gmac
26 #endif
27
28 #define LOG_MODULE_NAME eth_sam
29 #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL
30
31 #include <zephyr/logging/log.h>
32 LOG_MODULE_REGISTER(LOG_MODULE_NAME);
33
34 #include <zephyr/kernel.h>
35 #include <zephyr/device.h>
36 #include <zephyr/sys/__assert.h>
37 #include <zephyr/sys/barrier.h>
38 #include <zephyr/sys/util.h>
39 #include <errno.h>
40 #include <stdbool.h>
41 #include <zephyr/net/phy.h>
42 #include <zephyr/net/net_pkt.h>
43 #include <zephyr/net/net_if.h>
44 #include <zephyr/net/ethernet.h>
45 #include <ethernet/eth_stats.h>
46 #include <zephyr/drivers/i2c.h>
47 #include <zephyr/drivers/pinctrl.h>
48 #include <zephyr/drivers/clock_control/atmel_sam_pmc.h>
49 #include <soc.h>
50 #include "eth_sam_gmac_priv.h"
51
52 #include "eth.h"
53
54 #ifdef CONFIG_SOC_FAMILY_ATMEL_SAM0
55 #include "eth_sam0_gmac.h"
56 #endif
57
58 #include <zephyr/drivers/ptp_clock.h>
59 #include <zephyr/net/gptp.h>
60 #include <zephyr/irq.h>
61
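/*
 * D-cache maintenance helpers: RX buffers are invalidated after the GMAC DMA
 * writes to them and TX buffers are cleaned before the DMA reads them. These
 * helpers are no-ops when the data cache is disabled or not present.
 */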
62 #ifdef __DCACHE_PRESENT
63 static bool dcache_enabled;
64
65 static inline void dcache_is_enabled(void)
66 {
67 dcache_enabled = (SCB->CCR & SCB_CCR_DC_Msk);
68 }
69 static inline void dcache_invalidate(uint32_t addr, uint32_t size)
70 {
71 if (!dcache_enabled) {
72 return;
73 }
74
75 /* Make sure it is aligned to 32B */
76 uint32_t start_addr = addr & (uint32_t)~(GMAC_DCACHE_ALIGNMENT - 1);
77 uint32_t size_full = size + addr - start_addr;
78
79 SCB_InvalidateDCache_by_Addr((uint32_t *)start_addr, size_full);
80 }
81
82 static inline void dcache_clean(uint32_t addr, uint32_t size)
83 {
84 if (!dcache_enabled) {
85 return;
86 }
87
88 /* Make sure it is aligned to 32B */
89 uint32_t start_addr = addr & (uint32_t)~(GMAC_DCACHE_ALIGNMENT - 1);
90 uint32_t size_full = size + addr - start_addr;
91
92 SCB_CleanDCache_by_Addr((uint32_t *)start_addr, size_full);
93 }
94 #else
95 #define dcache_is_enabled()
96 #define dcache_invalidate(addr, size)
97 #define dcache_clean(addr, size)
98 #endif
99
100 #ifdef CONFIG_SOC_FAMILY_ATMEL_SAM0
101 #define MCK_FREQ_HZ SOC_ATMEL_SAM0_MCK_FREQ_HZ
102 #elif CONFIG_SOC_FAMILY_ATMEL_SAM
103 #define MCK_FREQ_HZ SOC_ATMEL_SAM_MCK_FREQ_HZ
104 #else
105 #error Unsupported SoC family
106 #endif
107
108 /*
109 * Verify Kconfig configuration
110 */
111 /* No need to verify things for unit tests */
112 #if !defined(CONFIG_NET_TEST)
113 #if CONFIG_NET_BUF_DATA_SIZE * CONFIG_ETH_SAM_GMAC_BUF_RX_COUNT \
114 < GMAC_FRAME_SIZE_MAX
115 #error CONFIG_NET_BUF_DATA_SIZE * CONFIG_ETH_SAM_GMAC_BUF_RX_COUNT is \
116 not large enough to hold a full frame
117 #endif
118
119 #if CONFIG_NET_BUF_DATA_SIZE * (CONFIG_NET_BUF_RX_COUNT - \
120 CONFIG_ETH_SAM_GMAC_BUF_RX_COUNT) < GMAC_FRAME_SIZE_MAX
121 #error (CONFIG_NET_BUF_RX_COUNT - CONFIG_ETH_SAM_GMAC_BUF_RX_COUNT) * \
122 CONFIG_NET_BUF_DATA_SIZE are not large enough to hold a full frame
123 #endif
124
125 #if CONFIG_NET_BUF_DATA_SIZE & 0x3F
126 #pragma message "CONFIG_NET_BUF_DATA_SIZE should be a multiple of 64 bytes " \
127 "due to the granularity of RX DMA"
128 #endif
129
130 #if (CONFIG_ETH_SAM_GMAC_BUF_RX_COUNT + 1) * GMAC_ACTIVE_QUEUE_NUM \
131 > CONFIG_NET_BUF_RX_COUNT
132 #error Not enough RX buffers to allocate descriptors for each HW queue
133 #endif
134 #endif /* !CONFIG_NET_TEST */
135
136 BUILD_ASSERT(DT_INST_ENUM_IDX(0, phy_connection_type) <= 1, "Invalid PHY connection");
137
138 /* RX descriptors list */
139 static struct gmac_desc rx_desc_que0[MAIN_QUEUE_RX_DESC_COUNT]
140 __nocache __aligned(GMAC_DESC_ALIGNMENT);
141 #if GMAC_PRIORITY_QUEUE_NUM >= 1
142 static struct gmac_desc rx_desc_que1[PRIORITY_QUEUE1_RX_DESC_COUNT]
143 __nocache __aligned(GMAC_DESC_ALIGNMENT);
144 #endif
145 #if GMAC_PRIORITY_QUEUE_NUM >= 2
146 static struct gmac_desc rx_desc_que2[PRIORITY_QUEUE2_RX_DESC_COUNT]
147 __nocache __aligned(GMAC_DESC_ALIGNMENT);
148 #endif
149 #if GMAC_PRIORITY_QUEUE_NUM >= 3
150 static struct gmac_desc rx_desc_que3[PRIORITY_QUEUE3_RX_DESC_COUNT]
151 __nocache __aligned(GMAC_DESC_ALIGNMENT);
152 #endif
153 #if GMAC_PRIORITY_QUEUE_NUM >= 4
154 static struct gmac_desc rx_desc_que4[PRIORITY_QUEUE4_RX_DESC_COUNT]
155 __nocache __aligned(GMAC_DESC_ALIGNMENT);
156 #endif
157 #if GMAC_PRIORITY_QUEUE_NUM >= 5
158 static struct gmac_desc rx_desc_que5[PRIORITY_QUEUE5_RX_DESC_COUNT]
159 __nocache __aligned(GMAC_DESC_ALIGNMENT);
160 #endif
161
162 /* TX descriptors list */
163 static struct gmac_desc tx_desc_que0[MAIN_QUEUE_TX_DESC_COUNT]
164 __nocache __aligned(GMAC_DESC_ALIGNMENT);
165 #if GMAC_PRIORITY_QUEUE_NUM >= 1
166 static struct gmac_desc tx_desc_que1[PRIORITY_QUEUE1_TX_DESC_COUNT]
167 __nocache __aligned(GMAC_DESC_ALIGNMENT);
168 #endif
169 #if GMAC_PRIORITY_QUEUE_NUM >= 2
170 static struct gmac_desc tx_desc_que2[PRIORITY_QUEUE2_TX_DESC_COUNT]
171 __nocache __aligned(GMAC_DESC_ALIGNMENT);
172 #endif
173 #if GMAC_PRIORITY_QUEUE_NUM >= 3
174 static struct gmac_desc tx_desc_que3[PRIORITY_QUEUE3_TX_DESC_COUNT]
175 __nocache __aligned(GMAC_DESC_ALIGNMENT);
176 #endif
177 #if GMAC_PRIORITY_QUEUE_NUM >= 4
178 static struct gmac_desc tx_desc_que4[PRIORITY_QUEUE4_TX_DESC_COUNT]
179 __nocache __aligned(GMAC_DESC_ALIGNMENT);
180 #endif
181 #if GMAC_PRIORITY_QUEUE_NUM >= 5
182 static struct gmac_desc tx_desc_que5[PRIORITY_QUEUE5_TX_DESC_COUNT]
183 __nocache __aligned(GMAC_DESC_ALIGNMENT);
184 #endif
185
186 /* RX buffer accounting list */
187 static struct net_buf *rx_frag_list_que0[MAIN_QUEUE_RX_DESC_COUNT];
188 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
189 static struct net_buf *rx_frag_list_que1[PRIORITY_QUEUE1_RX_DESC_COUNT];
190 #endif
191 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 2
192 static struct net_buf *rx_frag_list_que2[PRIORITY_QUEUE2_RX_DESC_COUNT];
193 #endif
194 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 3
195 static struct net_buf *rx_frag_list_que3[PRIORITY_QUEUE3_RX_DESC_COUNT];
196 #endif
197 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 4
198 static struct net_buf *rx_frag_list_que4[PRIORITY_QUEUE4_RX_DESC_COUNT];
199 #endif
200 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 5
201 static struct net_buf *rx_frag_list_que5[PRIORITY_QUEUE5_RX_DESC_COUNT];
202 #endif
203
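/*
 * GMAC_MULTIPLE_TX_PACKETS == 1 selects the asynchronous TX path where several
 * frames may be in flight per queue; otherwise eth_tx() blocks until each
 * frame has been sent.
 */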
204 #if GMAC_MULTIPLE_TX_PACKETS == 1
205 /* TX buffer accounting list */
206 static struct net_buf *tx_frag_list_que0[MAIN_QUEUE_TX_DESC_COUNT];
207 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
208 static struct net_buf *tx_frag_list_que1[PRIORITY_QUEUE1_TX_DESC_COUNT];
209 #endif
210 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 2
211 static struct net_buf *tx_frag_list_que2[PRIORITY_QUEUE2_TX_DESC_COUNT];
212 #endif
213 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 3
214 static struct net_buf *tx_frag_list_que3[PRIORITY_QUEUE3_TX_DESC_COUNT];
215 #endif
216 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 4
217 static struct net_buf *tx_frag_list_que4[PRIORITY_QUEUE4_TX_DESC_COUNT];
218 #endif
219 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 5
220 static struct net_buf *tx_frag_list_que5[PRIORITY_QUEUE5_TX_DESC_COUNT];
221 #endif
222
223 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
224 /* TX frames accounting list */
225 static struct net_pkt *tx_frame_list_que0[CONFIG_NET_PKT_TX_COUNT + 1];
226 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
227 static struct net_pkt *tx_frame_list_que1[CONFIG_NET_PKT_TX_COUNT + 1];
228 #endif
229 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 2
230 static struct net_pkt *tx_frame_list_que2[CONFIG_NET_PKT_TX_COUNT + 1];
231 #endif
232 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 3
233 static struct net_pkt *tx_frame_list_que3[CONFIG_NET_PKT_TX_COUNT + 1];
234 #endif
235 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 4
236 static struct net_pkt *tx_frame_list_que4[CONFIG_NET_PKT_TX_COUNT + 1];
237 #endif
238 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 5
239 static struct net_pkt *tx_frame_list_que5[CONFIG_NET_PKT_TX_COUNT + 1];
240 #endif
241 #endif
242 #endif
243
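/* Circular index increment: advance val by one and wrap around to 0 at max. */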
244 #define MODULO_INC(val, max) {val = (++val < max) ? val : 0; }
245
246 static int rx_descriptors_init(Gmac *gmac, struct gmac_queue *queue);
247 static void tx_descriptors_init(Gmac *gmac, struct gmac_queue *queue);
248 static int nonpriority_queue_init(Gmac *gmac, struct gmac_queue *queue);
249
250 #if GMAC_PRIORITY_QUEUE_NUM >= 1
251 static inline void set_receive_buf_queue_pointer(Gmac *gmac,
252 struct gmac_queue *queue)
253 {
254 /* Set Receive Buffer Queue Pointer Register */
255 if (queue->que_idx == GMAC_QUE_0) {
256 gmac->GMAC_RBQB = (uint32_t)queue->rx_desc_list.buf;
257 } else {
258 gmac->GMAC_RBQBAPQ[queue->que_idx - 1] =
259 (uint32_t)queue->rx_desc_list.buf;
260 }
261 }
262
263 static inline void disable_all_priority_queue_interrupt(Gmac *gmac)
264 {
265 uint32_t idx;
266
267 for (idx = 0; idx < GMAC_PRIORITY_QUEUE_NUM; idx++) {
268 gmac->GMAC_IDRPQ[idx] = UINT32_MAX;
269 (void)gmac->GMAC_ISRPQ[idx];
270 }
271 }
272
273 static int priority_queue_init(Gmac *gmac, struct gmac_queue *queue)
274 {
275 int result;
276 int queue_index;
277
278 __ASSERT_NO_MSG(queue->rx_desc_list.len > 0);
279 __ASSERT_NO_MSG(queue->tx_desc_list.len > 0);
280 __ASSERT(!((uint32_t)queue->rx_desc_list.buf & ~GMAC_RBQB_ADDR_Msk),
281 "RX descriptors have to be word aligned");
282 __ASSERT(!((uint32_t)queue->tx_desc_list.buf & ~GMAC_TBQB_ADDR_Msk),
283 "TX descriptors have to be word aligned");
284
285 /* Extract queue index for easier referencing */
286 queue_index = queue->que_idx - 1;
287
288 /* Setup descriptor lists */
289 result = rx_descriptors_init(gmac, queue);
290 if (result < 0) {
291 return result;
292 }
293
294 tx_descriptors_init(gmac, queue);
295
296 #if GMAC_MULTIPLE_TX_PACKETS == 0
297 k_sem_init(&queue->tx_sem, 0, 1);
298 #else
299 k_sem_init(&queue->tx_desc_sem, queue->tx_desc_list.len - 1,
300 queue->tx_desc_list.len - 1);
301 #endif
302
303 /* Setup RX buffer size for DMA */
304 gmac->GMAC_RBSRPQ[queue_index] =
305 GMAC_RBSRPQ_RBS(CONFIG_NET_BUF_DATA_SIZE >> 6);
306
307 /* Set Receive Buffer Queue Pointer Register */
308 gmac->GMAC_RBQBAPQ[queue_index] = (uint32_t)queue->rx_desc_list.buf;
309 /* Set Transmit Buffer Queue Pointer Register */
310 gmac->GMAC_TBQBAPQ[queue_index] = (uint32_t)queue->tx_desc_list.buf;
311
312 /* Enable RX/TX completion and error interrupts */
313 gmac->GMAC_IERPQ[queue_index] = GMAC_INTPQ_EN_FLAGS;
314
315 queue->err_rx_frames_dropped = 0U;
316 queue->err_rx_flushed_count = 0U;
317 queue->err_tx_flushed_count = 0U;
318
319 LOG_INF("Queue %d activated", queue->que_idx);
320
321 return 0;
322 }
323
324 static int priority_queue_init_as_idle(Gmac *gmac, struct gmac_queue *queue)
325 {
326 struct gmac_desc_list *rx_desc_list = &queue->rx_desc_list;
327 struct gmac_desc_list *tx_desc_list = &queue->tx_desc_list;
328
329 __ASSERT(!((uint32_t)rx_desc_list->buf & ~GMAC_RBQB_ADDR_Msk),
330 "RX descriptors have to be word aligned");
331 __ASSERT(!((uint32_t)tx_desc_list->buf & ~GMAC_TBQB_ADDR_Msk),
332 "TX descriptors have to be word aligned");
333 __ASSERT((rx_desc_list->len == 1U) && (tx_desc_list->len == 1U),
334 "Priority queues are currently not supported, descriptor "
335 "list has to have a single entry");
336
337 /* Setup RX descriptor lists */
338 /* Take ownership from GMAC and set the wrap bit */
339 rx_desc_list->buf[0].w0 = GMAC_RXW0_WRAP;
340 rx_desc_list->buf[0].w1 = 0U;
341 /* Setup TX descriptor lists */
342 tx_desc_list->buf[0].w0 = 0U;
343 /* Take ownership from GMAC and set the wrap bit */
344 tx_desc_list->buf[0].w1 = GMAC_TXW1_USED | GMAC_TXW1_WRAP;
345
346 /* Set Receive Buffer Queue Pointer Register */
347 gmac->GMAC_RBQBAPQ[queue->que_idx - 1] = (uint32_t)rx_desc_list->buf;
348 /* Set Transmit Buffer Queue Pointer Register */
349 gmac->GMAC_TBQBAPQ[queue->que_idx - 1] = (uint32_t)tx_desc_list->buf;
350
351 LOG_INF("Queue %d set to idle", queue->que_idx);
352
353 return 0;
354 }
355
356 static int queue_init(Gmac *gmac, struct gmac_queue *queue)
357 {
358 if (queue->que_idx == GMAC_QUE_0) {
359 return nonpriority_queue_init(gmac, queue);
360 } else if (queue->que_idx <= GMAC_ACTIVE_PRIORITY_QUEUE_NUM) {
361 return priority_queue_init(gmac, queue);
362 } else {
363 return priority_queue_init_as_idle(gmac, queue);
364 }
365 }
366
367 #else
368
369 static inline void set_receive_buf_queue_pointer(Gmac *gmac,
370 struct gmac_queue *queue)
371 {
372 gmac->GMAC_RBQB = (uint32_t)queue->rx_desc_list.buf;
373 }
374
375 static int queue_init(Gmac *gmac, struct gmac_queue *queue)
376 {
377 return nonpriority_queue_init(gmac, queue);
378 }
379
380 #define disable_all_priority_queue_interrupt(gmac)
381
382 #endif
383
384 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
385 static int eth_sam_gmac_setup_qav(Gmac *gmac, int queue_id, bool enable);
386
387 static inline void eth_sam_gmac_init_qav(Gmac *gmac)
388 {
389 uint32_t idx;
390
391 for (idx = GMAC_QUE_1; idx <= GMAC_ACTIVE_PRIORITY_QUEUE_NUM; idx++) {
392 eth_sam_gmac_setup_qav(gmac, idx, true);
393 }
394 }
395
396 #else
397
398 #define eth_sam_gmac_init_qav(gmac)
399
400 #endif
401
402 #if GMAC_MULTIPLE_TX_PACKETS == 1
403 /*
404 * Reset ring buffer
405 */
406 static void ring_buf_reset(struct ring_buf *rb)
407 {
408 rb->head = 0U;
409 rb->tail = 0U;
410 }
411
412 /*
413 * Get one 32 bit item from the ring buffer
414 */
415 static uint32_t ring_buf_get(struct ring_buf *rb)
416 {
417 uint32_t val;
418
419 __ASSERT(rb->tail != rb->head,
420 "retrieving data from empty ring buffer");
421
422 val = rb->buf[rb->tail];
423 MODULO_INC(rb->tail, rb->len);
424
425 return val;
426 }
427
428 /*
429 * Put one 32 bit item into the ring buffer
430 */
431 static void ring_buf_put(struct ring_buf *rb, uint32_t val)
432 {
433 rb->buf[rb->head] = val;
434 MODULO_INC(rb->head, rb->len);
435
436 __ASSERT(rb->tail != rb->head,
437 "ring buffer overflow");
438 }
439 #endif
440
441 /*
442 * Free pre-reserved RX buffers
443 */
444 static void free_rx_bufs(struct net_buf **rx_frag_list, uint16_t len)
445 {
446 for (int i = 0; i < len; i++) {
447 if (rx_frag_list[i]) {
448 net_buf_unref(rx_frag_list[i]);
449 rx_frag_list[i] = NULL;
450 }
451 }
452 }
453
454 /*
455 * Set MAC Address for frame filtering logic
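 * GMAC_SAB holds the first four bytes of the address, GMAC_SAT the last two.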
456 */
457 static void mac_addr_set(Gmac *gmac, uint8_t index,
458 uint8_t mac_addr[6])
459 {
460 __ASSERT(index < 4, "index has to be in the range 0..3");
461
462 gmac->GMAC_SA[index].GMAC_SAB = (mac_addr[3] << 24)
463 | (mac_addr[2] << 16)
464 | (mac_addr[1] << 8)
465 | (mac_addr[0]);
466 gmac->GMAC_SA[index].GMAC_SAT = (mac_addr[5] << 8)
467 | (mac_addr[4]);
468 }
469
470 /*
471 * Initialize RX descriptor list
472 */
473 static int rx_descriptors_init(Gmac *gmac, struct gmac_queue *queue)
474 {
475 struct gmac_desc_list *rx_desc_list = &queue->rx_desc_list;
476 struct net_buf **rx_frag_list = queue->rx_frag_list;
477 struct net_buf *rx_buf;
478 uint8_t *rx_buf_addr;
479
480 __ASSERT_NO_MSG(rx_frag_list);
481
482 rx_desc_list->tail = 0U;
483
484 for (int i = 0; i < rx_desc_list->len; i++) {
485 rx_buf = net_pkt_get_reserve_rx_data(CONFIG_NET_BUF_DATA_SIZE,
486 K_NO_WAIT);
487 if (rx_buf == NULL) {
488 free_rx_bufs(rx_frag_list, rx_desc_list->len);
489 LOG_ERR("Failed to reserve data net buffers");
490 return -ENOBUFS;
491 }
492
493 rx_frag_list[i] = rx_buf;
494
495 rx_buf_addr = rx_buf->data;
496 __ASSERT(!((uint32_t)rx_buf_addr & ~GMAC_RXW0_ADDR),
497 "Misaligned RX buffer address");
498 __ASSERT(rx_buf->size == CONFIG_NET_BUF_DATA_SIZE,
499 "Incorrect length of RX data buffer");
500 /* Give ownership to GMAC and remove the wrap bit */
501 rx_desc_list->buf[i].w0 = (uint32_t)rx_buf_addr & GMAC_RXW0_ADDR;
502 rx_desc_list->buf[i].w1 = 0U;
503 }
504
505 /* Set the wrap bit on the last descriptor */
506 rx_desc_list->buf[rx_desc_list->len - 1U].w0 |= GMAC_RXW0_WRAP;
507
508 return 0;
509 }
510
511 /*
512 * Initialize TX descriptor list
513 */
514 static void tx_descriptors_init(Gmac *gmac, struct gmac_queue *queue)
515 {
516 struct gmac_desc_list *tx_desc_list = &queue->tx_desc_list;
517
518 tx_desc_list->head = 0U;
519 tx_desc_list->tail = 0U;
520
521 for (int i = 0; i < tx_desc_list->len; i++) {
522 tx_desc_list->buf[i].w0 = 0U;
523 tx_desc_list->buf[i].w1 = GMAC_TXW1_USED;
524 }
525
526 /* Set the wrap bit on the last descriptor */
527 tx_desc_list->buf[tx_desc_list->len - 1U].w1 |= GMAC_TXW1_WRAP;
528
529 #if GMAC_MULTIPLE_TX_PACKETS == 1
530 /* Reset TX frame list */
531 ring_buf_reset(&queue->tx_frag_list);
532 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
533 ring_buf_reset(&queue->tx_frames);
534 #endif
535 #endif
536 }
537
538 #if defined(CONFIG_NET_GPTP)
539 static struct gptp_hdr *check_gptp_msg(struct net_if *iface,
540 struct net_pkt *pkt,
541 bool is_tx)
542 {
543 uint8_t *msg_start = net_pkt_data(pkt);
544 struct gptp_hdr *gptp_hdr;
545 int eth_hlen;
546 struct net_eth_hdr *hdr;
547
548 hdr = (struct net_eth_hdr *)msg_start;
549 if (ntohs(hdr->type) != NET_ETH_PTYPE_PTP) {
550 return NULL;
551 }
552
553 eth_hlen = sizeof(struct net_eth_hdr);
554
555 /* In TX, the first net_buf contains the Ethernet header
556 * and the actual gPTP header is in the second net_buf.
557 * In RX, the Ethernet header + other headers are in the
558 * first net_buf.
559 */
560 if (is_tx) {
561 if (pkt->frags->frags == NULL) {
562 return NULL;
563 }
564
565 gptp_hdr = (struct gptp_hdr *)pkt->frags->frags->data;
566 } else {
567 gptp_hdr = (struct gptp_hdr *)(pkt->frags->data + eth_hlen);
568 }
569
570 return gptp_hdr;
571 }
572
573 static bool need_timestamping(struct gptp_hdr *hdr)
574 {
575 switch (hdr->message_type) {
576 case GPTP_SYNC_MESSAGE:
577 case GPTP_PATH_DELAY_RESP_MESSAGE:
578 return true;
579 default:
580 return false;
581 }
582 }
583
584 static void update_pkt_priority(struct gptp_hdr *hdr, struct net_pkt *pkt)
585 {
586 if (GPTP_IS_EVENT_MSG(hdr->message_type)) {
587 net_pkt_set_priority(pkt, NET_PRIORITY_CA);
588 } else {
589 net_pkt_set_priority(pkt, NET_PRIORITY_IC);
590 }
591 }
592
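/*
 * GMAC 1588 timestamp registers: the seconds field is 48 bits wide, split
 * across a 16-bit high register and a 32-bit low register.
 */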
593 static inline struct net_ptp_time get_ptp_event_rx_ts(Gmac *gmac)
594 {
595 struct net_ptp_time ts;
596
597 ts.second = ((uint64_t)(gmac->GMAC_EFRSH & 0xffff) << 32)
598 | gmac->GMAC_EFRSL;
599 ts.nanosecond = gmac->GMAC_EFRN;
600
601 return ts;
602 }
603
604 static inline struct net_ptp_time get_ptp_peer_event_rx_ts(Gmac *gmac)
605 {
606 struct net_ptp_time ts;
607
608 ts.second = ((uint64_t)(gmac->GMAC_PEFRSH & 0xffff) << 32)
609 | gmac->GMAC_PEFRSL;
610 ts.nanosecond = gmac->GMAC_PEFRN;
611
612 return ts;
613 }
614
615 static inline struct net_ptp_time get_ptp_event_tx_ts(Gmac *gmac)
616 {
617 struct net_ptp_time ts;
618
619 ts.second = ((uint64_t)(gmac->GMAC_EFTSH & 0xffff) << 32)
620 | gmac->GMAC_EFTSL;
621 ts.nanosecond = gmac->GMAC_EFTN;
622
623 return ts;
624 }
625
626 static inline struct net_ptp_time get_ptp_peer_event_tx_ts(Gmac *gmac)
627 {
628 struct net_ptp_time ts;
629
630 ts.second = ((uint64_t)(gmac->GMAC_PEFTSH & 0xffff) << 32)
631 | gmac->GMAC_PEFTSL;
632 ts.nanosecond = gmac->GMAC_PEFTN;
633
634 return ts;
635 }
636
637 static inline struct net_ptp_time get_current_ts(Gmac *gmac)
638 {
639 struct net_ptp_time ts;
640
641 ts.second = ((uint64_t)(gmac->GMAC_TSH & 0xffff) << 32) | gmac->GMAC_TSL;
642 ts.nanosecond = gmac->GMAC_TN;
643
644 return ts;
645 }
646
647
648 static inline void timestamp_tx_pkt(Gmac *gmac, struct gptp_hdr *hdr,
649 struct net_pkt *pkt)
650 {
651 struct net_ptp_time timestamp;
652
653 if (hdr) {
654 switch (hdr->message_type) {
655 case GPTP_SYNC_MESSAGE:
656 timestamp = get_ptp_event_tx_ts(gmac);
657 break;
658 default:
659 timestamp = get_ptp_peer_event_tx_ts(gmac);
660 }
661 } else {
662 timestamp = get_current_ts(gmac);
663 }
664
665 net_pkt_set_timestamp(pkt, &timestamp);
666 }
667
668 static inline void timestamp_rx_pkt(Gmac *gmac, struct gptp_hdr *hdr,
669 struct net_pkt *pkt)
670 {
671 struct net_ptp_time timestamp;
672
673 if (hdr) {
674 switch (hdr->message_type) {
675 case GPTP_SYNC_MESSAGE:
676 timestamp = get_ptp_event_rx_ts(gmac);
677 break;
678 default:
679 timestamp = get_ptp_peer_event_rx_ts(gmac);
680 }
681 } else {
682 timestamp = get_current_ts(gmac);
683 }
684
685 net_pkt_set_timestamp(pkt, &timestamp);
686 }
687
688 #endif
689
690 static inline struct net_if *get_iface(struct eth_sam_dev_data *ctx)
691 {
692 return ctx->iface;
693 }
694
695 /*
696 * Process successfully sent packets
697 */
698 static void tx_completed(Gmac *gmac, struct gmac_queue *queue)
699 {
700 #if GMAC_MULTIPLE_TX_PACKETS == 0
701 k_sem_give(&queue->tx_sem);
702 #else
703 struct gmac_desc_list *tx_desc_list = &queue->tx_desc_list;
704 struct gmac_desc *tx_desc;
705 struct net_buf *frag;
706 #if defined(CONFIG_NET_GPTP)
707 struct net_pkt *pkt;
708 struct gptp_hdr *hdr;
709 struct eth_sam_dev_data *dev_data =
710 CONTAINER_OF(queue, struct eth_sam_dev_data,
711 queue_list[queue->que_idx]);
712 #endif
713
714 __ASSERT(tx_desc_list->buf[tx_desc_list->tail].w1 & GMAC_TXW1_USED,
715 "first buffer of a frame is not marked as own by GMAC");
716
717 while (tx_desc_list->tail != tx_desc_list->head) {
718
719 tx_desc = &tx_desc_list->buf[tx_desc_list->tail];
720 MODULO_INC(tx_desc_list->tail, tx_desc_list->len);
721 k_sem_give(&queue->tx_desc_sem);
722
723 /* Release net buffer to the buffer pool */
724 frag = UINT_TO_POINTER(ring_buf_get(&queue->tx_frag_list));
725 net_pkt_frag_unref(frag);
726 LOG_DBG("Dropping frag %p", frag);
727
728 if (tx_desc->w1 & GMAC_TXW1_LASTBUFFER) {
729 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
730 /* Release net packet to the packet pool */
731 pkt = UINT_TO_POINTER(ring_buf_get(&queue->tx_frames));
732
733 #if defined(CONFIG_NET_GPTP)
734 hdr = check_gptp_msg(get_iface(dev_data),
735 pkt, true);
736
737 timestamp_tx_pkt(gmac, hdr, pkt);
738
739 if (hdr && need_timestamping(hdr)) {
740 net_if_add_tx_timestamp(pkt);
741 }
742 #endif
743 net_pkt_unref(pkt);
744 LOG_DBG("Dropping pkt %p", pkt);
745 #endif
746 break;
747 }
748 }
749 #endif
750 }
751
752 /*
753 * Reset TX queue when errors are detected
754 */
755 static void tx_error_handler(Gmac *gmac, struct gmac_queue *queue)
756 {
757 #if GMAC_MULTIPLE_TX_PACKETS == 1
758 struct net_buf *frag;
759 struct ring_buf *tx_frag_list = &queue->tx_frag_list;
760 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
761 struct net_pkt *pkt;
762 struct ring_buf *tx_frames = &queue->tx_frames;
763 #endif
764 #endif
765
766 queue->err_tx_flushed_count++;
767
768 /* Stop transmission, clean transmit pipeline and control registers */
769 gmac->GMAC_NCR &= ~GMAC_NCR_TXEN;
770
771 #if GMAC_MULTIPLE_TX_PACKETS == 1
772 /* Free all frag resources in the TX path */
773 while (tx_frag_list->tail != tx_frag_list->head) {
774 /* Release net buffer to the buffer pool */
775 frag = UINT_TO_POINTER(tx_frag_list->buf[tx_frag_list->tail]);
776 net_pkt_frag_unref(frag);
777 LOG_DBG("Dropping frag %p", frag);
778 MODULO_INC(tx_frag_list->tail, tx_frag_list->len);
779 }
780
781 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
782 /* Free all pkt resources in the TX path */
783 while (tx_frames->tail != tx_frames->head) {
784 /* Release net packet to the packet pool */
785 pkt = UINT_TO_POINTER(tx_frames->buf[tx_frames->tail]);
786 net_pkt_unref(pkt);
787 LOG_DBG("Dropping pkt %p", pkt);
788 MODULO_INC(tx_frames->tail, tx_frames->len);
789 }
790 #endif
791
792 /* Reinitialize TX descriptor list */
793 k_sem_reset(&queue->tx_desc_sem);
794 for (int i = 0; i < queue->tx_desc_list.len - 1; i++) {
795 k_sem_give(&queue->tx_desc_sem);
796 }
797 #endif
798 tx_descriptors_init(gmac, queue);
799
800 #if GMAC_MULTIPLE_TX_PACKETS == 0
801 /* Reinitialize TX mutex */
802 k_sem_give(&queue->tx_sem);
803 #endif
804
805 /* Restart transmission */
806 gmac->GMAC_NCR |= GMAC_NCR_TXEN;
807 }
808
809 /*
810 * Clean the RX queue; any received data still stored in the buffers is abandoned.
811 */
812 static void rx_error_handler(Gmac *gmac, struct gmac_queue *queue)
813 {
814 queue->err_rx_flushed_count++;
815
816 /* Stop reception */
817 gmac->GMAC_NCR &= ~GMAC_NCR_RXEN;
818
819 queue->rx_desc_list.tail = 0U;
820
821 for (int i = 0; i < queue->rx_desc_list.len; i++) {
822 queue->rx_desc_list.buf[i].w1 = 0U;
823 queue->rx_desc_list.buf[i].w0 &= ~GMAC_RXW0_OWNERSHIP;
824 }
825
826 set_receive_buf_queue_pointer(gmac, queue);
827
828 /* Restart reception */
829 gmac->GMAC_NCR |= GMAC_NCR_RXEN;
830 }
831
832 /*
833 * Get the MCK-to-MDC clock divisor.
834 *
835 * According to 802.3, MDC should be less than 2.5 MHz.
836 */
837 static int get_mck_clock_divisor(uint32_t mck)
838 {
839 uint32_t mck_divisor;
840
841 if (mck <= 20000000U) {
842 mck_divisor = GMAC_NCFGR_CLK_MCK_8;
843 } else if (mck <= 40000000U) {
844 mck_divisor = GMAC_NCFGR_CLK_MCK_16;
845 } else if (mck <= 80000000U) {
846 mck_divisor = GMAC_NCFGR_CLK_MCK_32;
847 } else if (mck <= 120000000U) {
848 mck_divisor = GMAC_NCFGR_CLK_MCK_48;
849 } else if (mck <= 160000000U) {
850 mck_divisor = GMAC_NCFGR_CLK_MCK_64;
851 } else if (mck <= 240000000U) {
852 mck_divisor = GMAC_NCFGR_CLK_MCK_96;
853 } else {
854 LOG_ERR("No valid MDC clock");
855 mck_divisor = -ENOTSUP;
856 }
857
858 return mck_divisor;
859 }
860
861 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
862 static int eth_sam_gmac_setup_qav(Gmac *gmac, int queue_id, bool enable)
863 {
864 /* Verify queue id */
865 if (queue_id < GMAC_QUE_1 || queue_id > GMAC_ACTIVE_PRIORITY_QUEUE_NUM) {
866 return -EINVAL;
867 }
868
869 if (queue_id == GMAC_QUE_2) {
870 if (enable) {
871 gmac->GMAC_CBSCR |= GMAC_CBSCR_QAE;
872 } else {
873 gmac->GMAC_CBSCR &= ~GMAC_CBSCR_QAE;
874 }
875 } else {
876 if (enable) {
877 gmac->GMAC_CBSCR |= GMAC_CBSCR_QBE;
878 } else {
879 gmac->GMAC_CBSCR &= ~GMAC_CBSCR_QBE;
880 }
881 }
882
883 return 0;
884 }
885
886 static int eth_sam_gmac_get_qav_status(Gmac *gmac, int queue_id, bool *enabled)
887 {
888 /* Verify queue id */
889 if (queue_id < GMAC_QUE_1 || queue_id > GMAC_ACTIVE_PRIORITY_QUEUE_NUM) {
890 return -EINVAL;
891 }
892
893 if (queue_id == GMAC_QUE_2) {
894 *enabled = gmac->GMAC_CBSCR & GMAC_CBSCR_QAE;
895 } else {
896 *enabled = gmac->GMAC_CBSCR & GMAC_CBSCR_QBE;
897 }
898
899 return 0;
900 }
901
902 static int eth_sam_gmac_setup_qav_idle_slope(Gmac *gmac, int queue_id,
903 unsigned int idle_slope)
904 {
905 uint32_t cbscr_val;
906
907 /* Verify queue id */
908 if (queue_id < GMAC_QUE_1 || queue_id > GMAC_ACTIVE_PRIORITY_QUEUE_NUM) {
909 return -EINVAL;
910 }
911
912 cbscr_val = gmac->GMAC_CBSISQA;
913
914 if (queue_id == GMAC_QUE_2) {
915 gmac->GMAC_CBSCR &= ~GMAC_CBSCR_QAE;
916 gmac->GMAC_CBSISQA = idle_slope;
917 } else {
918 gmac->GMAC_CBSCR &= ~GMAC_CBSCR_QBE;
919 gmac->GMAC_CBSISQB = idle_slope;
920 }
921
922 gmac->GMAC_CBSCR = cbscr_val;
923
924 return 0;
925 }
926
927 static uint32_t eth_sam_gmac_get_bandwidth(Gmac *gmac)
928 {
929 uint32_t bandwidth;
930
931 /* See if we operate in 10Mbps or 100Mbps mode,
932 * Note: according to the manual, portTransmitRate is 0x07735940 for
933 * 1Gbps - therefore we cannot use the KB/MB macros - we have to
934 * multiply it by a round 1000 to get it right.
935 */
936 if (gmac->GMAC_NCFGR & GMAC_NCFGR_SPD) {
937 /* 100Mbps */
938 bandwidth = (100 * 1000 * 1000) / 8;
939 } else {
940 /* 10Mbps */
941 bandwidth = (10 * 1000 * 1000) / 8;
942 }
943
944 return bandwidth;
945 }
946
947 static int eth_sam_gmac_get_qav_idle_slope(Gmac *gmac, int queue_id,
948 unsigned int *idle_slope)
949 {
950 /* Verify queue id */
951 if (queue_id < GMAC_QUE_1 || queue_id > GMAC_ACTIVE_PRIORITY_QUEUE_NUM) {
952 return -EINVAL;
953 }
954
955 if (queue_id == GMAC_QUE_2) {
956 *idle_slope = gmac->GMAC_CBSISQA;
957 } else {
958 *idle_slope = gmac->GMAC_CBSISQB;
959 }
960
961 /* Convert to bps as expected by upper layer */
962 *idle_slope *= 8U;
963
964 return 0;
965 }
966
967 static int eth_sam_gmac_get_qav_delta_bandwidth(Gmac *gmac, int queue_id,
968 unsigned int *delta_bandwidth)
969 {
970 uint32_t bandwidth;
971 unsigned int idle_slope;
972 int ret;
973
974 ret = eth_sam_gmac_get_qav_idle_slope(gmac, queue_id, &idle_slope);
975 if (ret) {
976 return ret;
977 }
978
979 /* Calculate in Bps */
980 idle_slope /= 8U;
981
982 /* Get bandwidth and convert to bps */
983 bandwidth = eth_sam_gmac_get_bandwidth(gmac);
984
985 /* Calculate percentage - instead of multiplying idle_slope by 100,
986 * divide bandwidth - these numbers are so large that it should not
987 * influence the outcome and saves us from employing larger data types.
988 */
989 *delta_bandwidth = idle_slope / (bandwidth / 100U);
990
991 return 0;
992 }
993
994 static int eth_sam_gmac_setup_qav_delta_bandwidth(Gmac *gmac, int queue_id,
995 int queue_share)
996 {
997 uint32_t bandwidth;
998 uint32_t idle_slope;
999
1000 /* Verify queue id */
1001 if (queue_id < GMAC_QUE_1 || queue_id > GMAC_ACTIVE_PRIORITY_QUEUE_NUM) {
1002 return -EINVAL;
1003 }
1004
1005 bandwidth = eth_sam_gmac_get_bandwidth(gmac);
1006
1007 idle_slope = (bandwidth * queue_share) / 100U;
1008
1009 return eth_sam_gmac_setup_qav_idle_slope(gmac, queue_id, idle_slope);
1010 }
1011 #endif
1012
1013 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
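/*
 * Derive the GMAC_TI increment fields: reduce MCK_FREQ_HZ and NSEC_PER_SEC by
 * their common factors of 10, 5 and 2, then split the resulting nanoseconds
 * per timer tick into the NIT/CNS/ACNS fields.
 */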
1014 static void gmac_setup_ptp_clock_divisors(Gmac *gmac)
1015 {
1016 int mck_divs[] = {10, 5, 2};
1017 double min_cycles;
1018 double min_period;
1019 int div;
1020 int i;
1021
1022 uint8_t cns, acns, nit;
1023
1024 min_cycles = MCK_FREQ_HZ;
1025 min_period = NSEC_PER_SEC;
1026
1027 for (i = 0; i < ARRAY_SIZE(mck_divs); ++i) {
1028 div = mck_divs[i];
1029 while ((double)(min_cycles / div) == (int)(min_cycles / div) &&
1030 (double)(min_period / div) == (int)(min_period / div)) {
1031 min_cycles /= div;
1032 min_period /= div;
1033 }
1034 }
1035
1036 nit = min_cycles - 1;
1037 cns = 0U;
1038 acns = 0U;
1039
1040 while ((cns + 2) * nit < min_period) {
1041 cns++;
1042 }
1043
1044 acns = min_period - (nit * cns);
1045
1046 gmac->GMAC_TI =
1047 GMAC_TI_CNS(cns) | GMAC_TI_ACNS(acns) | GMAC_TI_NIT(nit);
1048 gmac->GMAC_TISUBN = 0;
1049 }
1050 #endif
1051
1052 static int gmac_init(Gmac *gmac, uint32_t gmac_ncfgr_val)
1053 {
1054 int mck_divisor;
1055
1056 mck_divisor = get_mck_clock_divisor(MCK_FREQ_HZ);
1057 if (mck_divisor < 0) {
1058 return mck_divisor;
1059 }
1060
1061 /* Set Network Control Register to its default value, clear stats. */
1062 gmac->GMAC_NCR = GMAC_NCR_CLRSTAT | GMAC_NCR_MPE;
1063
1064 /* Disable all interrupts */
1065 gmac->GMAC_IDR = UINT32_MAX;
1066 /* Clear all interrupts */
1067 (void)gmac->GMAC_ISR;
1068 disable_all_priority_queue_interrupt(gmac);
1069
1070 /* Setup Hash Registers - enable reception of all multicast frames when
1071 * GMAC_NCFGR_MTIHEN is set.
1072 */
1073 gmac->GMAC_HRB = UINT32_MAX;
1074 gmac->GMAC_HRT = UINT32_MAX;
1075 /* Setup Network Configuration Register */
1076 gmac->GMAC_NCFGR = gmac_ncfgr_val | mck_divisor;
1077
1078 /* The default (RMII) is defined in the atmel,gmac-common.yaml file */
1079 switch (DT_INST_ENUM_IDX(0, phy_connection_type)) {
1080 case 0: /* mii */
1081 gmac->GMAC_UR = 0x1;
1082 break;
1083 case 1: /* rmii */
1084 gmac->GMAC_UR = 0x0;
1085 break;
1086 default:
1087 /* Build assert at top of file should catch this case */
1088 LOG_ERR("The phy connection type is invalid");
1089
1090 return -EINVAL;
1091 }
1092
1093 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
1094 /* Initialize PTP Clock Registers */
1095 gmac_setup_ptp_clock_divisors(gmac);
1096
1097 gmac->GMAC_TN = 0;
1098 gmac->GMAC_TSH = 0;
1099 gmac->GMAC_TSL = 0;
1100 #endif
1101
1102 /* Enable Qav if priority queues are used, and setup the default delta
1103 * bandwidth according to IEEE802.1Qav (34.3.1)
1104 */
1105 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM == 1
1106 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 1, 75);
1107 #elif GMAC_ACTIVE_PRIORITY_QUEUE_NUM == 2
1108 /* For multiple priority queues, 802.1Qav suggests using 75% for the
1109 * highest priority queue, and 0% for the lower priority queues.
1110 * This is because the lower priority queues are supposed to be using
1111 * the bandwidth available from the higher priority queues AND its own
1112 * available bandwidth (see 802.1Q 34.3.1 for more details).
1113 * This does not work like that in SAM GMAC - the lower priority queues
1114 * are not using the bandwidth reserved for the higher priority queues
1115 * at all. Thus we still set the default to a total of the recommended
1116 * 75%, but split the bandwidth between them manually.
1117 */
1118 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 1, 25);
1119 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 2, 50);
1120 #elif GMAC_ACTIVE_PRIORITY_QUEUE_NUM == 3
1121 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 1, 25);
1122 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 2, 25);
1123 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 3, 25);
1124 #elif GMAC_ACTIVE_PRIORITY_QUEUE_NUM == 4
1125 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 1, 21);
1126 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 2, 18);
1127 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 3, 18);
1128 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 4, 18);
1129 #elif GMAC_ACTIVE_PRIORITY_QUEUE_NUM == 5
1130 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 1, 15);
1131 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 2, 15);
1132 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 3, 15);
1133 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 4, 15);
1134 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 5, 15);
1135 #endif
1136
1137 eth_sam_gmac_init_qav(gmac);
1138
1139 return 0;
1140 }
1141
1142 static void link_configure(Gmac *gmac, bool full_duplex, bool speed_100M)
1143 {
1144 uint32_t val;
1145
1146 val = gmac->GMAC_NCFGR;
1147
1148 val &= ~(GMAC_NCFGR_FD | GMAC_NCFGR_SPD);
1149 val |= (full_duplex) ? GMAC_NCFGR_FD : 0;
1150 val |= (speed_100M) ? GMAC_NCFGR_SPD : 0;
1151
1152 gmac->GMAC_NCFGR = val;
1153
1154 gmac->GMAC_NCR |= (GMAC_NCR_RXEN | GMAC_NCR_TXEN);
1155 }
1156
1157 static int nonpriority_queue_init(Gmac *gmac, struct gmac_queue *queue)
1158 {
1159 int result;
1160
1161 __ASSERT_NO_MSG(queue->rx_desc_list.len > 0);
1162 __ASSERT_NO_MSG(queue->tx_desc_list.len > 0);
1163 __ASSERT(!((uint32_t)queue->rx_desc_list.buf & ~GMAC_RBQB_ADDR_Msk),
1164 "RX descriptors have to be word aligned");
1165 __ASSERT(!((uint32_t)queue->tx_desc_list.buf & ~GMAC_TBQB_ADDR_Msk),
1166 "TX descriptors have to be word aligned");
1167
1168 /* Setup descriptor lists */
1169 result = rx_descriptors_init(gmac, queue);
1170 if (result < 0) {
1171 return result;
1172 }
1173
1174 tx_descriptors_init(gmac, queue);
1175
1176 #if GMAC_MULTIPLE_TX_PACKETS == 0
1177 /* Initialize TX semaphore. This semaphore is used to wait until the TX
1178 * data has been sent.
1179 */
1180 k_sem_init(&queue->tx_sem, 0, 1);
1181 #else
1182 /* Initialize TX descriptors semaphore. The semaphore is required as the
1183 * size of the TX descriptor list is limited while the number of TX data
1184 * buffers is not.
1185 */
1186 k_sem_init(&queue->tx_desc_sem, queue->tx_desc_list.len - 1,
1187 queue->tx_desc_list.len - 1);
1188 #endif
1189
1190 /* Set Receive Buffer Queue Pointer Register */
1191 gmac->GMAC_RBQB = (uint32_t)queue->rx_desc_list.buf;
1192 /* Set Transmit Buffer Queue Pointer Register */
1193 gmac->GMAC_TBQB = (uint32_t)queue->tx_desc_list.buf;
1194
1195 /* Configure GMAC DMA transfer */
1196 gmac->GMAC_DCFGR =
1197 /* Receive Buffer Size (defined in multiples of 64 bytes) */
1198 GMAC_DCFGR_DRBS(CONFIG_NET_BUF_DATA_SIZE >> 6) |
1199 #if defined(GMAC_DCFGR_RXBMS)
1200 /* Use full receive buffer size on parts where this is selectable */
1201 GMAC_DCFGR_RXBMS(3) |
1202 #endif
1203 /* Attempt to use INCR4 AHB bursts (Default) */
1204 GMAC_DCFGR_FBLDO_INCR4 |
1205 /* DMA Queue Flags */
1206 GMAC_DMA_QUEUE_FLAGS;
1207
1208 /* Setup RX/TX completion and error interrupts */
1209 gmac->GMAC_IER = GMAC_INT_EN_FLAGS;
1210
1211 queue->err_rx_frames_dropped = 0U;
1212 queue->err_rx_flushed_count = 0U;
1213 queue->err_tx_flushed_count = 0U;
1214
1215 LOG_INF("Queue %d activated", queue->que_idx);
1216
1217 return 0;
1218 }
1219
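/*
 * Zero-copy receive: pass each filled RX fragment up the stack and replace it
 * in the descriptor ring with a freshly allocated net buffer, so received data
 * is never copied by the driver.
 */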
1220 static struct net_pkt *frame_get(struct gmac_queue *queue)
1221 {
1222 struct gmac_desc_list *rx_desc_list = &queue->rx_desc_list;
1223 struct gmac_desc *rx_desc;
1224 struct net_buf **rx_frag_list = queue->rx_frag_list;
1225 struct net_pkt *rx_frame;
1226 bool frame_is_complete;
1227 struct net_buf *frag;
1228 struct net_buf *new_frag;
1229 struct net_buf *last_frag = NULL;
1230 uint8_t *frag_data;
1231 uint32_t frag_len;
1232 uint32_t frame_len = 0U;
1233 uint16_t tail;
1234 uint8_t wrap;
1235
1236 /* Check if there exists a complete frame in RX descriptor list */
1237 tail = rx_desc_list->tail;
1238 rx_desc = &rx_desc_list->buf[tail];
1239 frame_is_complete = false;
1240 while ((rx_desc->w0 & GMAC_RXW0_OWNERSHIP)
1241 && !frame_is_complete) {
1242 frame_is_complete = (bool)(rx_desc->w1
1243 & GMAC_RXW1_EOF);
1244 MODULO_INC(tail, rx_desc_list->len);
1245 rx_desc = &rx_desc_list->buf[tail];
1246 }
1247 /* Frame which is not complete can be dropped by GMAC. Do not process
1248 * it, even partially.
1249 */
1250 if (!frame_is_complete) {
1251 return NULL;
1252 }
1253
1254 rx_frame = net_pkt_rx_alloc(K_NO_WAIT);
1255
1256 /* Process a frame */
1257 tail = rx_desc_list->tail;
1258 rx_desc = &rx_desc_list->buf[tail];
1259 frame_is_complete = false;
1260
1261 /* TODO: Don't assume first RX fragment will have SOF (Start of frame)
1262 * bit set. If SOF bit is missing recover gracefully by dropping
1263 * invalid frame.
1264 */
1265 __ASSERT(rx_desc->w1 & GMAC_RXW1_SOF,
1266 "First RX fragment is missing SOF bit");
1267
1268 /* TODO: We know already tail and head indexes of fragments containing
1269 * complete frame. Loop over those indexes, don't search for them
1270 * again.
1271 */
1272 while ((rx_desc->w0 & GMAC_RXW0_OWNERSHIP)
1273 && !frame_is_complete) {
1274 frag = rx_frag_list[tail];
1275 frag_data =
1276 (uint8_t *)(rx_desc->w0 & GMAC_RXW0_ADDR);
1277 __ASSERT(frag->data == frag_data,
1278 "RX descriptor and buffer list desynchronized");
1279 frame_is_complete = (bool)(rx_desc->w1 & GMAC_RXW1_EOF);
1280 if (frame_is_complete) {
1281 frag_len = (rx_desc->w1 & GMAC_RXW1_LEN) - frame_len;
1282 } else {
1283 frag_len = CONFIG_NET_BUF_DATA_SIZE;
1284 }
1285
1286 frame_len += frag_len;
1287
1288 /* Link frame fragments only if RX net buffer is valid */
1289 if (rx_frame != NULL) {
1290 /* Assure cache coherency after DMA write operation */
1291 dcache_invalidate((uint32_t)frag_data, frag->size);
1292
1293 /* Get a new data net buffer from the buffer pool */
1294 new_frag = net_pkt_get_frag(rx_frame, CONFIG_NET_BUF_DATA_SIZE, K_NO_WAIT);
1295 if (new_frag == NULL) {
1296 queue->err_rx_frames_dropped++;
1297 net_pkt_unref(rx_frame);
1298 rx_frame = NULL;
1299 } else {
1300 net_buf_add(frag, frag_len);
1301 if (!last_frag) {
1302 net_pkt_frag_insert(rx_frame, frag);
1303 } else {
1304 net_buf_frag_insert(last_frag, frag);
1305 }
1306 last_frag = frag;
1307 frag = new_frag;
1308 rx_frag_list[tail] = frag;
1309 }
1310 }
1311
1312 /* Update buffer descriptor status word */
1313 rx_desc->w1 = 0U;
1314 /* Guarantee that status word is written before the address
1315 * word to avoid a race condition.
1316 */
1317 barrier_dmem_fence_full();
1318 /* Update buffer descriptor address word */
1319 wrap = (tail == rx_desc_list->len-1U ? GMAC_RXW0_WRAP : 0);
1320 rx_desc->w0 = ((uint32_t)frag->data & GMAC_RXW0_ADDR) | wrap;
1321
1322 MODULO_INC(tail, rx_desc_list->len);
1323 rx_desc = &rx_desc_list->buf[tail];
1324 }
1325
1326 rx_desc_list->tail = tail;
1327 LOG_DBG("Frame complete: rx=%p, tail=%d", rx_frame, tail);
1328 __ASSERT_NO_MSG(frame_is_complete);
1329
1330 return rx_frame;
1331 }
1332
1333 static void eth_rx(struct gmac_queue *queue)
1334 {
1335 struct eth_sam_dev_data *dev_data =
1336 CONTAINER_OF(queue, struct eth_sam_dev_data,
1337 queue_list[queue->que_idx]);
1338 struct net_pkt *rx_frame;
1339 #if defined(CONFIG_NET_GPTP)
1340 const struct device *const dev = net_if_get_device(dev_data->iface);
1341 const struct eth_sam_dev_cfg *const cfg = dev->config;
1342 Gmac *gmac = cfg->regs;
1343 struct gptp_hdr *hdr;
1344 #endif
1345
1346 /* More than one frame could have been received by GMAC, get all
1347 * complete frames stored in the GMAC RX descriptor list.
1348 */
1349 rx_frame = frame_get(queue);
1350 while (rx_frame) {
1351 LOG_DBG("ETH rx");
1352
1353 #if defined(CONFIG_NET_GPTP)
1354 hdr = check_gptp_msg(get_iface(dev_data), rx_frame, false);
1355
1356 timestamp_rx_pkt(gmac, hdr, rx_frame);
1357
1358 if (hdr) {
1359 update_pkt_priority(hdr, rx_frame);
1360 }
1361 #endif /* CONFIG_NET_GPTP */
1362
1363 if (net_recv_data(get_iface(dev_data), rx_frame) < 0) {
1364 eth_stats_update_errors_rx(get_iface(dev_data));
1365 net_pkt_unref(rx_frame);
1366 }
1367
1368 rx_frame = frame_get(queue);
1369 }
1370 }
1371
1372 #if !defined(CONFIG_ETH_SAM_GMAC_FORCE_QUEUE) && \
1373 ((GMAC_ACTIVE_QUEUE_NUM != NET_TC_TX_COUNT) || \
1374 ((NET_TC_TX_COUNT != NET_TC_RX_COUNT) && defined(CONFIG_NET_VLAN)))
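/* Map IEEE 802.1Q packet priorities (0..7) onto the available hardware queues. */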
1375 static int priority2queue(enum net_priority priority)
1376 {
1377 static const uint8_t queue_priority_map[] = {
1378 #if GMAC_ACTIVE_QUEUE_NUM == 1
1379 0, 0, 0, 0, 0, 0, 0, 0
1380 #endif
1381 #if GMAC_ACTIVE_QUEUE_NUM == 2
1382 0, 0, 0, 0, 1, 1, 1, 1
1383 #endif
1384 #if GMAC_ACTIVE_QUEUE_NUM == 3
1385 0, 0, 0, 0, 1, 1, 2, 2
1386 #endif
1387 #if GMAC_ACTIVE_QUEUE_NUM == 4
1388 0, 0, 0, 0, 1, 1, 2, 3
1389 #endif
1390 #if GMAC_ACTIVE_QUEUE_NUM == 5
1391 0, 0, 0, 0, 1, 2, 3, 4
1392 #endif
1393 #if GMAC_ACTIVE_QUEUE_NUM == 6
1394 0, 0, 0, 1, 2, 3, 4, 5
1395 #endif
1396 };
1397
1398 return queue_priority_map[priority];
1399 }
1400 #endif
1401
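/*
 * Queue a frame for transmission: each fragment gets its own TX descriptor and
 * the USED bit of the first descriptor is cleared last, so the controller only
 * starts once the whole descriptor chain is in place.
 */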
1402 static int eth_tx(const struct device *dev, struct net_pkt *pkt)
1403 {
1404 const struct eth_sam_dev_cfg *const cfg = dev->config;
1405 struct eth_sam_dev_data *const dev_data = dev->data;
1406 Gmac *gmac = cfg->regs;
1407 struct gmac_queue *queue;
1408 struct gmac_desc_list *tx_desc_list;
1409 struct gmac_desc *tx_desc;
1410 struct gmac_desc *tx_first_desc;
1411 struct net_buf *frag;
1412 uint8_t *frag_data;
1413 uint16_t frag_len;
1414 uint32_t err_tx_flushed_count_at_entry;
1415 #if GMAC_MULTIPLE_TX_PACKETS == 1
1416 unsigned int key;
1417 #endif
1418 uint8_t pkt_prio;
1419 #if GMAC_MULTIPLE_TX_PACKETS == 0
1420 #if defined(CONFIG_NET_GPTP)
1421 struct gptp_hdr *hdr;
1422 #endif
1423 #endif
1424
1425 __ASSERT(pkt, "buf pointer is NULL");
1426 __ASSERT(pkt->frags, "Frame data missing");
1427
1428 LOG_DBG("ETH tx");
1429
1430 /* Decide which queue should be used */
1431 pkt_prio = net_pkt_priority(pkt);
1432
1433 #if defined(CONFIG_ETH_SAM_GMAC_FORCE_QUEUE)
1434 /* Route everything to the forced queue */
1435 queue = &dev_data->queue_list[CONFIG_ETH_SAM_GMAC_FORCED_QUEUE];
1436 #elif GMAC_ACTIVE_QUEUE_NUM == CONFIG_NET_TC_TX_COUNT
1437 /* Prefer to choose the queue based on its traffic class */
1438 queue = &dev_data->queue_list[net_tx_priority2tc(pkt_prio)];
1439 #else
1440 /* If that's not possible due to config - use builtin mapping */
1441 queue = &dev_data->queue_list[priority2queue(pkt_prio)];
1442 #endif
1443
1444 tx_desc_list = &queue->tx_desc_list;
1445 err_tx_flushed_count_at_entry = queue->err_tx_flushed_count;
1446
1447 frag = pkt->frags;
1448
1449 /* Keep reference to the descriptor */
1450 tx_first_desc = &tx_desc_list->buf[tx_desc_list->head];
1451
1452 while (frag) {
1453 frag_data = frag->data;
1454 frag_len = frag->len;
1455
1456 /* Assure cache coherency before DMA read operation */
1457 dcache_clean((uint32_t)frag_data, frag->size);
1458
1459 #if GMAC_MULTIPLE_TX_PACKETS == 1
1460 k_sem_take(&queue->tx_desc_sem, K_FOREVER);
1461
1462 /* The following section becomes critical and requires IRQ lock
1463 * / unlock protection only because tx_error_handler() may run
1464 * concurrently.
1465 */
1466 key = irq_lock();
1467
1468 /* Check if tx_error_handler() function was executed */
1469 if (queue->err_tx_flushed_count !=
1470 err_tx_flushed_count_at_entry) {
1471 irq_unlock(key);
1472 return -EIO;
1473 }
1474 #endif
1475
1476 tx_desc = &tx_desc_list->buf[tx_desc_list->head];
1477
1478 /* Update buffer descriptor address word */
1479 tx_desc->w0 = (uint32_t)frag_data;
1480
1481 /* Update buffer descriptor status word (clear used bit except
1482 * for the first frag).
1483 */
1484 tx_desc->w1 = (frag_len & GMAC_TXW1_LEN)
1485 | (!frag->frags ? GMAC_TXW1_LASTBUFFER : 0)
1486 | (tx_desc_list->head == tx_desc_list->len - 1U
1487 ? GMAC_TXW1_WRAP : 0)
1488 | (tx_desc == tx_first_desc ? GMAC_TXW1_USED : 0);
1489
1490 /* Update descriptor position */
1491 MODULO_INC(tx_desc_list->head, tx_desc_list->len);
1492
1493 #if GMAC_MULTIPLE_TX_PACKETS == 1
1494 __ASSERT(tx_desc_list->head != tx_desc_list->tail,
1495 "tx_desc_list overflow");
1496
1497 /* Account for a sent frag */
1498 ring_buf_put(&queue->tx_frag_list, POINTER_TO_UINT(frag));
1499
1500 /* frag is internally queued, so hold a reference to it */
1501 net_pkt_frag_ref(frag);
1502
1503 irq_unlock(key);
1504 #endif
1505
1506 /* Continue with the rest of fragments (only data) */
1507 frag = frag->frags;
1508 }
1509
1510 #if GMAC_MULTIPLE_TX_PACKETS == 1
1511 key = irq_lock();
1512
1513 /* Check if tx_error_handler() function was executed */
1514 if (queue->err_tx_flushed_count != err_tx_flushed_count_at_entry) {
1515 irq_unlock(key);
1516 return -EIO;
1517 }
1518 #endif
1519
1520 /* Ensure the descriptor following the last one is marked as used */
1521 tx_desc_list->buf[tx_desc_list->head].w1 = GMAC_TXW1_USED;
1522
1523 /* Guarantee that all the fragments have been written before removing
1524 * the used bit to avoid a race condition.
1525 */
1526 barrier_dmem_fence_full();
1527
1528 /* Remove the used bit of the first fragment to allow the controller
1529 * to process it and the following fragments.
1530 */
1531 tx_first_desc->w1 &= ~GMAC_TXW1_USED;
1532
1533 #if GMAC_MULTIPLE_TX_PACKETS == 1
1534 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
1535 /* Account for a sent frame */
1536 ring_buf_put(&queue->tx_frames, POINTER_TO_UINT(pkt));
1537
1538 /* pkt is internally queued, so hold a reference to it */
1539 net_pkt_ref(pkt);
1540 #endif
1541
1542 irq_unlock(key);
1543 #endif
1544
1545 /* Guarantee that the first fragment has its USED bit cleared before
1546 * transmission starts, to avoid packets getting stuck.
1547 */
1548 barrier_dmem_fence_full();
1549
1550 /* Start transmission */
1551 gmac->GMAC_NCR |= GMAC_NCR_TSTART;
1552
1553 #if GMAC_MULTIPLE_TX_PACKETS == 0
1554 /* Wait until the packet is sent */
1555 k_sem_take(&queue->tx_sem, K_FOREVER);
1556
1557 /* Check if transmit successful or not */
1558 if (queue->err_tx_flushed_count != err_tx_flushed_count_at_entry) {
1559 return -EIO;
1560 }
1561 #if defined(CONFIG_NET_GPTP)
1562 #if defined(CONFIG_NET_GPTP)
1563 hdr = check_gptp_msg(get_iface(dev_data), pkt, true);
1564 timestamp_tx_pkt(gmac, hdr, pkt);
1565 if (hdr && need_timestamping(hdr)) {
1566 net_if_add_tx_timestamp(pkt);
1567 }
1568 #endif
1569 #endif
1570 #endif
1571
1572 return 0;
1573 }
1574
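/* Main (non-priority) queue interrupt handler: dispatches RX/TX completion and error events. */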
1575 static void queue0_isr(const struct device *dev)
1576 {
1577 const struct eth_sam_dev_cfg *const cfg = dev->config;
1578 struct eth_sam_dev_data *const dev_data = dev->data;
1579 Gmac *gmac = cfg->regs;
1580 struct gmac_queue *queue;
1581 struct gmac_desc_list *rx_desc_list;
1582 struct gmac_desc_list *tx_desc_list;
1583 struct gmac_desc *tail_desc;
1584 uint32_t isr;
1585
1586 /* Interrupt Status Register is cleared on read */
1587 isr = gmac->GMAC_ISR;
1588 LOG_DBG("GMAC_ISR=0x%08x", isr);
1589
1590 queue = &dev_data->queue_list[0];
1591 rx_desc_list = &queue->rx_desc_list;
1592 tx_desc_list = &queue->tx_desc_list;
1593
1594 /* RX packet */
1595 if (isr & GMAC_INT_RX_ERR_BITS) {
1596 rx_error_handler(gmac, queue);
1597 } else if (isr & GMAC_ISR_RCOMP) {
1598 tail_desc = &rx_desc_list->buf[rx_desc_list->tail];
1599 LOG_DBG("rx.w1=0x%08x, tail=%d",
1600 tail_desc->w1,
1601 rx_desc_list->tail);
1602 eth_rx(queue);
1603 }
1604
1605 /* TX packet */
1606 if (isr & GMAC_INT_TX_ERR_BITS) {
1607 tx_error_handler(gmac, queue);
1608 } else if (isr & GMAC_ISR_TCOMP) {
1609 #if GMAC_MULTIPLE_TX_PACKETS == 1
1610 tail_desc = &tx_desc_list->buf[tx_desc_list->tail];
1611 LOG_DBG("tx.w1=0x%08x, tail=%d",
1612 tail_desc->w1,
1613 tx_desc_list->tail);
1614 #endif
1615
1616 tx_completed(gmac, queue);
1617 }
1618
1619 if (isr & GMAC_IER_HRESP) {
1620 LOG_DBG("IER HRESP");
1621 }
1622 }
1623
1624 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
1625 static inline void priority_queue_isr(const struct device *dev,
1626 unsigned int queue_idx)
1627 {
1628 const struct eth_sam_dev_cfg *const cfg = dev->config;
1629 struct eth_sam_dev_data *const dev_data = dev->data;
1630 Gmac *gmac = cfg->regs;
1631 struct gmac_queue *queue;
1632 struct gmac_desc_list *rx_desc_list;
1633 struct gmac_desc_list *tx_desc_list;
1634 struct gmac_desc *tail_desc;
1635 uint32_t isrpq;
1636
1637 isrpq = gmac->GMAC_ISRPQ[queue_idx - 1];
1638 LOG_DBG("GMAC_ISRPQ%d=0x%08x", queue_idx - 1, isrpq);
1639
1640 queue = &dev_data->queue_list[queue_idx];
1641 rx_desc_list = &queue->rx_desc_list;
1642 tx_desc_list = &queue->tx_desc_list;
1643
1644 /* RX packet */
1645 if (isrpq & GMAC_INTPQ_RX_ERR_BITS) {
1646 rx_error_handler(gmac, queue);
1647 } else if (isrpq & GMAC_ISRPQ_RCOMP) {
1648 tail_desc = &rx_desc_list->buf[rx_desc_list->tail];
1649 LOG_DBG("rx.w1=0x%08x, tail=%d",
1650 tail_desc->w1,
1651 rx_desc_list->tail);
1652 eth_rx(queue);
1653 }
1654
1655 /* TX packet */
1656 if (isrpq & GMAC_INTPQ_TX_ERR_BITS) {
1657 tx_error_handler(gmac, queue);
1658 } else if (isrpq & GMAC_ISRPQ_TCOMP) {
1659 #if GMAC_MULTIPLE_TX_PACKETS == 1
1660 tail_desc = &tx_desc_list->buf[tx_desc_list->tail];
1661 LOG_DBG("tx.w1=0x%08x, tail=%d",
1662 tail_desc->w1,
1663 tx_desc_list->tail);
1664 #endif
1665
1666 tx_completed(gmac, queue);
1667 }
1668
1669 if (isrpq & GMAC_IERPQ_HRESP) {
1670 LOG_DBG("IERPQ%d HRESP", queue_idx - 1);
1671 }
1672 }
1673 #endif
1674
1675 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
1676 static void queue1_isr(const struct device *dev)
1677 {
1678 priority_queue_isr(dev, 1);
1679 }
1680 #endif
1681
1682 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 2
1683 static void queue2_isr(const struct device *dev)
1684 {
1685 priority_queue_isr(dev, 2);
1686 }
1687 #endif
1688
1689 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 3
1690 static void queue3_isr(const struct device *dev)
1691 {
1692 priority_queue_isr(dev, 3);
1693 }
1694 #endif
1695
1696 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 4
1697 static void queue4_isr(const struct device *dev)
1698 {
1699 priority_queue_isr(dev, 4);
1700 }
1701 #endif
1702
1703 #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 5
1704 static void queue5_isr(const struct device *dev)
1705 {
1706 priority_queue_isr(dev, 5);
1707 }
1708 #endif
1709
1710 static int eth_initialize(const struct device *dev)
1711 {
1712 const struct eth_sam_dev_cfg *const cfg = dev->config;
1713 int retval;
1714
1715 cfg->config_func();
1716
1717 #ifdef CONFIG_SOC_FAMILY_ATMEL_SAM
1718 /* Enable GMAC module's clock */
1719 (void)clock_control_on(SAM_DT_PMC_CONTROLLER,
1720 (clock_control_subsys_t)&cfg->clock_cfg);
1721 #else
1722 /* Enable MCLK clock on GMAC */
1723 MCLK->AHBMASK.reg |= MCLK_AHBMASK_GMAC;
1724 *MCLK_GMAC |= MCLK_GMAC_MASK;
1725 #endif
1726 /* Connect pins to the peripheral */
1727 retval = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
1728
1729 return retval;
1730 }
1731
1732 #if DT_INST_NODE_HAS_PROP(0, mac_eeprom)
1733 static void get_mac_addr_from_i2c_eeprom(uint8_t mac_addr[6])
1734 {
1735 uint32_t iaddr = CONFIG_ETH_SAM_GMAC_MAC_I2C_INT_ADDRESS;
1736 int ret;
1737 const struct i2c_dt_spec i2c = I2C_DT_SPEC_GET(DT_INST_PHANDLE(0, mac_eeprom));
1738
1739 if (!device_is_ready(i2c.bus)) {
1740 LOG_ERR("Bus device is not ready");
1741 return;
1742 }
1743
1744 ret = i2c_write_read_dt(&i2c,
1745 &iaddr, CONFIG_ETH_SAM_GMAC_MAC_I2C_INT_ADDRESS_SIZE,
1746 mac_addr, 6);
1747
1748 if (ret != 0) {
1749 LOG_ERR("I2C: failed to read MAC addr");
1750 return;
1751 }
1752 }
1753 #endif
1754
1755 static void generate_mac(uint8_t mac_addr[6])
1756 {
1757 #if DT_INST_NODE_HAS_PROP(0, mac_eeprom)
1758 get_mac_addr_from_i2c_eeprom(mac_addr);
1759 #elif DT_INST_PROP(0, zephyr_random_mac_address)
1760 gen_random_mac(mac_addr, ATMEL_OUI_B0, ATMEL_OUI_B1, ATMEL_OUI_B2);
1761 #endif
1762 }
1763
1764 static void phy_link_state_changed(const struct device *pdev,
1765 struct phy_link_state *state,
1766 void *user_data)
1767 {
1768 const struct device *dev = (const struct device *) user_data;
1769 struct eth_sam_dev_data *const dev_data = dev->data;
1770 const struct eth_sam_dev_cfg *const cfg = dev->config;
1771 bool is_up;
1772
1773 is_up = state->is_up;
1774
1775 if (is_up && !dev_data->link_up) {
1776 LOG_INF("Link up");
1777
1778 /* Announce link up status */
1779 dev_data->link_up = true;
1780 net_eth_carrier_on(dev_data->iface);
1781
1782 /* Set up link */
1783 link_configure(cfg->regs,
1784 PHY_LINK_IS_FULL_DUPLEX(state->speed),
1785 PHY_LINK_IS_SPEED_100M(state->speed));
1786 } else if (!is_up && dev_data->link_up) {
1787 LOG_INF("Link down");
1788
1789 /* Announce link down status */
1790 dev_data->link_up = false;
1791 net_eth_carrier_off(dev_data->iface);
1792 }
1793 }
1794
1795 static const struct device *eth_sam_gmac_get_phy(const struct device *dev)
1796 {
1797 const struct eth_sam_dev_cfg *const cfg = dev->config;
1798
1799 return cfg->phy_dev;
1800 }
1801
1802 static void eth0_iface_init(struct net_if *iface)
1803 {
1804 const struct device *dev = net_if_get_device(iface);
1805 struct eth_sam_dev_data *const dev_data = dev->data;
1806 const struct eth_sam_dev_cfg *const cfg = dev->config;
1807 static bool init_done;
1808 uint32_t gmac_ncfgr_val;
1809 int result;
1810 int i;
1811
1812 if (dev_data->iface == NULL) {
1813 dev_data->iface = iface;
1814 }
1815
1816 ethernet_init(iface);
1817
1818 /* The rest of initialization should only be done once */
1819 if (init_done) {
1820 return;
1821 }
1822
1823 /* Check the status of data caches */
1824 dcache_is_enabled();
1825
1826 /* Initialize GMAC driver */
1827 gmac_ncfgr_val =
1828 GMAC_NCFGR_MTIHEN /* Multicast Hash Enable */
1829 | GMAC_NCFGR_LFERD /* Length Field Error Frame Discard */
1830 | GMAC_NCFGR_RFCS /* Remove Frame Check Sequence */
1831 | GMAC_NCFGR_RXCOEN /* Receive Checksum Offload Enable */
1832 | GMAC_MAX_FRAME_SIZE;
1833 result = gmac_init(cfg->regs, gmac_ncfgr_val);
1834 if (result < 0) {
1835 LOG_ERR("Unable to initialize ETH driver");
1836 return;
1837 }
1838
1839 generate_mac(dev_data->mac_addr);
1840
1841 LOG_INF("MAC: %02x:%02x:%02x:%02x:%02x:%02x",
1842 dev_data->mac_addr[0], dev_data->mac_addr[1],
1843 dev_data->mac_addr[2], dev_data->mac_addr[3],
1844 dev_data->mac_addr[4], dev_data->mac_addr[5]);
1845
1846 /* Set MAC Address for frame filtering logic */
1847 mac_addr_set(cfg->regs, 0, dev_data->mac_addr);
1848
1849 /* Register Ethernet MAC Address with the upper layer */
1850 net_if_set_link_addr(iface, dev_data->mac_addr,
1851 sizeof(dev_data->mac_addr),
1852 NET_LINK_ETHERNET);
1853
1854 /* Initialize GMAC queues */
1855 for (i = GMAC_QUE_0; i < GMAC_QUEUE_NUM; i++) {
1856 result = queue_init(cfg->regs, &dev_data->queue_list[i]);
1857 if (result < 0) {
1858 LOG_ERR("Unable to initialize ETH queue%d", i);
1859 return;
1860 }
1861 }
1862
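	/*
	 * Program the screening registers that steer received frames into the
	 * priority queues: either force a single queue, map traffic classes
	 * 1:1 to queues, or route by VLAN priority.
	 */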
#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
#if defined(CONFIG_ETH_SAM_GMAC_FORCE_QUEUE)
	for (i = 0; i < CONFIG_NET_TC_RX_COUNT; ++i) {
		cfg->regs->GMAC_ST1RPQ[i] =
			GMAC_ST1RPQ_DSTCM(i) |
			GMAC_ST1RPQ_QNB(CONFIG_ETH_SAM_GMAC_FORCED_QUEUE);
	}
#elif GMAC_ACTIVE_QUEUE_NUM == NET_TC_RX_COUNT
	/* If TC configuration is compatible with HW configuration, setup the
	 * screening registers based on the DS/TC values.
	 * Map them 1:1 - TC 0 -> Queue 0, TC 1 -> Queue 1 etc.
	 */
	for (i = 0; i < CONFIG_NET_TC_RX_COUNT; ++i) {
		cfg->regs->GMAC_ST1RPQ[i] =
			GMAC_ST1RPQ_DSTCM(i) | GMAC_ST1RPQ_QNB(i);
	}
#elif defined(CONFIG_NET_VLAN)
	/* If VLAN is enabled, route packets according to VLAN priority */
	int j;

	i = 0;
	for (j = NET_PRIORITY_NC; j >= 0; --j) {
		if (priority2queue(j) == 0) {
			/* No point to set rules for the regular queue */
			continue;
		}

		if (i >= ARRAY_SIZE(cfg->regs->GMAC_ST2RPQ)) {
			/* No more screening registers available */
			break;
		}

		cfg->regs->GMAC_ST2RPQ[i++] =
			GMAC_ST2RPQ_QNB(priority2queue(j))
			| GMAC_ST2RPQ_VLANP(j)
			| GMAC_ST2RPQ_VLANE;
	}

#endif
#endif
	if (device_is_ready(cfg->phy_dev)) {
		phy_link_callback_set(cfg->phy_dev, &phy_link_state_changed,
				      (void *)dev);

	} else {
		LOG_ERR("PHY device not ready");
	}

	/* Do not start the interface until PHY link is up */
	if (!(dev_data->link_up)) {
		net_if_carrier_off(iface);
	}

	init_done = true;
}

static enum ethernet_hw_caps eth_sam_gmac_get_capabilities(const struct device *dev)
{
	ARG_UNUSED(dev);

	return ETHERNET_LINK_10BASE_T |
#if defined(CONFIG_NET_VLAN)
		ETHERNET_HW_VLAN |
#endif
#if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
		ETHERNET_PTP |
#endif
		ETHERNET_PRIORITY_QUEUES |
#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
		ETHERNET_QAV |
#endif
		ETHERNET_LINK_100BASE_T;
}

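/*
 * Qav (credit-based shaper) parameters can only be set or read when at least
 * one priority queue is active in hardware.
 */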
#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
static int eth_sam_gmac_set_qav_param(const struct device *dev,
				      enum ethernet_config_type type,
				      const struct ethernet_config *config)
{
	const struct eth_sam_dev_cfg *const cfg = dev->config;
	Gmac *gmac = cfg->regs;
	enum ethernet_qav_param_type qav_param_type;
	unsigned int delta_bandwidth;
	unsigned int idle_slope;
	int queue_id;
	bool enable;

	/* Priority queue IDs start from 1 for SAM GMAC */
	queue_id = config->qav_param.queue_id + 1;

	qav_param_type = config->qav_param.type;

	switch (qav_param_type) {
	case ETHERNET_QAV_PARAM_TYPE_STATUS:
		enable = config->qav_param.enabled;
		return eth_sam_gmac_setup_qav(gmac, queue_id, enable);
	case ETHERNET_QAV_PARAM_TYPE_DELTA_BANDWIDTH:
		delta_bandwidth = config->qav_param.delta_bandwidth;

		return eth_sam_gmac_setup_qav_delta_bandwidth(gmac, queue_id,
							      delta_bandwidth);
	case ETHERNET_QAV_PARAM_TYPE_IDLE_SLOPE:
		idle_slope = config->qav_param.idle_slope;

		/* The standard uses bps, SAM GMAC uses Bps - convert now */
		idle_slope /= 8U;

		return eth_sam_gmac_setup_qav_idle_slope(gmac, queue_id,
							 idle_slope);
	default:
		break;
	}

	return -ENOTSUP;
}
#endif

static int eth_sam_gmac_set_config(const struct device *dev,
				   enum ethernet_config_type type,
				   const struct ethernet_config *config)
{
	int result = 0;

	switch (type) {
#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
	case ETHERNET_CONFIG_TYPE_QAV_PARAM:
		return eth_sam_gmac_set_qav_param(dev, type, config);
#endif
	case ETHERNET_CONFIG_TYPE_MAC_ADDRESS:
	{
		struct eth_sam_dev_data *const dev_data = dev->data;
		const struct eth_sam_dev_cfg *const cfg = dev->config;

		memcpy(dev_data->mac_addr,
		       config->mac_address.addr,
		       sizeof(dev_data->mac_addr));

		/* Set MAC Address for frame filtering logic */
		mac_addr_set(cfg->regs, 0, dev_data->mac_addr);

		LOG_INF("%s MAC set to %02x:%02x:%02x:%02x:%02x:%02x",
			dev->name,
			dev_data->mac_addr[0], dev_data->mac_addr[1],
			dev_data->mac_addr[2], dev_data->mac_addr[3],
			dev_data->mac_addr[4], dev_data->mac_addr[5]);

		/* Register Ethernet MAC Address with the upper layer */
		net_if_set_link_addr(dev_data->iface, dev_data->mac_addr,
				     sizeof(dev_data->mac_addr),
				     NET_LINK_ETHERNET);
		break;
	}
	default:
		result = -ENOTSUP;
		break;
	}

	return result;
}

#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
static int eth_sam_gmac_get_qav_param(const struct device *dev,
				      enum ethernet_config_type type,
				      struct ethernet_config *config)
{
	const struct eth_sam_dev_cfg *const cfg = dev->config;
	Gmac *gmac = cfg->regs;
	enum ethernet_qav_param_type qav_param_type;
	int queue_id;
	bool *enabled;
	unsigned int *idle_slope;
	unsigned int *delta_bandwidth;

	/* Priority queue IDs start from 1 for SAM GMAC */
	queue_id = config->qav_param.queue_id + 1;

	qav_param_type = config->qav_param.type;

	switch (qav_param_type) {
	case ETHERNET_QAV_PARAM_TYPE_STATUS:
		enabled = &config->qav_param.enabled;
		return eth_sam_gmac_get_qav_status(gmac, queue_id, enabled);
	case ETHERNET_QAV_PARAM_TYPE_IDLE_SLOPE:
		idle_slope = &config->qav_param.idle_slope;
		return eth_sam_gmac_get_qav_idle_slope(gmac, queue_id,
						       idle_slope);
	case ETHERNET_QAV_PARAM_TYPE_OPER_IDLE_SLOPE:
		idle_slope = &config->qav_param.oper_idle_slope;
		return eth_sam_gmac_get_qav_idle_slope(gmac, queue_id,
						       idle_slope);
	case ETHERNET_QAV_PARAM_TYPE_DELTA_BANDWIDTH:
		delta_bandwidth = &config->qav_param.delta_bandwidth;
		return eth_sam_gmac_get_qav_delta_bandwidth(gmac, queue_id,
							    delta_bandwidth);
	case ETHERNET_QAV_PARAM_TYPE_TRAFFIC_CLASS:
#if GMAC_ACTIVE_QUEUE_NUM == NET_TC_TX_COUNT
		config->qav_param.traffic_class = queue_id;
		return 0;
#else
		/* Invalid configuration - no direct TC to queue mapping */
		return -ENOTSUP;
#endif
	default:
		break;
	}

	return -ENOTSUP;
}
#endif

static int eth_sam_gmac_get_config(const struct device *dev,
				   enum ethernet_config_type type,
				   struct ethernet_config *config)
{
	switch (type) {
	case ETHERNET_CONFIG_TYPE_PRIORITY_QUEUES_NUM:
		config->priority_queues_num = GMAC_ACTIVE_PRIORITY_QUEUE_NUM;
		return 0;
#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
	case ETHERNET_CONFIG_TYPE_QAV_PARAM:
		return eth_sam_gmac_get_qav_param(dev, type, config);
#endif
	default:
		break;
	}

	return -ENOTSUP;
}

#if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
static const struct device *eth_sam_gmac_get_ptp_clock(const struct device *dev)
{
	struct eth_sam_dev_data *const dev_data = dev->data;

	return dev_data->ptp_clock;
}
#endif

static const struct ethernet_api eth_api = {
	.iface_api.init = eth0_iface_init,

	.get_capabilities = eth_sam_gmac_get_capabilities,
	.set_config = eth_sam_gmac_set_config,
	.get_config = eth_sam_gmac_get_config,
	.get_phy = eth_sam_gmac_get_phy,
	.send = eth_tx,

#if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
	.get_ptp_clock = eth_sam_gmac_get_ptp_clock,
#endif
};

static void eth0_irq_config(void)
{
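	/*
	 * Queue 0 is serviced by the main GMAC interrupt; each active
	 * priority queue has its own dedicated interrupt line (q1..q5).
	 */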
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, gmac, irq),
		    DT_INST_IRQ_BY_NAME(0, gmac, priority),
		    queue0_isr, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_NAME(0, gmac, irq));

#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, q1, irq),
		    DT_INST_IRQ_BY_NAME(0, q1, priority),
		    queue1_isr, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_NAME(0, q1, irq));
#endif

#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 2
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, q2, irq),
		    DT_INST_IRQ_BY_NAME(0, q2, priority),
		    queue2_isr, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_NAME(0, q2, irq));
#endif

#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 3
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, q3, irq),
		    DT_INST_IRQ_BY_NAME(0, q3, priority),
		    queue3_isr, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_NAME(0, q3, irq));
#endif

#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 4
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, q4, irq),
		    DT_INST_IRQ_BY_NAME(0, q4, priority),
		    queue4_isr, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_NAME(0, q4, irq));
#endif

#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 5
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, q5, irq),
		    DT_INST_IRQ_BY_NAME(0, q5, priority),
		    queue5_isr, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_NAME(0, q5, irq));
#endif
}

PINCTRL_DT_INST_DEFINE(0);

static const struct eth_sam_dev_cfg eth0_config = {
	.regs = (Gmac *)DT_INST_REG_ADDR(0),
	.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0),
#ifdef CONFIG_SOC_FAMILY_ATMEL_SAM
	.clock_cfg = SAM_DT_INST_CLOCK_PMC_CFG(0),
#endif
	.config_func = eth0_irq_config,
	.phy_dev = DEVICE_DT_GET(DT_INST_PHANDLE(0, phy_handle))
};

static struct eth_sam_dev_data eth0_data = {
#if NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(0))
	.mac_addr = DT_INST_PROP(0, local_mac_address),
#endif
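	/*
	 * Descriptor lists are provided for every hardware queue; RX fragment
	 * and TX fragment/frame lists exist only for the queues that are
	 * actually used for traffic.
	 */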
	.queue_list = {
		{
			.que_idx = GMAC_QUE_0,
			.rx_desc_list = {
				.buf = rx_desc_que0,
				.len = ARRAY_SIZE(rx_desc_que0),
			},
			.tx_desc_list = {
				.buf = tx_desc_que0,
				.len = ARRAY_SIZE(tx_desc_que0),
			},
			.rx_frag_list = rx_frag_list_que0,
#if GMAC_MULTIPLE_TX_PACKETS == 1
			.tx_frag_list = {
				.buf = (uint32_t *)tx_frag_list_que0,
				.len = ARRAY_SIZE(tx_frag_list_que0),
			},
#if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
			.tx_frames = {
				.buf = (uint32_t *)tx_frame_list_que0,
				.len = ARRAY_SIZE(tx_frame_list_que0),
			},
#endif
#endif
#if GMAC_PRIORITY_QUEUE_NUM >= 1
		}, {
			.que_idx = GMAC_QUE_1,
			.rx_desc_list = {
				.buf = rx_desc_que1,
				.len = ARRAY_SIZE(rx_desc_que1),
			},
			.tx_desc_list = {
				.buf = tx_desc_que1,
				.len = ARRAY_SIZE(tx_desc_que1),
			},
#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1
			.rx_frag_list = rx_frag_list_que1,
#if GMAC_MULTIPLE_TX_PACKETS == 1
			.tx_frag_list = {
				.buf = (uint32_t *)tx_frag_list_que1,
				.len = ARRAY_SIZE(tx_frag_list_que1),
			},
#if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
			.tx_frames = {
				.buf = (uint32_t *)tx_frame_list_que1,
				.len = ARRAY_SIZE(tx_frame_list_que1),
			}
#endif
#endif
#endif
#endif
#if GMAC_PRIORITY_QUEUE_NUM >= 2
		}, {
			.que_idx = GMAC_QUE_2,
			.rx_desc_list = {
				.buf = rx_desc_que2,
				.len = ARRAY_SIZE(rx_desc_que2),
			},
			.tx_desc_list = {
				.buf = tx_desc_que2,
				.len = ARRAY_SIZE(tx_desc_que2),
			},
#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 2
			.rx_frag_list = rx_frag_list_que2,
#if GMAC_MULTIPLE_TX_PACKETS == 1
			.tx_frag_list = {
				.buf = (uint32_t *)tx_frag_list_que2,
				.len = ARRAY_SIZE(tx_frag_list_que2),
			},
#if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
			.tx_frames = {
				.buf = (uint32_t *)tx_frame_list_que2,
				.len = ARRAY_SIZE(tx_frame_list_que2),
			}
#endif
#endif
#endif
#endif
#if GMAC_PRIORITY_QUEUE_NUM >= 3
		}, {
			.que_idx = GMAC_QUE_3,
			.rx_desc_list = {
				.buf = rx_desc_que3,
				.len = ARRAY_SIZE(rx_desc_que3),
			},
			.tx_desc_list = {
				.buf = tx_desc_que3,
				.len = ARRAY_SIZE(tx_desc_que3),
			},
#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 3
			.rx_frag_list = rx_frag_list_que3,
#if GMAC_MULTIPLE_TX_PACKETS == 1
			.tx_frag_list = {
				.buf = (uint32_t *)tx_frag_list_que3,
				.len = ARRAY_SIZE(tx_frag_list_que3),
			},
#if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
			.tx_frames = {
				.buf = (uint32_t *)tx_frame_list_que3,
				.len = ARRAY_SIZE(tx_frame_list_que3),
			}
#endif
#endif
#endif
#endif
#if GMAC_PRIORITY_QUEUE_NUM >= 4
		}, {
			.que_idx = GMAC_QUE_4,
			.rx_desc_list = {
				.buf = rx_desc_que4,
				.len = ARRAY_SIZE(rx_desc_que4),
			},
			.tx_desc_list = {
				.buf = tx_desc_que4,
				.len = ARRAY_SIZE(tx_desc_que4),
			},
#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 4
			.rx_frag_list = rx_frag_list_que4,
#if GMAC_MULTIPLE_TX_PACKETS == 1
			.tx_frag_list = {
				.buf = (uint32_t *)tx_frag_list_que4,
				.len = ARRAY_SIZE(tx_frag_list_que4),
			},
#if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
			.tx_frames = {
				.buf = (uint32_t *)tx_frame_list_que4,
				.len = ARRAY_SIZE(tx_frame_list_que4),
			}
#endif
#endif
#endif
#endif
#if GMAC_PRIORITY_QUEUE_NUM >= 5
		}, {
			.que_idx = GMAC_QUE_5,
			.rx_desc_list = {
				.buf = rx_desc_que5,
				.len = ARRAY_SIZE(rx_desc_que5),
			},
			.tx_desc_list = {
				.buf = tx_desc_que5,
				.len = ARRAY_SIZE(tx_desc_que5),
			},
#if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 5
			.rx_frag_list = rx_frag_list_que5,
#if GMAC_MULTIPLE_TX_PACKETS == 1
			.tx_frag_list = {
				.buf = (uint32_t *)tx_frag_list_que5,
				.len = ARRAY_SIZE(tx_frag_list_que5),
			},
#if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
			.tx_frames = {
				.buf = (uint32_t *)tx_frame_list_que5,
				.len = ARRAY_SIZE(tx_frame_list_que5),
			}
#endif
#endif
#endif
#endif
		}
	},
};

ETH_NET_DEVICE_DT_INST_DEFINE(0,
			      eth_initialize, NULL, &eth0_data,
			      &eth0_config, CONFIG_ETH_INIT_PRIORITY, &eth_api,
			      GMAC_MTU);

#if defined(CONFIG_PTP_CLOCK_SAM_GMAC)
struct ptp_context {
	const struct device *eth_dev;
};

static struct ptp_context ptp_gmac_0_context;

static int ptp_clock_sam_gmac_set(const struct device *dev,
				  struct net_ptp_time *tm)
{
	struct ptp_context *ptp_context = dev->data;
	const struct eth_sam_dev_cfg *const cfg = ptp_context->eth_dev->config;
	Gmac *gmac = cfg->regs;

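	/*
	 * The seconds counter is split across TSH (upper 16 bits) and TSL
	 * (lower 32 bits); TN holds the nanoseconds.
	 */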
	gmac->GMAC_TSH = tm->_sec.high & 0xffff;
	gmac->GMAC_TSL = tm->_sec.low & 0xffffffff;
	gmac->GMAC_TN = tm->nanosecond & 0xffffffff;

	return 0;
}

static int ptp_clock_sam_gmac_get(const struct device *dev,
				  struct net_ptp_time *tm)
{
	struct ptp_context *ptp_context = dev->data;
	const struct eth_sam_dev_cfg *const cfg = ptp_context->eth_dev->config;
	Gmac *gmac = cfg->regs;

	tm->second = ((uint64_t)(gmac->GMAC_TSH & 0xffff) << 32) | gmac->GMAC_TSL;
	tm->nanosecond = gmac->GMAC_TN;

	return 0;
}

static int ptp_clock_sam_gmac_adjust(const struct device *dev, int increment)
{
	struct ptp_context *ptp_context = dev->data;
	const struct eth_sam_dev_cfg *const cfg = ptp_context->eth_dev->config;
	Gmac *gmac = cfg->regs;

	if ((increment <= -(int)NSEC_PER_SEC) || (increment >= (int)NSEC_PER_SEC)) {
		return -EINVAL;
	}

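	/*
	 * Apply a one-shot adjustment through the timer adjust register: the
	 * ADJ bit selects subtraction, ITDT carries the magnitude in
	 * nanoseconds (less than one second, as checked above).
	 */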
	if (increment < 0) {
		gmac->GMAC_TA = GMAC_TA_ADJ | GMAC_TA_ITDT(-increment);
	} else {
		gmac->GMAC_TA = GMAC_TA_ITDT(increment);
	}

	return 0;
}

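/* Rate (frequency) adjustment of the PTP clock is not implemented by this driver. */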
static int ptp_clock_sam_gmac_rate_adjust(const struct device *dev,
					  double ratio)
{
	return -ENOTSUP;
}

static DEVICE_API(ptp_clock, ptp_api) = {
	.set = ptp_clock_sam_gmac_set,
	.get = ptp_clock_sam_gmac_get,
	.adjust = ptp_clock_sam_gmac_adjust,
	.rate_adjust = ptp_clock_sam_gmac_rate_adjust,
};

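/*
 * Bind the PTP clock device to the Ethernet device so that
 * eth_sam_gmac_get_ptp_clock() can return it.
 */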
static int ptp_gmac_init(const struct device *port)
{
	const struct device *const eth_dev = DEVICE_DT_INST_GET(0);
	struct eth_sam_dev_data *dev_data = eth_dev->data;
	struct ptp_context *ptp_context = port->data;

	dev_data->ptp_clock = port;
	ptp_context->eth_dev = eth_dev;

	return 0;
}

DEVICE_DEFINE(gmac_ptp_clock_0, PTP_CLOCK_NAME, ptp_gmac_init,
	      NULL, &ptp_gmac_0_context, NULL, POST_KERNEL,
	      CONFIG_PTP_CLOCK_INIT_PRIORITY, &ptp_api);

#endif /* CONFIG_PTP_CLOCK_SAM_GMAC */