Lines Matching +full:pre +full:- +full:div in eth_sam_gmac.c (Zephyr SAM GMAC Ethernet driver)
6 * SPDX-License-Identifier: Apache-2.0
12 * This is a zero-copy networking implementation of an Ethernet driver. To
18 * - one-shot PHY setup, no support for PHY disconnect/reconnect
19 * - no statistics collection
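The zero-copy approach referenced above works by pointing RX descriptors directly at the data areas of pre-reserved net_buf fragments; when a frame arrives, the filled fragment is detached and handed up the stack while a fresh fragment is slotted into the descriptor (see the frame_get() excerpts below). A minimal sketch of that swap, using hypothetical alloc_rx_frag()/deliver() helpers:

    /* Hedged sketch of the zero-copy RX swap; alloc_rx_frag() and
     * deliver() are hypothetical stand-ins, not driver functions.
     */
    struct net_buf *filled = rx_frag_list[tail];   /* fragment the DMA just wrote */
    struct net_buf *fresh = alloc_rx_frag();       /* from the pre-reserved pool */

    if (fresh != NULL) {
            rx_frag_list[tail] = fresh;
            /* rearm the descriptor with the new buffer address ... */
            rx_desc->w0 = ((uint32_t)fresh->data & GMAC_RXW0_ADDR) | wrap;
            /* ... and pass the filled fragment up without copying it */
            deliver(filled);
    }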
67 dcache_enabled = (SCB->CCR & SCB_CCR_DC_Msk); in dcache_is_enabled()
76 uint32_t start_addr = addr & (uint32_t)~(GMAC_DCACHE_ALIGNMENT - 1); in dcache_invalidate()
77 uint32_t size_full = size + addr - start_addr; in dcache_invalidate()
89 uint32_t start_addr = addr & (uint32_t)~(GMAC_DCACHE_ALIGNMENT - 1); in dcache_clean()
90 uint32_t size_full = size + addr - start_addr; in dcache_clean()
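Both cache helpers widen the requested range to cache-line granularity: the start address is rounded down to a GMAC_DCACHE_ALIGNMENT boundary and the size grows by the amount rounded off. Worked example, assuming a 32-byte cache line:

    /* addr = 0x20001234, size = 100, GMAC_DCACHE_ALIGNMENT = 32 (assumed):
     *   start_addr = 0x20001234 & ~0x1F = 0x20001220  (round down to line)
     *   size_full  = 100 + (0x20001234 - 0x20001220) = 120
     * so the clean/invalidate covers every line overlapping the buffer.
     */
    uint32_t start_addr = addr & (uint32_t)~(GMAC_DCACHE_ALIGNMENT - 1);
    uint32_t size_full = size + addr - start_addr;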
119 #if CONFIG_NET_BUF_DATA_SIZE * (CONFIG_NET_BUF_RX_COUNT - \
121 #error (CONFIG_NET_BUF_RX_COUNT - CONFIG_ETH_SAM_GMAC_BUF_RX_COUNT) * \
255 if (queue->que_idx == GMAC_QUE_0) { in set_receive_buf_queue_pointer()
256 gmac->GMAC_RBQB = (uint32_t)queue->rx_desc_list.buf; in set_receive_buf_queue_pointer()
258 gmac->GMAC_RBQBAPQ[queue->que_idx - 1] = in set_receive_buf_queue_pointer()
259 (uint32_t)queue->rx_desc_list.buf; in set_receive_buf_queue_pointer()
268 gmac->GMAC_IDRPQ[idx] = UINT32_MAX; in disable_all_priority_queue_interrupt()
269 (void)gmac->GMAC_ISRPQ[idx]; in disable_all_priority_queue_interrupt()
278 __ASSERT_NO_MSG(queue->rx_desc_list.len > 0); in priority_queue_init()
279 __ASSERT_NO_MSG(queue->tx_desc_list.len > 0); in priority_queue_init()
280 __ASSERT(!((uint32_t)queue->rx_desc_list.buf & ~GMAC_RBQB_ADDR_Msk), in priority_queue_init()
282 __ASSERT(!((uint32_t)queue->tx_desc_list.buf & ~GMAC_TBQB_ADDR_Msk), in priority_queue_init()
286 queue_index = queue->que_idx - 1; in priority_queue_init()
297 k_sem_init(&queue->tx_sem, 0, 1); in priority_queue_init()
299 k_sem_init(&queue->tx_desc_sem, queue->tx_desc_list.len - 1, in priority_queue_init()
300 queue->tx_desc_list.len - 1); in priority_queue_init()
304 gmac->GMAC_RBSRPQ[queue_index] = in priority_queue_init()
308 gmac->GMAC_RBQBAPQ[queue_index] = (uint32_t)queue->rx_desc_list.buf; in priority_queue_init()
310 gmac->GMAC_TBQBAPQ[queue_index] = (uint32_t)queue->tx_desc_list.buf; in priority_queue_init()
313 gmac->GMAC_IERPQ[queue_index] = GMAC_INTPQ_EN_FLAGS; in priority_queue_init()
315 queue->err_rx_frames_dropped = 0U; in priority_queue_init()
316 queue->err_rx_flushed_count = 0U; in priority_queue_init()
317 queue->err_tx_flushed_count = 0U; in priority_queue_init()
319 LOG_INF("Queue %d activated", queue->que_idx); in priority_queue_init()
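Note that tx_desc_sem is seeded with len - 1 tokens, not len: one descriptor slot is apparently always held back so the ring can keep a terminating descriptor with GMAC_TXW1_USED set in front of the DMA (eth_tx() below writes that terminator explicitly). A hedged sketch of the accounting; fill_descriptor() is a hypothetical helper:

    /* With N descriptors, at most N - 1 carry fragments; the slot at
     * 'head' stays marked USED so the DMA halts there rather than
     * wrapping into descriptors still in flight.
     */
    k_sem_init(&queue->tx_desc_sem, N - 1, N - 1);

    k_sem_take(&queue->tx_desc_sem, K_FOREVER);   /* claim one slot */
    fill_descriptor(&ring[head], frag);
    MODULO_INC(head, N);
    ring[head].w1 = GMAC_TXW1_USED;               /* keep the terminator */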
326 struct gmac_desc_list *rx_desc_list = &queue->rx_desc_list; in priority_queue_init_as_idle()
327 struct gmac_desc_list *tx_desc_list = &queue->tx_desc_list; in priority_queue_init_as_idle()
329 __ASSERT(!((uint32_t)rx_desc_list->buf & ~GMAC_RBQB_ADDR_Msk), in priority_queue_init_as_idle()
331 __ASSERT(!((uint32_t)tx_desc_list->buf & ~GMAC_TBQB_ADDR_Msk), in priority_queue_init_as_idle()
333 __ASSERT((rx_desc_list->len == 1U) && (tx_desc_list->len == 1U), in priority_queue_init_as_idle()
339 rx_desc_list->buf[0].w0 = GMAC_RXW0_WRAP; in priority_queue_init_as_idle()
340 rx_desc_list->buf[0].w1 = 0U; in priority_queue_init_as_idle()
342 tx_desc_list->buf[0].w0 = 0U; in priority_queue_init_as_idle()
344 tx_desc_list->buf[0].w1 = GMAC_TXW1_USED | GMAC_TXW1_WRAP; in priority_queue_init_as_idle()
347 gmac->GMAC_RBQBAPQ[queue->que_idx - 1] = (uint32_t)rx_desc_list->buf; in priority_queue_init_as_idle()
349 gmac->GMAC_TBQBAPQ[queue->que_idx - 1] = (uint32_t)tx_desc_list->buf; in priority_queue_init_as_idle()
351 LOG_INF("Queue %d set to idle", queue->que_idx); in priority_queue_init_as_idle()
358 if (queue->que_idx == GMAC_QUE_0) { in queue_init()
360 } else if (queue->que_idx <= GMAC_ACTIVE_PRIORITY_QUEUE_NUM) { in queue_init()
372 gmac->GMAC_RBQB = (uint32_t)queue->rx_desc_list.buf; in set_receive_buf_queue_pointer()
408 rb->head = 0U; in ring_buf_reset()
409 rb->tail = 0U; in ring_buf_reset()
419 __ASSERT(rb->tail != rb->head, in ring_buf_get()
422 val = rb->buf[rb->tail]; in ring_buf_get()
423 MODULO_INC(rb->tail, rb->len); in ring_buf_get()
433 rb->buf[rb->head] = val; in ring_buf_put()
434 MODULO_INC(rb->head, rb->len); in ring_buf_put()
436 __ASSERT(rb->tail != rb->head, in ring_buf_put()
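ring_buf_get()/ring_buf_put() above form a classic index-chasing ring: head is the producer index, tail the consumer index, and the ring is empty when they meet, which caps usable capacity at len - 1 entries. A self-contained sketch, assuming MODULO_INC(var, max) expands to a wrap-at-max increment:

    #include <stdint.h>
    #include <assert.h>

    #define MODULO_INC(var, max) ((var) = ((var) + 1) % (max))  /* assumed expansion */

    struct ring_buf {
            uint32_t *buf;
            uint16_t len;
            uint16_t head;   /* producer index */
            uint16_t tail;   /* consumer index; empty when head == tail */
    };

    static void rb_put(struct ring_buf *rb, uint32_t val)
    {
            rb->buf[rb->head] = val;
            MODULO_INC(rb->head, rb->len);
            assert(rb->head != rb->tail);   /* a full ring would read as empty */
    }

    static uint32_t rb_get(struct ring_buf *rb)
    {
            uint32_t val;

            assert(rb->tail != rb->head);   /* never read an empty ring */
            val = rb->buf[rb->tail];
            MODULO_INC(rb->tail, rb->len);
            return val;
    }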
442 * Free pre-reserved RX buffers
462 gmac->GMAC_SA[index].GMAC_SAB = (mac_addr[3] << 24) in mac_addr_set()
466 gmac->GMAC_SA[index].GMAC_SAT = (mac_addr[5] << 8) in mac_addr_set()
475 struct gmac_desc_list *rx_desc_list = &queue->rx_desc_list; in rx_descriptors_init()
476 struct net_buf **rx_frag_list = queue->rx_frag_list; in rx_descriptors_init()
482 rx_desc_list->tail = 0U; in rx_descriptors_init()
484 for (int i = 0; i < rx_desc_list->len; i++) { in rx_descriptors_init()
488 free_rx_bufs(rx_frag_list, rx_desc_list->len); in rx_descriptors_init()
490 return -ENOBUFS; in rx_descriptors_init()
495 rx_buf_addr = rx_buf->data; in rx_descriptors_init()
498 __ASSERT(rx_buf->size == CONFIG_NET_BUF_DATA_SIZE, in rx_descriptors_init()
501 rx_desc_list->buf[i].w0 = (uint32_t)rx_buf_addr & GMAC_RXW0_ADDR; in rx_descriptors_init()
502 rx_desc_list->buf[i].w1 = 0U; in rx_descriptors_init()
506 rx_desc_list->buf[rx_desc_list->len - 1U].w0 |= GMAC_RXW0_WRAP; in rx_descriptors_init()
516 struct gmac_desc_list *tx_desc_list = &queue->tx_desc_list; in tx_descriptors_init()
518 tx_desc_list->head = 0U; in tx_descriptors_init()
519 tx_desc_list->tail = 0U; in tx_descriptors_init()
521 for (int i = 0; i < tx_desc_list->len; i++) { in tx_descriptors_init()
522 tx_desc_list->buf[i].w0 = 0U; in tx_descriptors_init()
523 tx_desc_list->buf[i].w1 = GMAC_TXW1_USED; in tx_descriptors_init()
527 tx_desc_list->buf[tx_desc_list->len - 1U].w1 |= GMAC_TXW1_WRAP; in tx_descriptors_init()
531 ring_buf_reset(&queue->tx_frag_list); in tx_descriptors_init()
533 ring_buf_reset(&queue->tx_frames); in tx_descriptors_init()
549 if (ntohs(hdr->type) != NET_ETH_PTYPE_PTP) { in check_gptp_msg()
561 if (pkt->frags->frags == NULL) { in check_gptp_msg()
565 gptp_hdr = (struct gptp_hdr *)pkt->frags->frags->data; in check_gptp_msg()
567 gptp_hdr = (struct gptp_hdr *)(pkt->frags->data + eth_hlen); in check_gptp_msg()
575 switch (hdr->message_type) { in need_timestamping()
586 if (GPTP_IS_EVENT_MSG(hdr->message_type)) { in update_pkt_priority()
597 ts.second = ((uint64_t)(gmac->GMAC_EFRSH & 0xffff) << 32) in get_ptp_event_rx_ts()
598 | gmac->GMAC_EFRSL; in get_ptp_event_rx_ts()
599 ts.nanosecond = gmac->GMAC_EFRN; in get_ptp_event_rx_ts()
608 ts.second = ((uint64_t)(gmac->GMAC_PEFRSH & 0xffff) << 32) in get_ptp_peer_event_rx_ts()
609 | gmac->GMAC_PEFRSL; in get_ptp_peer_event_rx_ts()
610 ts.nanosecond = gmac->GMAC_PEFRN; in get_ptp_peer_event_rx_ts()
619 ts.second = ((uint64_t)(gmac->GMAC_EFTSH & 0xffff) << 32) in get_ptp_event_tx_ts()
620 | gmac->GMAC_EFTSL; in get_ptp_event_tx_ts()
621 ts.nanosecond = gmac->GMAC_EFTN; in get_ptp_event_tx_ts()
630 ts.second = ((uint64_t)(gmac->GMAC_PEFTSH & 0xffff) << 32) in get_ptp_peer_event_tx_ts()
631 | gmac->GMAC_PEFTSL; in get_ptp_peer_event_tx_ts()
632 ts.nanosecond = gmac->GMAC_PEFTN; in get_ptp_peer_event_tx_ts()
641 ts.second = ((uint64_t)(gmac->GMAC_TSH & 0xffff) << 32) | gmac->GMAC_TSL; in get_current_ts()
642 ts.nanosecond = gmac->GMAC_TN; in get_current_ts()
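All five timestamp readers above share one shape: seconds are a 48-bit counter split across a 16-bit high register and a 32-bit low register, with nanoseconds in a third register. A hedged generic helper (struct net_ptp_time also exposes the split seconds as _sec.high/_sec.low, as ptp_clock_sam_gmac_set() below shows):

    #include <zephyr/net/ptp_time.h>   /* struct net_ptp_time */

    /* Compose a PTP timestamp from 16-bit-high / 32-bit-low seconds
     * registers plus a nanoseconds register.
     */
    static struct net_ptp_time read_ts(uint32_t sec_hi, uint32_t sec_lo,
                                       uint32_t ns)
    {
            struct net_ptp_time ts;

            ts.second = ((uint64_t)(sec_hi & 0xffff) << 32) | sec_lo;
            ts.nanosecond = ns;

            return ts;
    }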
654 switch (hdr->message_type) { in timestamp_tx_pkt()
674 switch (hdr->message_type) { in timestamp_rx_pkt()
692 return ctx->iface; in get_iface()
701 k_sem_give(&queue->tx_sem); in tx_completed()
703 struct gmac_desc_list *tx_desc_list = &queue->tx_desc_list; in tx_completed()
711 queue_list[queue->que_idx]); in tx_completed()
714 __ASSERT(tx_desc_list->buf[tx_desc_list->tail].w1 & GMAC_TXW1_USED, in tx_completed()
717 while (tx_desc_list->tail != tx_desc_list->head) { in tx_completed()
719 tx_desc = &tx_desc_list->buf[tx_desc_list->tail]; in tx_completed()
720 MODULO_INC(tx_desc_list->tail, tx_desc_list->len); in tx_completed()
721 k_sem_give(&queue->tx_desc_sem); in tx_completed()
724 frag = UINT_TO_POINTER(ring_buf_get(&queue->tx_frag_list)); in tx_completed()
728 if (tx_desc->w1 & GMAC_TXW1_LASTBUFFER) { in tx_completed()
731 pkt = UINT_TO_POINTER(ring_buf_get(&queue->tx_frames)); in tx_completed()
759 struct ring_buf *tx_frag_list = &queue->tx_frag_list; in tx_error_handler()
762 struct ring_buf *tx_frames = &queue->tx_frames; in tx_error_handler()
766 queue->err_tx_flushed_count++; in tx_error_handler()
769 gmac->GMAC_NCR &= ~GMAC_NCR_TXEN; in tx_error_handler()
773 while (tx_frag_list->tail != tx_frag_list->head) { in tx_error_handler()
775 frag = UINT_TO_POINTER(tx_frag_list->buf[tx_frag_list->tail]); in tx_error_handler()
778 MODULO_INC(tx_frag_list->tail, tx_frag_list->len); in tx_error_handler()
783 while (tx_frames->tail != tx_frames->head) { in tx_error_handler()
785 pkt = UINT_TO_POINTER(tx_frames->buf[tx_frames->tail]); in tx_error_handler()
788 MODULO_INC(tx_frames->tail, tx_frames->len); in tx_error_handler()
793 k_sem_reset(&queue->tx_desc_sem); in tx_error_handler()
794 for (int i = 0; i < queue->tx_desc_list.len - 1; i++) { in tx_error_handler()
795 k_sem_give(&queue->tx_desc_sem); in tx_error_handler()
802 k_sem_give(&queue->tx_sem); in tx_error_handler()
806 gmac->GMAC_NCR |= GMAC_NCR_TXEN; in tx_error_handler()
814 queue->err_rx_flushed_count++; in rx_error_handler()
817 gmac->GMAC_NCR &= ~GMAC_NCR_RXEN; in rx_error_handler()
819 queue->rx_desc_list.tail = 0U; in rx_error_handler()
821 for (int i = 0; i < queue->rx_desc_list.len; i++) { in rx_error_handler()
822 queue->rx_desc_list.buf[i].w1 = 0U; in rx_error_handler()
823 queue->rx_desc_list.buf[i].w0 &= ~GMAC_RXW0_OWNERSHIP; in rx_error_handler()
829 gmac->GMAC_NCR |= GMAC_NCR_RXEN; in rx_error_handler()
855 mck_divisor = -ENOTSUP; in get_mck_clock_divisor()
866 return -EINVAL; in eth_sam_gmac_setup_qav()
871 gmac->GMAC_CBSCR |= GMAC_CBSCR_QAE; in eth_sam_gmac_setup_qav()
873 gmac->GMAC_CBSCR &= ~GMAC_CBSCR_QAE; in eth_sam_gmac_setup_qav()
877 gmac->GMAC_CBSCR |= GMAC_CBSCR_QBE; in eth_sam_gmac_setup_qav()
879 gmac->GMAC_CBSCR &= ~GMAC_CBSCR_QBE; in eth_sam_gmac_setup_qav()
890 return -EINVAL; in eth_sam_gmac_get_qav_status()
894 *enabled = gmac->GMAC_CBSCR & GMAC_CBSCR_QAE; in eth_sam_gmac_get_qav_status()
896 *enabled = gmac->GMAC_CBSCR & GMAC_CBSCR_QBE; in eth_sam_gmac_get_qav_status()
909 return -EINVAL; in eth_sam_gmac_setup_qav_idle_slope()
912 cbscr_val = gmac->GMAC_CBSISQA; in eth_sam_gmac_setup_qav_idle_slope()
915 gmac->GMAC_CBSCR &= ~GMAC_CBSCR_QAE; in eth_sam_gmac_setup_qav_idle_slope()
916 gmac->GMAC_CBSISQA = idle_slope; in eth_sam_gmac_setup_qav_idle_slope()
918 gmac->GMAC_CBSCR &= ~GMAC_CBSCR_QBE; in eth_sam_gmac_setup_qav_idle_slope()
919 gmac->GMAC_CBSISQB = idle_slope; in eth_sam_gmac_setup_qav_idle_slope()
922 gmac->GMAC_CBSCR = cbscr_val; in eth_sam_gmac_setup_qav_idle_slope()
933 * 1Gbps - therefore we cannot use the KB/MB macros - we have to in eth_sam_gmac_get_bandwidth()
936 if (gmac->GMAC_NCFGR & GMAC_NCFGR_SPD) { in eth_sam_gmac_get_bandwidth()
952 return -EINVAL; in eth_sam_gmac_get_qav_idle_slope()
956 *idle_slope = gmac->GMAC_CBSISQA; in eth_sam_gmac_get_qav_idle_slope()
958 *idle_slope = gmac->GMAC_CBSISQB; in eth_sam_gmac_get_qav_idle_slope()
985 /* Calculate percentage - instead of multiplying idle_slope by 100, in eth_sam_gmac_get_qav_delta_bandwidth()
986 * divide bandwidth - these numbers are so large that it should not in eth_sam_gmac_get_qav_delta_bandwidth()
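The comment spells out an overflow guard: delta bandwidth is idle_slope * 100 / bandwidth, but at gigabit rates idle_slope * 100 can exceed 32 bits, so the driver divides bandwidth by 100 first. Worked example with assumed values:

    /* Assumed: 100 Mb/s link -> bandwidth = 12500000 B/s, and an
     * idle_slope of 4375000 B/s (35 Mb/s worth of credit).
     */
    uint32_t bandwidth = 12500000U;
    uint32_t idle_slope = 4375000U;

    /* idle_slope * 100 still fits here, but at 1 Gb/s rates
     * (125000000 B/s * 100 > UINT32_MAX) it would not; dividing
     * bandwidth first is always safe.
     */
    uint32_t delta_bandwidth = idle_slope / (bandwidth / 100U);   /* == 35 */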
1002 return -EINVAL; in eth_sam_gmac_setup_qav_delta_bandwidth()
1019 int div; in gmac_setup_ptp_clock_divisors() local
1028 div = mck_divs[i]; in gmac_setup_ptp_clock_divisors()
1029 while ((double)(min_cycles / div) == (int)(min_cycles / div) && in gmac_setup_ptp_clock_divisors()
1030 (double)(min_period / div) == (int)(min_period / div)) { in gmac_setup_ptp_clock_divisors()
1031 min_cycles /= div; in gmac_setup_ptp_clock_divisors()
1032 min_period /= div; in gmac_setup_ptp_clock_divisors()
1036 nit = min_cycles - 1; in gmac_setup_ptp_clock_divisors()
1044 acns = min_period - (nit * cns); in gmac_setup_ptp_clock_divisors()
1046 gmac->GMAC_TI = in gmac_setup_ptp_clock_divisors()
1048 gmac->GMAC_TISUBN = 0; in gmac_setup_ptp_clock_divisors()
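gmac_setup_ptp_clock_divisors() reduces MCK-cycles-per-second and nanoseconds-per-second by the common factors {10, 5, 2} until the pair is in lowest terms, then encodes the result into GMAC_TI. Worked example, assuming MCK = 120 MHz (register field semantics per the SAM datasheet):

    /* min_cycles = 120000000, min_period = 1000000000 (NSEC_PER_SEC)
     *   /10 seven times -> 12 cycles per 100 ns
     *   /2  twice       ->  3 cycles per  25 ns
     * nit  = 3 - 1 = 2
     * cns  = 11  (largest value with (cns + 2) * nit < min_period)
     * acns = 25 - 2 * 11 = 3
     * i.e. two 11 ns increments plus one 3 ns increment cover 25 ns
     * every 3 MCK cycles, which is exact for 120 MHz.
     */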
1062 gmac->GMAC_NCR = GMAC_NCR_CLRSTAT | GMAC_NCR_MPE; in gmac_init()
1065 gmac->GMAC_IDR = UINT32_MAX; in gmac_init()
1067 (void)gmac->GMAC_ISR; in gmac_init()
1070 /* Setup Hash Registers - enable reception of all multicast frames when in gmac_init()
1073 gmac->GMAC_HRB = UINT32_MAX; in gmac_init()
1074 gmac->GMAC_HRT = UINT32_MAX; in gmac_init()
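Writing all ones to HRB/HRT makes the 64-bit hash filter match every hash index, which accepts all multicast frames once multicast-hash reception is enabled in NCFGR. For reference, the hash index is a 6-bit XOR fold of the destination MAC (per the Cadence GEM / SAM GMAC datasheets); a hedged sketch:

    /* Bit n of the 6-bit index is the XOR of destination-address bits
     * n, n + 6, n + 12, ... (da[0] is the LSB of the first byte on the
     * wire); the index selects one bit of the HRT:HRB pair.
     */
    static uint8_t gmac_hash_index(const uint8_t mac[6])
    {
            uint8_t index = 0;

            for (int bit = 0; bit < 48; bit++) {
                    uint8_t da_bit = (mac[bit / 8] >> (bit % 8)) & 1;

                    index ^= da_bit << (bit % 6);
            }

            return index;
    }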
1076 gmac->GMAC_NCFGR = gmac_ncfgr_val | mck_divisor; in gmac_init()
1078 /* Default (RMII) is defined at atmel,gmac-common.yaml file */ in gmac_init()
1081 gmac->GMAC_UR = 0x1; in gmac_init()
1084 gmac->GMAC_UR = 0x0; in gmac_init()
1090 return -EINVAL; in gmac_init()
1097 gmac->GMAC_TN = 0; in gmac_init()
1098 gmac->GMAC_TSH = 0; in gmac_init()
1099 gmac->GMAC_TSL = 0; in gmac_init()
1113 * This does not work like that in SAM GMAC - the lower priority queues in gmac_init()
1146 val = gmac->GMAC_NCFGR; in link_configure()
1152 gmac->GMAC_NCFGR = val; in link_configure()
1154 gmac->GMAC_NCR |= (GMAC_NCR_RXEN | GMAC_NCR_TXEN); in link_configure()
1161 __ASSERT_NO_MSG(queue->rx_desc_list.len > 0); in nonpriority_queue_init()
1162 __ASSERT_NO_MSG(queue->tx_desc_list.len > 0); in nonpriority_queue_init()
1163 __ASSERT(!((uint32_t)queue->rx_desc_list.buf & ~GMAC_RBQB_ADDR_Msk), in nonpriority_queue_init()
1165 __ASSERT(!((uint32_t)queue->tx_desc_list.buf & ~GMAC_TBQB_ADDR_Msk), in nonpriority_queue_init()
1180 k_sem_init(&queue->tx_sem, 0, 1); in nonpriority_queue_init()
1186 k_sem_init(&queue->tx_desc_sem, queue->tx_desc_list.len - 1, in nonpriority_queue_init()
1187 queue->tx_desc_list.len - 1); in nonpriority_queue_init()
1191 gmac->GMAC_RBQB = (uint32_t)queue->rx_desc_list.buf; in nonpriority_queue_init()
1193 gmac->GMAC_TBQB = (uint32_t)queue->tx_desc_list.buf; in nonpriority_queue_init()
1196 gmac->GMAC_DCFGR = in nonpriority_queue_init()
1209 gmac->GMAC_IER = GMAC_INT_EN_FLAGS; in nonpriority_queue_init()
1211 queue->err_rx_frames_dropped = 0U; in nonpriority_queue_init()
1212 queue->err_rx_flushed_count = 0U; in nonpriority_queue_init()
1213 queue->err_tx_flushed_count = 0U; in nonpriority_queue_init()
1215 LOG_INF("Queue %d activated", queue->que_idx); in nonpriority_queue_init()
1222 struct gmac_desc_list *rx_desc_list = &queue->rx_desc_list; in frame_get()
1224 struct net_buf **rx_frag_list = queue->rx_frag_list; in frame_get()
1237 tail = rx_desc_list->tail; in frame_get()
1238 rx_desc = &rx_desc_list->buf[tail]; in frame_get()
1240 while ((rx_desc->w0 & GMAC_RXW0_OWNERSHIP) in frame_get()
1242 frame_is_complete = (bool)(rx_desc->w1 in frame_get()
1244 MODULO_INC(tail, rx_desc_list->len); in frame_get()
1245 rx_desc = &rx_desc_list->buf[tail]; in frame_get()
1257 tail = rx_desc_list->tail; in frame_get()
1258 rx_desc = &rx_desc_list->buf[tail]; in frame_get()
1265 __ASSERT(rx_desc->w1 & GMAC_RXW1_SOF, in frame_get()
1272 while ((rx_desc->w0 & GMAC_RXW0_OWNERSHIP) in frame_get()
1276 (uint8_t *)(rx_desc->w0 & GMAC_RXW0_ADDR); in frame_get()
1277 __ASSERT(frag->data == frag_data, in frame_get()
1279 frame_is_complete = (bool)(rx_desc->w1 & GMAC_RXW1_EOF); in frame_get()
1281 frag_len = (rx_desc->w1 & GMAC_RXW1_LEN) - frame_len; in frame_get()
1291 dcache_invalidate((uint32_t)frag_data, frag->size); in frame_get()
1296 queue->err_rx_frames_dropped++; in frame_get()
1313 rx_desc->w1 = 0U; in frame_get()
1319 wrap = (tail == rx_desc_list->len - 1U ? GMAC_RXW0_WRAP : 0); in frame_get()
1320 rx_desc->w0 = ((uint32_t)frag->data & GMAC_RXW0_ADDR) | wrap; in frame_get()
1322 MODULO_INC(tail, rx_desc_list->len); in frame_get()
1323 rx_desc = &rx_desc_list->buf[tail]; in frame_get()
1326 rx_desc_list->tail = tail; in frame_get()
1337 queue_list[queue->que_idx]); in eth_rx()
1340 const struct device *const dev = net_if_get_device(dev_data->iface); in eth_rx()
1341 const struct eth_sam_dev_cfg *const cfg = dev->config; in eth_rx()
1342 Gmac *gmac = cfg->regs; in eth_rx()
1404 const struct eth_sam_dev_cfg *const cfg = dev->config; in eth_tx()
1405 struct eth_sam_dev_data *const dev_data = dev->data; in eth_tx()
1406 Gmac *gmac = cfg->regs; in eth_tx()
1426 __ASSERT(pkt->frags, "Frame data missing"); in eth_tx()
1435 queue = &dev_data->queue_list[CONFIG_ETH_SAM_GMAC_FORCED_QUEUE]; in eth_tx()
1438 queue = &dev_data->queue_list[net_tx_priority2tc(pkt_prio)]; in eth_tx()
1440 /* If that's not possible due to config - use builtin mapping */ in eth_tx()
1441 queue = &dev_data->queue_list[priority2queue(pkt_prio)]; in eth_tx()
1444 tx_desc_list = &queue->tx_desc_list; in eth_tx()
1445 err_tx_flushed_count_at_entry = queue->err_tx_flushed_count; in eth_tx()
1447 frag = pkt->frags; in eth_tx()
1450 tx_first_desc = &tx_desc_list->buf[tx_desc_list->head]; in eth_tx()
1453 frag_data = frag->data; in eth_tx()
1454 frag_len = frag->len; in eth_tx()
1457 dcache_clean((uint32_t)frag_data, frag->size); in eth_tx()
1460 k_sem_take(&queue->tx_desc_sem, K_FOREVER); in eth_tx()
1469 if (queue->err_tx_flushed_count != in eth_tx()
1472 return -EIO; in eth_tx()
1476 tx_desc = &tx_desc_list->buf[tx_desc_list->head]; in eth_tx()
1479 tx_desc->w0 = (uint32_t)frag_data; in eth_tx()
1484 tx_desc->w1 = (frag_len & GMAC_TXW1_LEN) in eth_tx()
1485 | (!frag->frags ? GMAC_TXW1_LASTBUFFER : 0) in eth_tx()
1486 | (tx_desc_list->head == tx_desc_list->len - 1U in eth_tx()
1491 MODULO_INC(tx_desc_list->head, tx_desc_list->len); in eth_tx()
1494 __ASSERT(tx_desc_list->head != tx_desc_list->tail, in eth_tx()
1498 ring_buf_put(&queue->tx_frag_list, POINTER_TO_UINT(frag)); in eth_tx()
1507 frag = frag->frags; in eth_tx()
1514 if (queue->err_tx_flushed_count != err_tx_flushed_count_at_entry) { in eth_tx()
1516 return -EIO; in eth_tx()
1521 tx_desc_list->buf[tx_desc_list->head].w1 = GMAC_TXW1_USED; in eth_tx()
1531 tx_first_desc->w1 &= ~GMAC_TXW1_USED; in eth_tx()
1536 ring_buf_put(&queue->tx_frames, POINTER_TO_UINT(pkt)); in eth_tx()
1551 gmac->GMAC_NCR |= GMAC_NCR_TSTART; in eth_tx()
1555 k_sem_take(&queue->tx_sem, K_FOREVER); in eth_tx()
1558 if (queue->err_tx_flushed_count != err_tx_flushed_count_at_entry) { in eth_tx()
1559 return -EIO; in eth_tx()
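Taken together, the eth_tx() excerpts above implement the zero-copy transmit path in a fixed order; a hedged outline:

    /* 1. dcache_clean() each fragment so the DMA reads current data.
     * 2. Take tx_desc_sem, then point a descriptor at frag->data and
     *    set the length / LASTBUFFER / WRAP bits, leaving the first
     *    descriptor's USED bit set for now.
     * 3. Mark the descriptor after the last fragment USED, so the DMA
     *    stops at the ring's new head.
     * 4. Only then clear USED on the first descriptor; the DMA can
     *    never start on a half-built chain.
     * 5. Set GMAC_NCR_TSTART and block on tx_sem until tx_completed()
     *    or tx_error_handler() releases it.
     */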
1577 const struct eth_sam_dev_cfg *const cfg = dev->config; in queue0_isr()
1578 struct eth_sam_dev_data *const dev_data = dev->data; in queue0_isr()
1579 Gmac *gmac = cfg->regs; in queue0_isr()
1587 isr = gmac->GMAC_ISR; in queue0_isr()
1590 queue = &dev_data->queue_list[0]; in queue0_isr()
1591 rx_desc_list = &queue->rx_desc_list; in queue0_isr()
1592 tx_desc_list = &queue->tx_desc_list; in queue0_isr()
1598 tail_desc = &rx_desc_list->buf[rx_desc_list->tail]; in queue0_isr()
1600 tail_desc->w1, in queue0_isr()
1601 rx_desc_list->tail); in queue0_isr()
1610 tail_desc = &tx_desc_list->buf[tx_desc_list->tail]; in queue0_isr()
1612 tail_desc->w1, in queue0_isr()
1613 tx_desc_list->tail); in queue0_isr()
1628 const struct eth_sam_dev_cfg *const cfg = dev->config; in priority_queue_isr()
1629 struct eth_sam_dev_data *const dev_data = dev->data; in priority_queue_isr()
1630 Gmac *gmac = cfg->regs; in priority_queue_isr()
1637 isrpq = gmac->GMAC_ISRPQ[queue_idx - 1]; in priority_queue_isr()
1638 LOG_DBG("GMAC_ISRPQ%d=0x%08x", queue_idx - 1, isrpq); in priority_queue_isr()
1640 queue = &dev_data->queue_list[queue_idx]; in priority_queue_isr()
1641 rx_desc_list = &queue->rx_desc_list; in priority_queue_isr()
1642 tx_desc_list = &queue->tx_desc_list; in priority_queue_isr()
1648 tail_desc = &rx_desc_list->buf[rx_desc_list->tail]; in priority_queue_isr()
1650 tail_desc->w1, in priority_queue_isr()
1651 rx_desc_list->tail); in priority_queue_isr()
1660 tail_desc = &tx_desc_list->buf[tx_desc_list->tail]; in priority_queue_isr()
1662 tail_desc->w1, in priority_queue_isr()
1663 tx_desc_list->tail); in priority_queue_isr()
1670 LOG_DBG("IERPQ%d HRESP", queue_idx - 1); in priority_queue_isr()
1712 const struct eth_sam_dev_cfg *const cfg = dev->config; in eth_initialize()
1715 cfg->config_func(); in eth_initialize()
1720 (clock_control_subsys_t)&cfg->clock_cfg); in eth_initialize()
1723 MCLK->AHBMASK.reg |= MCLK_AHBMASK_GMAC; in eth_initialize()
1727 retval = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); in eth_initialize()
1769 struct eth_sam_dev_data *const dev_data = dev->data; in phy_link_state_changed()
1770 const struct eth_sam_dev_cfg *const cfg = dev->config; in phy_link_state_changed()
1773 is_up = state->is_up; in phy_link_state_changed()
1775 if (is_up && !dev_data->link_up) { in phy_link_state_changed()
1779 dev_data->link_up = true; in phy_link_state_changed()
1780 net_eth_carrier_on(dev_data->iface); in phy_link_state_changed()
1783 link_configure(cfg->regs, in phy_link_state_changed()
1784 PHY_LINK_IS_FULL_DUPLEX(state->speed), in phy_link_state_changed()
1785 PHY_LINK_IS_SPEED_100M(state->speed)); in phy_link_state_changed()
1786 } else if (!is_up && dev_data->link_up) { in phy_link_state_changed()
1790 dev_data->link_up = false; in phy_link_state_changed()
1791 net_eth_carrier_off(dev_data->iface); in phy_link_state_changed()
1797 const struct eth_sam_dev_cfg *const cfg = dev->config; in eth_sam_gmac_get_phy()
1799 return cfg->phy_dev; in eth_sam_gmac_get_phy()
1805 struct eth_sam_dev_data *const dev_data = dev->data; in eth0_iface_init()
1806 const struct eth_sam_dev_cfg *const cfg = dev->config; in eth0_iface_init()
1812 if (dev_data->iface == NULL) { in eth0_iface_init()
1813 dev_data->iface = iface; in eth0_iface_init()
1833 result = gmac_init(cfg->regs, gmac_ncfgr_val); in eth0_iface_init()
1839 generate_mac(dev_data->mac_addr); in eth0_iface_init()
1842 dev_data->mac_addr[0], dev_data->mac_addr[1], in eth0_iface_init()
1843 dev_data->mac_addr[2], dev_data->mac_addr[3], in eth0_iface_init()
1844 dev_data->mac_addr[4], dev_data->mac_addr[5]); in eth0_iface_init()
1847 mac_addr_set(cfg->regs, 0, dev_data->mac_addr); in eth0_iface_init()
1850 net_if_set_link_addr(iface, dev_data->mac_addr, in eth0_iface_init()
1851 sizeof(dev_data->mac_addr), in eth0_iface_init()
1856 result = queue_init(cfg->regs, &dev_data->queue_list[i]); in eth0_iface_init()
1866 cfg->regs->GMAC_ST1RPQ[i] = in eth0_iface_init()
1873 * Map them 1:1 - TC 0 -> Queue 0, TC 1 -> Queue 1 etc. in eth0_iface_init()
1876 cfg->regs->GMAC_ST1RPQ[i] = in eth0_iface_init()
1884 for (j = NET_PRIORITY_NC; j >= 0; --j) { in eth0_iface_init()
1890 if (i >= ARRAY_SIZE(cfg->regs->GMAC_ST2RPQ)) { in eth0_iface_init()
1895 cfg->regs->GMAC_ST2RPQ[i++] = in eth0_iface_init()
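ST1RPQ/ST2RPQ are the GMAC's screening registers: each programmed entry matches a frame property and steers matching frames into a priority queue, and the loop above consumes one entry per network priority until the ST2RPQ array runs out. A hedged sketch of one type 2 entry steering a VLAN priority (the field macros are assumed names from the SAM device headers):

    /* Route frames tagged with VLAN PCP 'prio' to priority queue
     * 'queue'; GMAC_ST2RPQ_QNB/VLANP/VLANE are assumed macro names.
     */
    regs->GMAC_ST2RPQ[entry] = GMAC_ST2RPQ_QNB(queue)
                             | GMAC_ST2RPQ_VLANP(prio)
                             | GMAC_ST2RPQ_VLANE;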
1903 if (device_is_ready(cfg->phy_dev)) { in eth0_iface_init()
1904 phy_link_callback_set(cfg->phy_dev, &phy_link_state_changed, in eth0_iface_init()
1912 if (!(dev_data->link_up)) { in eth0_iface_init()
1942 const struct eth_sam_dev_cfg *const cfg = dev->config; in eth_sam_gmac_set_qav_param()
1943 Gmac *gmac = cfg->regs; in eth_sam_gmac_set_qav_param()
1951 queue_id = config->qav_param.queue_id + 1; in eth_sam_gmac_set_qav_param()
1953 qav_param_type = config->qav_param.type; in eth_sam_gmac_set_qav_param()
1957 enable = config->qav_param.enabled; in eth_sam_gmac_set_qav_param()
1960 delta_bandwidth = config->qav_param.delta_bandwidth; in eth_sam_gmac_set_qav_param()
1965 idle_slope = config->qav_param.idle_slope; in eth_sam_gmac_set_qav_param()
1967 /* The standard uses bps, SAM GMAC uses Bps - convert now */ in eth_sam_gmac_set_qav_param()
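802.1Q expresses idleSlope in bits per second while the CBSISQA/CBSISQB registers take bytes per second, hence the conversion the comment announces. A hedged sketch of the step that presumably follows:

    /* e.g. an idleSlope of 35 Mb/s: 35000000 bit/s / 8 = 4375000 B/s */
    idle_slope /= 8U;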
1976 return -ENOTSUP; in eth_sam_gmac_set_qav_param()
1993 struct eth_sam_dev_data *const dev_data = dev->data; in eth_sam_gmac_set_config()
1994 const struct eth_sam_dev_cfg *const cfg = dev->config; in eth_sam_gmac_set_config()
1996 memcpy(dev_data->mac_addr, in eth_sam_gmac_set_config()
1997 config->mac_address.addr, in eth_sam_gmac_set_config()
1998 sizeof(dev_data->mac_addr)); in eth_sam_gmac_set_config()
2001 mac_addr_set(cfg->regs, 0, dev_data->mac_addr); in eth_sam_gmac_set_config()
2004 dev->name, in eth_sam_gmac_set_config()
2005 dev_data->mac_addr[0], dev_data->mac_addr[1], in eth_sam_gmac_set_config()
2006 dev_data->mac_addr[2], dev_data->mac_addr[3], in eth_sam_gmac_set_config()
2007 dev_data->mac_addr[4], dev_data->mac_addr[5]); in eth_sam_gmac_set_config()
2010 net_if_set_link_addr(dev_data->iface, dev_data->mac_addr, in eth_sam_gmac_set_config()
2011 sizeof(dev_data->mac_addr), in eth_sam_gmac_set_config()
2016 result = -ENOTSUP; in eth_sam_gmac_set_config()
2028 const struct eth_sam_dev_cfg *const cfg = dev->config; in eth_sam_gmac_get_qav_param()
2029 Gmac *gmac = cfg->regs; in eth_sam_gmac_get_qav_param()
2037 queue_id = config->qav_param.queue_id + 1; in eth_sam_gmac_get_qav_param()
2039 qav_param_type = config->qav_param.type; in eth_sam_gmac_get_qav_param()
2043 enabled = &config->qav_param.enabled; in eth_sam_gmac_get_qav_param()
2046 idle_slope = &config->qav_param.idle_slope; in eth_sam_gmac_get_qav_param()
2050 idle_slope = &config->qav_param.oper_idle_slope; in eth_sam_gmac_get_qav_param()
2054 delta_bandwidth = &config->qav_param.delta_bandwidth; in eth_sam_gmac_get_qav_param()
2059 config->qav_param.traffic_class = queue_id; in eth_sam_gmac_get_qav_param()
2062 /* Invalid configuration - no direct TC to queue mapping */ in eth_sam_gmac_get_qav_param()
2063 return -ENOTSUP; in eth_sam_gmac_get_qav_param()
2069 return -ENOTSUP; in eth_sam_gmac_get_qav_param()
2079 config->priority_queues_num = GMAC_ACTIVE_PRIORITY_QUEUE_NUM; in eth_sam_gmac_get_config()
2089 return -ENOTSUP; in eth_sam_gmac_get_config()
2095 struct eth_sam_dev_data *const dev_data = dev->data; in eth_sam_gmac_get_ptp_clock()
2097 return dev_data->ptp_clock; in eth_sam_gmac_get_ptp_clock()
2352 struct ptp_context *ptp_context = dev->data; in ptp_clock_sam_gmac_set()
2353 const struct eth_sam_dev_cfg *const cfg = ptp_context->eth_dev->config; in ptp_clock_sam_gmac_set()
2354 Gmac *gmac = cfg->regs; in ptp_clock_sam_gmac_set()
2356 gmac->GMAC_TSH = tm->_sec.high & 0xffff; in ptp_clock_sam_gmac_set()
2357 gmac->GMAC_TSL = tm->_sec.low & 0xffffffff; in ptp_clock_sam_gmac_set()
2358 gmac->GMAC_TN = tm->nanosecond & 0xffffffff; in ptp_clock_sam_gmac_set()
2366 struct ptp_context *ptp_context = dev->data; in ptp_clock_sam_gmac_get()
2367 const struct eth_sam_dev_cfg *const cfg = ptp_context->eth_dev->config; in ptp_clock_sam_gmac_get()
2368 Gmac *gmac = cfg->regs; in ptp_clock_sam_gmac_get()
2370 tm->second = ((uint64_t)(gmac->GMAC_TSH & 0xffff) << 32) | gmac->GMAC_TSL; in ptp_clock_sam_gmac_get()
2371 tm->nanosecond = gmac->GMAC_TN; in ptp_clock_sam_gmac_get()
2378 struct ptp_context *ptp_context = dev->data; in ptp_clock_sam_gmac_adjust()
2379 const struct eth_sam_dev_cfg *const cfg = ptp_context->eth_dev->config; in ptp_clock_sam_gmac_adjust()
2380 Gmac *gmac = cfg->regs; in ptp_clock_sam_gmac_adjust()
2382 if ((increment <= -(int)NSEC_PER_SEC) || (increment >= (int)NSEC_PER_SEC)) { in ptp_clock_sam_gmac_adjust()
2383 return -EINVAL; in ptp_clock_sam_gmac_adjust()
2387 gmac->GMAC_TA = GMAC_TA_ADJ | GMAC_TA_ITDT(-increment); in ptp_clock_sam_gmac_adjust()
2389 gmac->GMAC_TA = GMAC_TA_ITDT(increment); in ptp_clock_sam_gmac_adjust()
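GMAC_TA applies a one-shot offset: ITDT carries the magnitude in nanoseconds and the ADJ bit selects subtraction, which is why the guard above rejects any |increment| of a full second or more. A hedged usage sketch through Zephyr's PTP clock API (iface is an assumed network interface handle):

    #include <zephyr/drivers/ptp_clock.h>
    #include <zephyr/net/ethernet.h>

    /* Nudge the GMAC PTP clock back by 500 ns; values outside
     * (-1 s, 1 s) come back as -EINVAL per the check above.
     */
    const struct device *clk = net_eth_get_ptp_clock(iface);

    if (clk != NULL && ptp_clock_adjust(clk, -500) == 0) {
            /* the hardware applied the offset atomically */
    }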
2398 return -ENOTSUP; in ptp_clock_sam_gmac_rate_adjust()
2411 struct eth_sam_dev_data *dev_data = eth_dev->data; in ptp_gmac_init()
2412 struct ptp_context *ptp_context = port->data; in ptp_gmac_init()
2414 dev_data->ptp_clock = port; in ptp_gmac_init()
2415 ptp_context->eth_dev = eth_dev; in ptp_gmac_init()