 * SPDX-License-Identifier: Apache-2.0
 * - Only supports 32-bit addresses in buffer descriptors, therefore
 *   the ZynqMP APU (Cortex-A53 cores) may not be fully supported.
 * - Hardware timestamps not considered.
 * - VLAN tags not considered.
 * - Wake-on-LAN interrupt not supported.
 * - Send function is not SMP-capable (due to single TX done semaphore).
 * - Interrupt-driven PHY management not supported - polling only.
 * - No explicit placement of the DMA memory area(s) in either a
 *   with the Cortex-R5 QEMU target or an actual R5 running without the
 * - No detailed error handling when evaluating the Interrupt Status,
 *   RX Status and TX Status registers.
 * Insert the configuration & run-time data for all GEM instances which
/* matched lines in DT_INST_FOREACH_STATUS_OKAY(): */
const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
if (dev_conf->init_phy) {
__ASSERT((dev_conf->phy_mdio_addr_fix >= 0 &&
dev_conf->phy_mdio_addr_fix <= 32),
"1 to 32, or 0 for auto-detection",
dev->name, dev_conf->phy_mdio_addr_fix);
__ASSERT(dev_conf->phy_poll_interval > 0,
"interval", dev->name);
__ASSERT((dev_conf->max_link_speed == LINK_10MBIT ||
dev_conf->max_link_speed == LINK_100MBIT ||
dev_conf->max_link_speed == LINK_1GBIT),
dev->name, (uint32_t)dev_conf->max_link_speed);
__ASSERT(dev_conf->mdc_divider <= MDC_DIVIDER_48,
"range 0 to %u", dev->name, dev_conf->mdc_divider,
__ASSERT(dev_conf->mdc_divider <= MDC_DIVIDER_224,
"range 0 to %u", dev->name, dev_conf->mdc_divider,
__ASSERT((dev_conf->amba_dbus_width == AMBA_AHB_DBUS_WIDTH_32BIT ||
dev_conf->amba_dbus_width == AMBA_AHB_DBUS_WIDTH_64BIT ||
dev_conf->amba_dbus_width == AMBA_AHB_DBUS_WIDTH_128BIT),
dev->name);
__ASSERT((dev_conf->ahb_burst_length == AHB_BURST_SINGLE ||
dev_conf->ahb_burst_length == AHB_BURST_INCR4 ||
dev_conf->ahb_burst_length == AHB_BURST_INCR8 ||
dev_conf->ahb_burst_length == AHB_BURST_INCR16),
dev->name);
/* HW RX buffer size */
__ASSERT((dev_conf->hw_rx_buffer_size == HWRX_BUFFER_SIZE_8KB ||
dev_conf->hw_rx_buffer_size == HWRX_BUFFER_SIZE_4KB ||
dev_conf->hw_rx_buffer_size == HWRX_BUFFER_SIZE_2KB ||
dev_conf->hw_rx_buffer_size == HWRX_BUFFER_SIZE_1KB),
"%s hardware RX buffer size configuration is invalid",
dev->name);
/* HW RX buffer offset */
__ASSERT(dev_conf->hw_rx_buffer_offset <= 3,
"%s hardware RX buffer offset %u is invalid, must be in "
"range 0 to 3", dev->name, dev_conf->hw_rx_buffer_offset);
 * RX & TX buffer sizes
 * RX Buffer size must be a multiple of 64, as the size of the
__ASSERT(dev_conf->rx_buffer_size % 64 == 0,
"%s RX buffer size %u is not a multiple of 64 bytes",
dev->name, dev_conf->rx_buffer_size);
__ASSERT((dev_conf->rx_buffer_size != 0 &&
dev_conf->rx_buffer_size <= 16320),
"%s RX buffer size %u is invalid, should be >64, "
"must be 16320 bytes maximum.", dev->name,
dev_conf->rx_buffer_size);
__ASSERT((dev_conf->tx_buffer_size != 0 &&
dev_conf->tx_buffer_size <= 16380),
"%s TX buffer size %u is invalid, should be >64, "
"must be 16380 bytes maximum.", dev->name,
dev_conf->tx_buffer_size);
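/*
 * Added note (not part of the original source): the 16320 byte upper limit
 * for the RX buffer size follows from the DMA config register encoding used
 * further below - the per-buffer size is stored as an 8-bit multiple of
 * 64 bytes, so the largest encodable value is 255 * 64 = 16320 bytes.
 */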
__ASSERT(!dev_conf->enable_rx_chksum_offload,
__ASSERT(!dev_conf->enable_tx_chksum_offload,
 * Initialization procedure as described in the Zynq-7000 TRM,
/* Enable MDIO -> set gem.net_ctrl[mgmt_port_en] */
if (dev_conf->init_phy) {
reg_val = sys_read32(dev_conf->base_addr +
sys_write32(reg_val, dev_conf->base_addr +
if (dev_conf->init_phy) {
/* matched lines in eth_xlnx_gem_iface_init(): */
const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
struct eth_xlnx_gem_dev_data *dev_data = dev->data;
/* Set the initial contents of the current instance's run-time data */
dev_data->iface = iface;
net_if_set_link_addr(iface, dev_data->mac_addr, 6, NET_LINK_ETHERNET);
 * Initialize the (delayed) work items for RX pending, TX done
k_work_init(&dev_data->tx_done_work, eth_xlnx_gem_tx_done_work);
k_work_init(&dev_data->rx_pend_work, eth_xlnx_gem_rx_pending_work);
k_work_init_delayable(&dev_data->phy_poll_delayed_work,
/* Initialize TX completion semaphore */
k_sem_init(&dev_data->tx_done_sem, 0, 1);
 * Initialize semaphores in the RX/TX BD rings which have not
k_sem_init(&dev_data->txbd_ring.ring_sem, 1, 1);
/* RX BD ring semaphore is not required for the time being */
dev_conf->config_func(dev);
k_work_reschedule(&dev_data->phy_poll_delayed_work, K_NO_WAIT);
 * and either immediately handles RX pending / TX complete notifications
/* matched lines in eth_xlnx_gem_isr(): */
const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
struct eth_xlnx_gem_dev_data *dev_data = dev->data;
reg_val = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET);
 * interrupt status register. -> For now, just log them
dev->name, reg_val);
 * reg_val & 0x00000080 -> gem.intr_status bit [7] = Frame TX complete
 * reg_val & 0x00000002 -> gem.intr_status bit [1] = Frame received
 * comp. Zynq-7000 TRM, Chapter B.18, p. 1289/1290.
dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET);
dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET);
if (dev_conf->defer_txd_to_queue) {
k_work_submit(&dev_data->tx_done_work);
dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET);
dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET);
if (dev_conf->defer_rxp_to_queue) {
k_work_submit(&dev_data->rx_pend_work);
 * Clear all interrupt status bits so that the interrupt is de-asserted
 * by the GEM. -> TXSR/RXSR are read/cleared by either eth_xlnx_gem_-
 * are not deferred to the system's work queue for the current inter-
dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET);
 * GEM data send function. Blocks until a TX complete notification has been
 * @retval -EINVAL in case of invalid parameters, e.g. zero data length
 * @retval -EIO in case of:
 * (1) the attempt to TX data while the device is stopped,
 * (2) the attempt to TX data while no free buffers are available
/* matched lines in eth_xlnx_gem_send(): */
const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
struct eth_xlnx_gem_dev_data *dev_data = dev->data;
if (!dev_data->started || dev_data->eff_link_speed == LINK_DOWN ||
(!net_if_flag_is_set(dev_data->iface, NET_IF_UP))) {
dev_data->stats.tx_dropped++;
return -EIO;
LOG_ERR("%s cannot TX, zero packet length", dev->name);
dev_data->stats.errors.tx++;
return -EINVAL;
 * the case. Update the 'next to use' BD index in the TX BD ring if
 * sufficient space is available. If TX done handling, where the BD
 * ring's data by taking the ring's semaphore. If TX done handling
 * disabling the TX done interrupt source.
bds_reqd = (uint8_t)((tx_data_length + (dev_conf->tx_buffer_size - 1)) /
dev_conf->tx_buffer_size);
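/*
 * Added example (not part of the original source): this is a plain ceiling
 * division. With a hypothetical tx_buffer_size of 512 bytes, a 1514 byte
 * frame needs (1514 + 511) / 512 = 3 buffer descriptors (512 + 512 + 490).
 */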
if (dev_conf->defer_txd_to_queue) {
k_sem_take(&(dev_data->txbd_ring.ring_sem), K_FOREVER);
dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET);
if (bds_reqd > dev_data->txbd_ring.free_bds) {
LOG_ERR("%s cannot TX, packet length %hu requires "
dev->name, tx_data_length, bds_reqd,
dev_data->txbd_ring.free_bds);
if (dev_conf->defer_txd_to_queue) {
k_sem_give(&(dev_data->txbd_ring.ring_sem));
dev_conf->base_addr + ETH_XLNX_GEM_IER_OFFSET);
dev_data->stats.tx_dropped++;
return -EIO;
curr_bd_idx = first_bd_idx = dev_data->txbd_ring.next_to_use;
reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl);
dev_data->txbd_ring.next_to_use = (first_bd_idx + bds_reqd) %
dev_conf->txbd_count;
dev_data->txbd_ring.free_bds -= bds_reqd;
if (dev_conf->defer_txd_to_queue) {
k_sem_give(&(dev_data->txbd_ring.ring_sem));
dev_conf->base_addr + ETH_XLNX_GEM_IER_OFFSET);
/* Calculate the base pointer of the target TX buffer */
tx_buffer_offs = (void *)(dev_data->first_tx_buffer +
(dev_conf->tx_buffer_size * curr_bd_idx));
(tx_data_remaining < dev_conf->tx_buffer_size) ?
tx_data_remaining : dev_conf->tx_buffer_size);
reg_val |= (tx_data_remaining < dev_conf->tx_buffer_size) ?
tx_data_remaining : dev_conf->tx_buffer_size;
if (tx_data_remaining > dev_conf->tx_buffer_size) {
curr_bd_idx = (curr_bd_idx + 1) % dev_conf->txbd_count;
reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl);
tx_data_remaining -= (tx_data_remaining < dev_conf->tx_buffer_size) ?
tx_data_remaining : dev_conf->tx_buffer_size;
 * Zynq-7000 TRM, the 'used' bits shall be cleared in reverse
curr_bd_idx = (curr_bd_idx != 0) ? (curr_bd_idx - 1) :
(dev_conf->txbd_count - 1);
reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl);
/* Set the start TX bit in the gem.net_ctrl register */
reg_val = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);
sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);
dev_data->stats.bytes.sent += tx_data_length;
dev_data->stats.pkts.tx++;
/* Block until TX has completed */
sem_status = k_sem_take(&dev_data->tx_done_sem, K_MSEC(100));
LOG_ERR("%s TX confirmation timed out", dev->name);
dev_data->stats.tx_timeout_count++;
return -EIO;
 * pending interrupts, enables RX and TX, enables interrupts. If
/* matched lines in eth_xlnx_gem_start_device(): */
const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
struct eth_xlnx_gem_dev_data *dev_data = dev->data;
if (dev_data->started) {
dev_data->started = true;
dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET);
dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET);
/* Clear RX & TX status registers */
sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_TXSR_OFFSET);
sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_RXSR_OFFSET);
/* RX and TX enable */
reg_val = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);
sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);
dev_conf->base_addr + ETH_XLNX_GEM_IER_OFFSET);
if (k_work_delayable_remaining_get(&dev_data->phy_poll_delayed_work) == 0) {
k_work_reschedule(&dev_data->phy_poll_delayed_work, K_NO_WAIT);
LOG_DBG("%s started", dev->name);
 * RX and TX, clears all status registers. If no PHY is managed
/* matched lines in eth_xlnx_gem_stop_device(): */
const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
struct eth_xlnx_gem_dev_data *dev_data = dev->data;
if (!dev_data->started) {
dev_data->started = false;
if (k_work_delayable_remaining_get(&dev_data->phy_poll_delayed_work) != 0) {
k_work_cancel_delayable(&dev_data->phy_poll_delayed_work);
/* RX and TX disable */
reg_val = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);
sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);
dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET);
dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET);
/* Clear RX & TX status registers */
sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_TXSR_OFFSET);
sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_RXSR_OFFSET);
LOG_DBG("%s stopped", dev->name);
/* matched lines in eth_xlnx_gem_get_capabilities(): */
const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
if (dev_conf->max_link_speed == LINK_1GBIT) {
if (dev_conf->phy_advertise_lower) {
} else if (dev_conf->max_link_speed == LINK_100MBIT) {
if (dev_conf->phy_advertise_lower) {
if (dev_conf->enable_rx_chksum_offload) {
if (dev_conf->enable_tx_chksum_offload) {
if (dev_conf->enable_fdx) {
if (dev_conf->copy_all_frames) {
 * Currently only supports querying the RX and TX hardware checksum
 * queried, -ENOTSUP if the specified configuration item
/* matched lines in eth_xlnx_gem_get_config(): */
const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
if (dev_conf->enable_rx_chksum_offload) {
config->chksum_support = ETHERNET_CHECKSUM_SUPPORT_IPV4_HEADER |
config->chksum_support = ETHERNET_CHECKSUM_SUPPORT_NONE;
if (dev_conf->enable_tx_chksum_offload) {
config->chksum_support = ETHERNET_CHECKSUM_SUPPORT_IPV4_HEADER |
config->chksum_support = ETHERNET_CHECKSUM_SUPPORT_NONE;
return -ENOTSUP;
/* matched lines in eth_xlnx_gem_stats(): */
struct eth_xlnx_gem_dev_data *dev_data = dev->data;
return &dev_data->stats;
/* matched lines in eth_xlnx_gem_reset_hw(): */
const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
 * Controller reset sequence as described in the Zynq-7000 TRM,
dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);
dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);
/* Clear the RX/TX status registers */
dev_conf->base_addr + ETH_XLNX_GEM_TXSR_OFFSET);
dev_conf->base_addr + ETH_XLNX_GEM_RXSR_OFFSET);
dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET);
dev_conf->base_addr + ETH_XLNX_GEM_RXQBASE_OFFSET);
dev_conf->base_addr + ETH_XLNX_GEM_TXQBASE_OFFSET);
 * Calculates the pre-scalers for the TX clock to match the current
/* matched lines in eth_xlnx_gem_configure_clocks(): */
 * in the Zynq-7000 TRM, chapter 16.3.3, is not tackled here. This
 * values for the respective GEM's TX clock are calculated here.
const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
struct eth_xlnx_gem_dev_data *dev_data = dev->data;
if ((!dev_conf->init_phy) || dev_data->eff_link_speed == LINK_DOWN) {
 * Run-time data indicates 'link down' or PHY management
 * is disabled for the current device -> this indicates the
 * delayed work handler has picked up the result of the auto-
 * negotiation (if enabled), this if-statement will evaluate
if (dev_conf->max_link_speed == LINK_10MBIT) {
} else if (dev_conf->max_link_speed == LINK_100MBIT) {
} else if (dev_conf->max_link_speed == LINK_1GBIT) {
} else if (dev_data->eff_link_speed != LINK_DOWN) {
if (dev_data->eff_link_speed == LINK_10MBIT) {
} else if (dev_data->eff_link_speed == LINK_100MBIT) {
} else if (dev_data->eff_link_speed == LINK_1GBIT) {
tmp = ((dev_conf->pll_clock_frequency / div0) / div1);
if (tmp >= (target - 10) && tmp <= (target + 10)) {
if (tmp >= (target - 10) && tmp <= (target + 10)) {
clk_ctrl_reg = sys_read32(dev_conf->clk_ctrl_reg_address);
sys_write32(clk_ctrl_reg, dev_conf->clk_ctrl_reg_address);
clk_ctrl_reg = sys_read32(dev_conf->clk_ctrl_reg_address);
sys_write32(clk_ctrl_reg, dev_conf->clk_ctrl_reg_address);
"frequency %u Hz", dev->name, div0, div1, target);
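/*
 * Added sketch (not part of the original source): the divider search elided
 * from the excerpt above boils down to finding two dividers so that
 * pll_clock_frequency / (div0 * div1) lands within a small tolerance of the
 * target TX clock. The 1..63 divider range below is an assumption based on
 * typical 6-bit Zynq clock control divider fields; the driver's actual loop
 * bounds and tolerance handling may differ.
 */
static int example_find_tx_clk_dividers(uint32_t pll_clock_frequency,
					 uint32_t target,
					 uint32_t *div0_out, uint32_t *div1_out)
{
	for (uint32_t div0 = 1; div0 < 64; div0++) {
		for (uint32_t div1 = 1; div1 < 64; div1++) {
			uint32_t tmp = (pll_clock_frequency / div0) / div1;

			if (tmp >= (target - 10) && tmp <= (target + 10)) {
				*div0_out = div0;
				*div1_out = div1;
				return 0;
			}
		}
	}

	return -EINVAL; /* no matching divider pair for this PLL/target combination */
}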
 * the current target is a Zynq-7000 or a ZynqMP.
/* matched lines in eth_xlnx_gem_set_initial_nwcfg(): */
const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
if (dev_conf->ignore_ipg_rxer) {
if (dev_conf->disable_reject_nsp) {
/* [29] disable rejection of non-standard preamble */
if (dev_conf->enable_ipg_stretch) {
if (dev_conf->enable_sgmii_mode) {
if (dev_conf->disable_reject_fcs_crc_errors) {
if (dev_conf->enable_rx_halfdup_while_tx) {
/* [25] RX half duplex while TX enable */
if (dev_conf->enable_rx_chksum_offload) {
/* [24] enable RX IP/TCP/UDP checksum offload */
if (dev_conf->disable_pause_copy) {
reg_val |= (((uint32_t)(dev_conf->amba_dbus_width) &
reg_val |= (((uint32_t)dev_conf->mdc_divider &
if (dev_conf->discard_rx_fcs) {
if (dev_conf->discard_rx_length_errors) {
/* [16] RX length error discard */
/* [15..14] RX buffer offset */
reg_val |= (((uint32_t)dev_conf->hw_rx_buffer_offset &
if (dev_conf->enable_pause) {
/* [13] Enable pause TX */
if (dev_conf->enable_tbi) {
if (dev_conf->ext_addr_match) {
if (dev_conf->enable_1536_frames) {
if (dev_conf->enable_ucast_hash) {
if (dev_conf->enable_mcast_hash) {
if (dev_conf->disable_bcast) {
if (dev_conf->copy_all_frames) {
if (dev_conf->discard_non_vlan) {
if (dev_conf->enable_fdx) {
if (dev_conf->max_link_speed == LINK_100MBIT) {
} else if (dev_conf->max_link_speed == LINK_1GBIT) {
 * No else-branch for 10Mbit/s mode:
sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCFG_OFFSET);
 * Updates only the link speed-related bits of the Network Configuration
/* matched lines in eth_xlnx_gem_set_nwcfg_link_speed(): */
const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
struct eth_xlnx_gem_dev_data *dev_data = dev->data;
 * the link speed-related bits
reg_val = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_NWCFG_OFFSET);
if (dev_data->eff_link_speed == LINK_100MBIT) {
} else if (dev_data->eff_link_speed == LINK_1GBIT) {
sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCFG_OFFSET);
/* matched lines in eth_xlnx_gem_set_mac_address(): */
const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
struct eth_xlnx_gem_dev_data *dev_data = dev->data;
regval_bot = (dev_data->mac_addr[0] & 0xFF);
regval_bot |= (dev_data->mac_addr[1] & 0xFF) << 8;
regval_bot |= (dev_data->mac_addr[2] & 0xFF) << 16;
regval_bot |= (dev_data->mac_addr[3] & 0xFF) << 24;
regval_top = (dev_data->mac_addr[4] & 0xFF);
regval_top |= (dev_data->mac_addr[5] & 0xFF) << 8;
sys_write32(regval_bot, dev_conf->base_addr + ETH_XLNX_GEM_LADDR1L_OFFSET);
sys_write32(regval_top, dev_conf->base_addr + ETH_XLNX_GEM_LADDR1H_OFFSET);
dev->name,
dev_data->mac_addr[0],
dev_data->mac_addr[1],
dev_data->mac_addr[2],
dev_data->mac_addr[3],
dev_data->mac_addr[4],
dev_data->mac_addr[5]);
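/*
 * Added example (not part of the original source): for a hypothetical MAC
 * address 00:0a:35:01:02:03 the packing above yields
 * regval_bot = 0x01350A00 (bytes 0..3, LSB first) and
 * regval_top = 0x00000302 (bytes 4..5), which are then written to the
 * LADDR1L/LADDR1H register pair.
 */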
/* matched lines in eth_xlnx_gem_set_initial_dmacr(): */
const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
 * comp. Zynq-7000 TRM, p. 1278 ff.
if (dev_conf->disc_rx_ahb_unavail) {
/* [24] Discard RX packet when AHB unavailable */
 * [23..16] DMA RX buffer size in AHB system memory
reg_val |= (((dev_conf->rx_buffer_size / 64) &
if (dev_conf->enable_tx_chksum_offload) {
/* [11] TX TCP/UDP/IP checksum offload to GEM */
if (dev_conf->tx_buffer_size_full) {
/* [10] TX buffer memory size select */
 * [09..08] RX packet buffer memory size select
reg_val |= (((uint32_t)dev_conf->hw_rx_buffer_size <<
if (dev_conf->enable_ahb_packet_endian_swap) {
/* [07] AHB packet data endian swap enable */
if (dev_conf->enable_ahb_md_endian_swap) {
/* [06] AHB mgmt descriptor endian swap enable */
reg_val |= ((uint32_t)dev_conf->ahb_burst_length &
sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_DMACR_OFFSET);
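/*
 * Added example (not part of the original source): the [23..16] field above
 * encodes the DMA RX buffer size as a multiple of 64 bytes, so a hypothetical
 * rx_buffer_size of 1536 bytes is written as 1536 / 64 = 24 (0x18) into that
 * bit field.
 */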
/* matched lines in eth_xlnx_gem_init_phy(): */
struct eth_xlnx_gem_dev_data *dev_data = dev->data;
LOG_DBG("%s attempting to initialize associated PHY", dev->name);
 * vendor-specific implementations of the PHY management
 * functions to the run-time device data struct, along with
 * the ID and the MDIO address of the detected PHY (dev_data->
 * phy_id, dev_data->phy_addr, dev_data->phy_access_api).
if (detect_rc == 0 && dev_data->phy_id != 0x00000000 &&
dev_data->phy_id != 0xFFFFFFFF &&
dev_data->phy_access_api != NULL) {
/* A compatible PHY was detected -> reset & configure it */
dev_data->phy_access_api->phy_reset_func(dev);
dev_data->phy_access_api->phy_configure_func(dev);
LOG_WRN("%s no compatible PHY detected", dev->name);
 * PHY, the delayed work item will be re-scheduled in order to continuously
 * change being propagated (carrier on/off) and/or the TX clock being
 * reconfigured to match the current link speed. If PHY management is dis-
 * the work item will not be re-scheduled and default link speed and link
 * state values are applied. This function refers to functionality imple-
/* matched lines in eth_xlnx_gem_poll_phy(): */
const struct device *dev = net_if_get_device(dev_data->iface);
const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
if (dev_data->phy_access_api != NULL) {
phy_status = dev_data->phy_access_api->phy_poll_status_change_func(dev);
link_status = dev_data->phy_access_api->phy_poll_link_status_func(dev);
 * Link is down -> propagate to the Ethernet
dev_data->eff_link_speed = LINK_DOWN;
net_eth_carrier_off(dev_data->iface);
LOG_WRN("%s link down", dev->name);
 * Therefore, the clock dividers must be ad-
dev_data->eff_link_speed =
dev_data->phy_access_api->phy_poll_link_speed_func(dev);
net_eth_carrier_on(dev_data->iface);
LOG_INF("%s link up, %s", dev->name,
(dev_data->eff_link_speed == LINK_1GBIT)
: (dev_data->eff_link_speed == LINK_100MBIT)
: (dev_data->eff_link_speed == LINK_10MBIT)
 * Re-submit the delayed work using the interval from the device
k_work_reschedule(&dev_data->phy_poll_delayed_work,
K_MSEC(dev_conf->phy_poll_interval));
 * supported PHY was detected -> pretend the configured max.
 * is up. The delayed work item won't be re-scheduled, as
dev_data->eff_link_speed = dev_conf->max_link_speed;
net_eth_carrier_on(dev_data->iface);
"PHY detected, assuming link up at %s", dev->name,
(dev_conf->max_link_speed == LINK_1GBIT)
: (dev_conf->max_link_speed == LINK_100MBIT)
: (dev_conf->max_link_speed == LINK_10MBIT)
/* matched lines in eth_xlnx_gem_configure_buffers(): */
const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
struct eth_xlnx_gem_dev_data *dev_data = dev->data;
/* Initial configuration of the RX/TX BD rings */
 * Set initial RX BD data -> comp. Zynq-7000 TRM, Chapter 16.3.5,
 * the base RX/TX buffer pointers will be set in eth_xlnx_gem_-
bdptr = dev_data->rxbd_ring.first_bd;
for (buf_iter = 0; buf_iter < (dev_conf->rxbd_count - 1); buf_iter++) {
/* Clear 'used' bit -> BD is owned by the controller */
bdptr->ctrl = 0;
bdptr->addr = (uint32_t)dev_data->first_rx_buffer +
(buf_iter * (uint32_t)dev_conf->rx_buffer_size);
 * address -> this is the 'wrap' bit indicating that this is the
 * anyways. Watch out: TX BDs handle this differently, their wrap
bdptr->ctrl = 0; /* BD is owned by the controller */
bdptr->addr = ((uint32_t)dev_data->first_rx_buffer +
(buf_iter * (uint32_t)dev_conf->rx_buffer_size)) |
 * Set initial TX BD data -> comp. Zynq-7000 TRM, Chapter 16.3.5,
 * "Transmit Buffer Descriptor List". TX BD ring data has already
bdptr = dev_data->txbd_ring.first_bd;
for (buf_iter = 0; buf_iter < (dev_conf->txbd_count - 1); buf_iter++) {
/* Set up the control word -> 'used' flag must be set. */
bdptr->ctrl = ETH_XLNX_GEM_TXBD_USED_BIT;
bdptr->addr = (uint32_t)dev_data->first_tx_buffer +
(buf_iter * (uint32_t)dev_conf->tx_buffer_size);
 * that this BD is the last one in the ring. -> For TX BDs, the 'wrap'
bdptr->ctrl = (ETH_XLNX_GEM_TXBD_WRAP_BIT | ETH_XLNX_GEM_TXBD_USED_BIT);
bdptr->addr = (uint32_t)dev_data->first_tx_buffer +
(buf_iter * (uint32_t)dev_conf->tx_buffer_size);
/* Set free count/current index in the RX/TX BD ring data */
dev_data->rxbd_ring.next_to_process = 0;
dev_data->rxbd_ring.next_to_use = 0;
dev_data->rxbd_ring.free_bds = dev_conf->rxbd_count;
dev_data->txbd_ring.next_to_process = 0;
dev_data->txbd_ring.next_to_use = 0;
dev_data->txbd_ring.free_bds = dev_conf->txbd_count;
/* Write pointers to the first RX/TX BD to the controller */
sys_write32((uint32_t)dev_data->rxbd_ring.first_bd,
dev_conf->base_addr + ETH_XLNX_GEM_RXQBASE_OFFSET);
sys_write32((uint32_t)dev_data->txbd_ring.first_bd,
dev_conf->base_addr + ETH_XLNX_GEM_TXQBASE_OFFSET);
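/*
 * Added sketch (not part of the original source): the ring setup above
 * accesses each buffer descriptor via .addr and .ctrl members. A minimal
 * matching layout for the 32-bit descriptors would look as follows; the
 * driver's actual declaration (names, alignment attributes) may differ.
 * Note the asymmetry pointed out in the comments above: RX BDs carry their
 * 'wrap' marker in the address word, while TX BDs carry 'wrap' and 'used'
 * in the control word.
 */
struct example_gem_bd {
	uint32_t addr; /* buffer address word (RX: low bits double as flags) */
	uint32_t ctrl; /* control/status word (TX: 'used'/'wrap' flags live here) */
};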
 * @brief GEM RX data pending handler wrapper for the work queue
 * Wraps the RX data pending handler, eth_xlnx_gem_handle_rx_pending,
 * to defer RX pending / TX done indication handling to the system
/* matched lines in eth_xlnx_gem_rx_pending_work(): */
const struct device *dev = net_if_get_device(dev_data->iface);
 * @brief GEM RX data pending handler
 * context of the system work queue whenever the RX data pending bit
 * No further RX data pending interrupts will be triggered until this
 * data from the DMA memory area via the RX buffer descriptors and copies
/* matched lines in eth_xlnx_gem_handle_rx_pending(): */
const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
struct eth_xlnx_gem_dev_data *dev_data = dev->data;
/* Read the RX status register */
reg_val_rxsr = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_RXSR_OFFSET);
 * TODO Evaluate error flags from RX status register word
curr_bd_idx = dev_data->rxbd_ring.next_to_process;
reg_addr = (uint32_t)(&dev_data->rxbd_ring.first_bd[first_bd_idx].addr);
reg_ctrl = (uint32_t)(&dev_data->rxbd_ring.first_bd[first_bd_idx].ctrl);
 * -> break out of the RX loop
LOG_ERR("%s unexpected missing SOF bit in RX BD [%u]",
dev->name, first_bd_idx);
reg_ctrl = (uint32_t)(&dev_data->rxbd_ring.first_bd[last_bd_idx].ctrl);
last_bd_idx = (last_bd_idx + 1) % dev_conf->rxbd_count;
dev_data->rxbd_ring.next_to_process = (last_bd_idx + 1) %
dev_conf->rxbd_count;
pkt = net_pkt_rx_alloc_with_buffer(dev_data->iface, rx_data_length,
LOG_ERR("RX packet buffer alloc failed: %u bytes",
dev_data->stats.errors.rx++;
dev_data->stats.error_details.rx_no_buffer_count++;
 * Copy data from all involved RX buffers into the allocated
 * packet's data buffer. If we don't have a packet buffer be-
 * involved BDs in order to properly release them for re-use
(dev_data->rxbd_ring.first_bd[curr_bd_idx].addr &
(rx_data_remaining < dev_conf->rx_buffer_size) ?
rx_data_remaining : dev_conf->rx_buffer_size);
rx_data_remaining -= (rx_data_remaining < dev_conf->rx_buffer_size) ?
rx_data_remaining : dev_conf->rx_buffer_size;
 * processed, on to the next BD -> preserve the RX BD's
reg_addr = (uint32_t)(&dev_data->rxbd_ring.first_bd[curr_bd_idx].addr);
curr_bd_idx = (curr_bd_idx + 1) % dev_conf->rxbd_count;
} while (curr_bd_idx != ((last_bd_idx + 1) % dev_conf->rxbd_count));
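/*
 * Added example (not part of the original source): with a hypothetical
 * rxbd_count of 8 and a frame whose SOF is in BD 6 and EOF in BD 0,
 * first_bd_idx = 6 and last_bd_idx = 0, so the copy loop above visits
 * BDs 6, 7 and 0 and terminates once curr_bd_idx reaches (0 + 1) % 8 = 1,
 * which also becomes the ring's new next_to_process index.
 */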
if (net_recv_data(dev_data->iface, pkt) < 0) {
LOG_ERR("%s RX packet hand-over to IP stack failed",
dev->name);
dev_data->stats.bytes.received += rx_data_length;
dev_data->stats.pkts.rx++;
/* Clear the RX status register */
sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_RXSR_OFFSET);
/* Re-enable the frame received interrupt source */
dev_conf->base_addr + ETH_XLNX_GEM_IER_OFFSET);
 * @brief GEM TX done handler wrapper for the work queue
 * Wraps the TX done handler, eth_xlnx_gem_handle_tx_done,
 * to defer RX pending / TX done indication handling to the system
/* matched lines in eth_xlnx_gem_tx_done_work(): */
const struct device *dev = net_if_get_device(dev_data->iface);
 * @brief GEM TX done handler
 * context of the system work queue whenever the TX done bit is set
 * No further TX done interrupts will be triggered until this handler
 * it is unblocked by posting to the current GEM's TX done semaphore
/* matched lines in eth_xlnx_gem_handle_tx_done(): */
const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
struct eth_xlnx_gem_dev_data *dev_data = dev->data;
/* Read the TX status register */
reg_val_txsr = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_TXSR_OFFSET);
 * TODO Evaluate error flags from TX status register word
if (dev_conf->defer_txd_to_queue) {
k_sem_take(&(dev_data->txbd_ring.ring_sem), K_FOREVER);
curr_bd_idx = first_bd_idx = dev_data->txbd_ring.next_to_process;
reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl);
curr_bd_idx = (curr_bd_idx + 1) % dev_conf->txbd_count;
reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl);
LOG_WRN("%s TX done handling wrapped around", dev->name);
dev_data->txbd_ring.next_to_process =
(dev_data->txbd_ring.next_to_process + bds_processed) %
dev_conf->txbd_count;
dev_data->txbd_ring.free_bds += bds_processed;
if (dev_conf->defer_txd_to_queue) {
k_sem_give(&(dev_data->txbd_ring.ring_sem));
/* Clear the TX status register */
sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_TXSR_OFFSET);
/* Re-enable the TX complete interrupt source */
dev_conf->base_addr + ETH_XLNX_GEM_IER_OFFSET);
k_sem_give(&dev_data->tx_done_sem);