/*
 * Xilinx Processor System Gigabit Ethernet controller (GEM) driver
 *
 * Copyright (c) 2021, Weidmueller Interface GmbH & Co. KG
 * SPDX-License-Identifier: Apache-2.0
 *
 * Known current limitations / TODOs:
 * - Only supports 32-bit addresses in buffer descriptors, therefore
 *   the ZynqMP APU (Cortex-A53 cores) may not be fully supported.
 * - Hardware timestamps not considered.
 * - VLAN tags not considered.
 * - Wake-on-LAN interrupt not supported.
 * - Send function is not SMP-capable (due to single TX done semaphore).
 * - Interrupt-driven PHY management not supported - polling only.
 * - No explicit placement of the DMA memory area(s) in either a
 *   specific memory section or at a fixed memory location yet. This
 *   is not an issue as long as the controller is used in conjunction
 *   with the Cortex-R5 QEMU target or an actual R5 running without the
 *   MPU enabled.
 * - No detailed error handling when evaluating the Interrupt Status,
 *   RX Status and TX Status registers.
 */

#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/sys/__assert.h>

#include <zephyr/net/net_if.h>
#include <zephyr/net/ethernet.h>
#include <ethernet/eth_stats.h>

#include "eth_xlnx_gem_priv.h"

#define LOG_MODULE_NAME eth_xlnx_gem
#define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_MODULE_NAME);

static int eth_xlnx_gem_dev_init(const struct device *dev);
static void eth_xlnx_gem_iface_init(struct net_if *iface);
static void eth_xlnx_gem_isr(const struct device *dev);
static int eth_xlnx_gem_send(const struct device *dev, struct net_pkt *pkt);
static int eth_xlnx_gem_start_device(const struct device *dev);
static int eth_xlnx_gem_stop_device(const struct device *dev);
static enum ethernet_hw_caps
	eth_xlnx_gem_get_capabilities(const struct device *dev);
static int eth_xlnx_gem_get_config(const struct device *dev,
				   enum ethernet_config_type type,
				   struct ethernet_config *config);
#if defined(CONFIG_NET_STATISTICS_ETHERNET)
static struct net_stats_eth *eth_xlnx_gem_stats(const struct device *dev);
#endif

static void eth_xlnx_gem_reset_hw(const struct device *dev);
static void eth_xlnx_gem_configure_clocks(const struct device *dev);
static void eth_xlnx_gem_set_initial_nwcfg(const struct device *dev);
static void eth_xlnx_gem_set_nwcfg_link_speed(const struct device *dev);
static void eth_xlnx_gem_set_mac_address(const struct device *dev);
static void eth_xlnx_gem_set_initial_dmacr(const struct device *dev);
static void eth_xlnx_gem_init_phy(const struct device *dev);
static void eth_xlnx_gem_poll_phy(struct k_work *item);
static void eth_xlnx_gem_configure_buffers(const struct device *dev);
static void eth_xlnx_gem_rx_pending_work(struct k_work *item);
static void eth_xlnx_gem_handle_rx_pending(const struct device *dev);
static void eth_xlnx_gem_tx_done_work(struct k_work *item);
static void eth_xlnx_gem_handle_tx_done(const struct device *dev);

static const struct ethernet_api eth_xlnx_gem_apis = {
	.iface_api.init = eth_xlnx_gem_iface_init,
	.get_capabilities = eth_xlnx_gem_get_capabilities,
	.send = eth_xlnx_gem_send,
	.start = eth_xlnx_gem_start_device,
	.stop = eth_xlnx_gem_stop_device,
	.get_config = eth_xlnx_gem_get_config,
#if defined(CONFIG_NET_STATISTICS_ETHERNET)
	.get_stats = eth_xlnx_gem_stats,
#endif
};

/*
 * Insert the configuration & run-time data for all GEM instances which
 * are enabled in the device tree of the current target board.
 */
DT_INST_FOREACH_STATUS_OKAY(ETH_XLNX_GEM_INITIALIZE)
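/*
 * Note: ETH_XLNX_GEM_INITIALIZE is provided by eth_xlnx_gem_priv.h and is
 * assumed to expand, per enabled devicetree instance, to that instance's
 * config/data structs, its IRQ configuration function and the matching
 * DEVICE_DT_INST_DEFINE() invocation which wires up eth_xlnx_gem_dev_init
 * and eth_xlnx_gem_apis.
 */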

/**
 * @brief GEM device initialization function
 * Initializes the GEM itself, the DMA memory area used by the GEM and,
 * if enabled, an associated PHY attached to the GEM's MDIO interface.
 *
 * @param dev Pointer to the device data
 * @retval 0 if the device initialization completed successfully
 */
static int eth_xlnx_gem_dev_init(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	uint32_t reg_val;

	/* Precondition checks using assertions */

	/* Valid PHY address and polling interval, if PHY is to be managed */
	if (dev_conf->init_phy) {
		__ASSERT((dev_conf->phy_mdio_addr_fix >= 0 &&
			 dev_conf->phy_mdio_addr_fix <= 32),
			 "%s invalid PHY address %u, must be in range "
			 "1 to 32, or 0 for auto-detection",
			 dev->name, dev_conf->phy_mdio_addr_fix);
		__ASSERT(dev_conf->phy_poll_interval > 0,
			 "%s has an invalid zero PHY status polling "
			 "interval", dev->name);
	}

	/* Valid max. / nominal link speed value */
	__ASSERT((dev_conf->max_link_speed == LINK_10MBIT ||
		 dev_conf->max_link_speed == LINK_100MBIT ||
		 dev_conf->max_link_speed == LINK_1GBIT),
		 "%s invalid max./nominal link speed value %u",
		 dev->name, (uint32_t)dev_conf->max_link_speed);

	/* MDC clock divider validity check, SoC dependent */
#if defined(CONFIG_SOC_XILINX_ZYNQMP)
	__ASSERT(dev_conf->mdc_divider <= MDC_DIVIDER_48,
		 "%s invalid MDC clock divider value %u, must be in "
		 "range 0 to %u", dev->name, dev_conf->mdc_divider,
		 (uint32_t)MDC_DIVIDER_48);
#elif defined(CONFIG_SOC_FAMILY_XILINX_ZYNQ7000)
	__ASSERT(dev_conf->mdc_divider <= MDC_DIVIDER_224,
		 "%s invalid MDC clock divider value %u, must be in "
		 "range 0 to %u", dev->name, dev_conf->mdc_divider,
		 (uint32_t)MDC_DIVIDER_224);
#endif

	/* AMBA AHB configuration options */
	__ASSERT((dev_conf->amba_dbus_width == AMBA_AHB_DBUS_WIDTH_32BIT ||
		 dev_conf->amba_dbus_width == AMBA_AHB_DBUS_WIDTH_64BIT ||
		 dev_conf->amba_dbus_width == AMBA_AHB_DBUS_WIDTH_128BIT),
		 "%s AMBA AHB bus width configuration is invalid",
		 dev->name);
	__ASSERT((dev_conf->ahb_burst_length == AHB_BURST_SINGLE ||
		 dev_conf->ahb_burst_length == AHB_BURST_INCR4 ||
		 dev_conf->ahb_burst_length == AHB_BURST_INCR8 ||
		 dev_conf->ahb_burst_length == AHB_BURST_INCR16),
		 "%s AMBA AHB burst length configuration is invalid",
		 dev->name);

	/* HW RX buffer size */
	__ASSERT((dev_conf->hw_rx_buffer_size == HWRX_BUFFER_SIZE_8KB ||
		 dev_conf->hw_rx_buffer_size == HWRX_BUFFER_SIZE_4KB ||
		 dev_conf->hw_rx_buffer_size == HWRX_BUFFER_SIZE_2KB ||
		 dev_conf->hw_rx_buffer_size == HWRX_BUFFER_SIZE_1KB),
		 "%s hardware RX buffer size configuration is invalid",
		 dev->name);

	/* HW RX buffer offset */
	__ASSERT(dev_conf->hw_rx_buffer_offset <= 3,
		 "%s hardware RX buffer offset %u is invalid, must be in "
		 "range 0 to 3", dev->name, dev_conf->hw_rx_buffer_offset);

	/*
	 * RX & TX buffer sizes
	 * RX buffer size must be a multiple of 64, as the size of the
	 * corresponding DMA receive buffer in AHB system memory is
	 * expressed as n * 64 bytes in the DMA configuration register.
	 */
	__ASSERT(dev_conf->rx_buffer_size % 64 == 0,
		 "%s RX buffer size %u is not a multiple of 64 bytes",
		 dev->name, dev_conf->rx_buffer_size);
	__ASSERT((dev_conf->rx_buffer_size != 0 &&
		 dev_conf->rx_buffer_size <= 16320),
		 "%s RX buffer size %u is invalid, must be in range "
		 "64 to 16320 bytes", dev->name,
		 dev_conf->rx_buffer_size);
	__ASSERT((dev_conf->tx_buffer_size != 0 &&
		 dev_conf->tx_buffer_size <= 16380),
		 "%s TX buffer size %u is invalid, must be in range "
		 "64 to 16380 bytes", dev->name,
		 dev_conf->tx_buffer_size);

	/* Checksum offloading limitations of the QEMU GEM implementation */
#ifdef CONFIG_QEMU_TARGET
	__ASSERT(!dev_conf->enable_rx_chksum_offload,
		 "TCP/UDP/IP hardware checksum offloading is not "
		 "supported by the QEMU GEM implementation");
	__ASSERT(!dev_conf->enable_tx_chksum_offload,
		 "TCP/UDP/IP hardware checksum offloading is not "
		 "supported by the QEMU GEM implementation");
#endif

	/*
	 * Initialization procedure as described in the Zynq-7000 TRM,
	 * chapter 16.3.x.
	 */
	eth_xlnx_gem_reset_hw(dev);		/* Chapter 16.3.1 */
	eth_xlnx_gem_set_initial_nwcfg(dev);	/* Chapter 16.3.2 */
	eth_xlnx_gem_set_mac_address(dev);	/* Chapter 16.3.2 */
	eth_xlnx_gem_set_initial_dmacr(dev);	/* Chapter 16.3.2 */

	/* Enable MDIO -> set gem.net_ctrl[mgmt_port_en] */
	if (dev_conf->init_phy) {
		reg_val = sys_read32(dev_conf->base_addr +
				     ETH_XLNX_GEM_NWCTRL_OFFSET);
		reg_val |= ETH_XLNX_GEM_NWCTRL_MDEN_BIT;
		sys_write32(reg_val, dev_conf->base_addr +
			    ETH_XLNX_GEM_NWCTRL_OFFSET);
	}

	eth_xlnx_gem_configure_clocks(dev);	/* Chapter 16.3.3 */
	if (dev_conf->init_phy) {
		eth_xlnx_gem_init_phy(dev);	/* Chapter 16.3.4 */
	}
	eth_xlnx_gem_configure_buffers(dev);	/* Chapter 16.3.5 */

	return 0;
}

/**
 * @brief GEM associated interface initialization function
 * Initializes the interface associated with a GEM device.
 *
 * @param iface Pointer to the associated interface data struct
 */
static void eth_xlnx_gem_iface_init(struct net_if *iface)
{
	const struct device *dev = net_if_get_device(iface);
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;

	/* Set the initial contents of the current instance's run-time data */
	dev_data->iface = iface;
	net_if_set_link_addr(iface, dev_data->mac_addr, 6, NET_LINK_ETHERNET);
	ethernet_init(iface);
	net_if_carrier_off(iface);

	/*
	 * Initialize the (delayed) work items for RX pending, TX done
	 * and PHY status polling handlers
	 */
	k_work_init(&dev_data->tx_done_work, eth_xlnx_gem_tx_done_work);
	k_work_init(&dev_data->rx_pend_work, eth_xlnx_gem_rx_pending_work);
	k_work_init_delayable(&dev_data->phy_poll_delayed_work,
			      eth_xlnx_gem_poll_phy);

	/* Initialize TX completion semaphore */
	k_sem_init(&dev_data->tx_done_sem, 0, 1);
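	/*
	 * The TX done semaphore starts out unavailable (initial count 0,
	 * limit 1): eth_xlnx_gem_send() blocks in k_sem_take() until the
	 * TX done handler gives the semaphore after the controller has
	 * indicated completion of the current frame's transmission.
	 */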

	/*
	 * Initialize semaphores in the RX/TX BD rings which have not
	 * yet been initialized
	 */
	k_sem_init(&dev_data->txbd_ring.ring_sem, 1, 1);
	/* RX BD ring semaphore is not required at the time being */

	/* Initialize the device's interrupt */
	dev_conf->config_func(dev);

	/* Submit initial PHY status polling delayed work */
	k_work_reschedule(&dev_data->phy_poll_delayed_work, K_NO_WAIT);
}

/**
 * @brief GEM interrupt service routine
 * GEM interrupt service routine. Checks for indications of errors
 * and either immediately handles RX pending / TX complete notifications
 * or defers them to the system work queue.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_isr(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;
	uint32_t reg_val;

	/* Read the interrupt status register */
	reg_val = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET);

	/*
	 * TODO: handling if one or more error flag(s) are set in the
	 * interrupt status register. -> For now, just log them
	 */
	if (reg_val & ETH_XLNX_GEM_IXR_ERRORS_MASK) {
		LOG_ERR("%s error bit(s) set in Interrupt Status Reg.: 0x%08X",
			dev->name, reg_val);
	}

	/*
	 * Check for the following indications by the controller:
	 * reg_val & 0x00000080 -> gem.intr_status bit [7] = Frame TX complete
	 * reg_val & 0x00000002 -> gem.intr_status bit [1] = Frame received
	 * comp. Zynq-7000 TRM, Chapter B.18, p. 1289/1290.
	 * If the respective condition's handling is configured to be deferred
	 * to the work queue thread, submit the corresponding job to the work
	 * queue, otherwise, handle the condition immediately.
	 */
	if ((reg_val & ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT) != 0) {
		sys_write32(ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT,
			    dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET);
		sys_write32(ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT,
			    dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET);
		if (dev_conf->defer_txd_to_queue) {
			k_work_submit(&dev_data->tx_done_work);
		} else {
			eth_xlnx_gem_handle_tx_done(dev);
		}
	}
	if ((reg_val & ETH_XLNX_GEM_IXR_FRAME_RX_BIT) != 0) {
		sys_write32(ETH_XLNX_GEM_IXR_FRAME_RX_BIT,
			    dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET);
		sys_write32(ETH_XLNX_GEM_IXR_FRAME_RX_BIT,
			    dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET);
		if (dev_conf->defer_rxp_to_queue) {
			k_work_submit(&dev_data->rx_pend_work);
		} else {
			eth_xlnx_gem_handle_rx_pending(dev);
		}
	}

	/*
	 * Clear all interrupt status bits so that the interrupt is de-asserted
	 * by the GEM. -> TXSR/RXSR are read/cleared by either eth_xlnx_gem_-
	 * handle_tx_done or eth_xlnx_gem_handle_rx_pending if those actions
	 * are not deferred to the system's work queue for the current inter-
	 * face. If the latter is the case, those registers will be read/
	 * cleared whenever the corresponding work item submitted from within
	 * this ISR is being processed.
	 */
	sys_write32((0xFFFFFFFF & ~(ETH_XLNX_GEM_IXR_FRAME_RX_BIT |
		    ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT)),
		    dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET);
}

/**
 * @brief GEM data send function
 * GEM data send function. Blocks until a TX complete notification has been
 * received & processed.
 *
 * @param dev Pointer to the device data
 * @param pkt Pointer to the data packet to be sent
 * @retval -EINVAL in case of invalid parameters, e.g. zero data length
 * @retval -EIO in case of:
 *         (1) the attempt to TX data while the device is stopped,
 *             the interface is down or the link is down,
 *         (2) the attempt to TX data while no free buffers are available
 *             in the DMA memory area,
 *         (3) the transmission completion notification timing out
 * @retval 0 if the packet was transmitted successfully
 */
static int eth_xlnx_gem_send(const struct device *dev, struct net_pkt *pkt)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;

	uint16_t tx_data_length;
	uint16_t tx_data_remaining;
	void *tx_buffer_offs;

	uint8_t bds_reqd;
	uint8_t curr_bd_idx;
	uint8_t first_bd_idx;

	uint32_t reg_ctrl;
	uint32_t reg_val;
	int sem_status;

	if (!dev_data->started || dev_data->eff_link_speed == LINK_DOWN ||
	    (!net_if_flag_is_set(dev_data->iface, NET_IF_UP))) {
#ifdef CONFIG_NET_STATISTICS_ETHERNET
		dev_data->stats.tx_dropped++;
#endif
		return -EIO;
	}

	tx_data_length = tx_data_remaining = net_pkt_get_len(pkt);
	if (tx_data_length == 0) {
		LOG_ERR("%s cannot TX, zero packet length", dev->name);
#ifdef CONFIG_NET_STATISTICS_ETHERNET
		dev_data->stats.errors.tx++;
#endif
		return -EINVAL;
	}

	/*
	 * Check if enough buffer descriptors are available for the amount
	 * of data to be transmitted, update the free BD count if this is
	 * the case. Update the 'next to use' BD index in the TX BD ring if
	 * sufficient space is available. If TX done handling, where the BD
	 * ring's data is accessed as well, is performed via the system work
	 * queue, protect against interruptions during the update of the BD
	 * ring's data by taking the ring's semaphore. If TX done handling
	 * is performed within the ISR, protect against interruptions by
	 * disabling the TX done interrupt source.
	 */
	bds_reqd = (uint8_t)((tx_data_length + (dev_conf->tx_buffer_size - 1)) /
		   dev_conf->tx_buffer_size);
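	/*
	 * This is a ceiling division: e.g. assuming tx_buffer_size = 512,
	 * a 1514 byte frame yields (1514 + 511) / 512 = 3 buffer
	 * descriptors, while a 512 byte frame yields exactly 1.
	 */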

	if (dev_conf->defer_txd_to_queue) {
		k_sem_take(&(dev_data->txbd_ring.ring_sem), K_FOREVER);
	} else {
		sys_write32(ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT,
			    dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET);
	}

	if (bds_reqd > dev_data->txbd_ring.free_bds) {
		LOG_ERR("%s cannot TX, packet length %hu requires "
			"%hhu BDs, current free count = %hhu",
			dev->name, tx_data_length, bds_reqd,
			dev_data->txbd_ring.free_bds);

		if (dev_conf->defer_txd_to_queue) {
			k_sem_give(&(dev_data->txbd_ring.ring_sem));
		} else {
			sys_write32(ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT,
				    dev_conf->base_addr + ETH_XLNX_GEM_IER_OFFSET);
		}
#ifdef CONFIG_NET_STATISTICS_ETHERNET
		dev_data->stats.tx_dropped++;
#endif
		return -EIO;
	}

	curr_bd_idx = first_bd_idx = dev_data->txbd_ring.next_to_use;
	reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl);
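	/*
	 * The BD's control word is accessed via sys_read32()/sys_write32()
	 * on its address, hence the pointer-to-uint32_t cast. This matches
	 * the 32-bit-addresses-only limitation stated at the top of this
	 * file: on a target with a 64-bit address space, this cast would
	 * truncate the pointer.
	 */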

	dev_data->txbd_ring.next_to_use = (first_bd_idx + bds_reqd) %
					  dev_conf->txbd_count;
	dev_data->txbd_ring.free_bds -= bds_reqd;

	if (dev_conf->defer_txd_to_queue) {
		k_sem_give(&(dev_data->txbd_ring.ring_sem));
	} else {
		sys_write32(ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT,
			    dev_conf->base_addr + ETH_XLNX_GEM_IER_OFFSET);
	}

	/*
	 * Scatter the contents of the network packet's buffer to
	 * one or more DMA buffers.
	 */
	net_pkt_cursor_init(pkt);
	do {
		/* Calculate the base pointer of the target TX buffer */
		tx_buffer_offs = (void *)(dev_data->first_tx_buffer +
				 (dev_conf->tx_buffer_size * curr_bd_idx));

		/* Copy packet data to DMA buffer */
		net_pkt_read(pkt, (void *)tx_buffer_offs,
			     (tx_data_remaining < dev_conf->tx_buffer_size) ?
			     tx_data_remaining : dev_conf->tx_buffer_size);

		/* Update current BD's control word */
		reg_val = sys_read32(reg_ctrl) & (ETH_XLNX_GEM_TXBD_WRAP_BIT |
			  ETH_XLNX_GEM_TXBD_USED_BIT);
		reg_val |= (tx_data_remaining < dev_conf->tx_buffer_size) ?
			   tx_data_remaining : dev_conf->tx_buffer_size;
		sys_write32(reg_val, reg_ctrl);

		if (tx_data_remaining > dev_conf->tx_buffer_size) {
			/* Switch to next BD */
			curr_bd_idx = (curr_bd_idx + 1) % dev_conf->txbd_count;
			reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl);
		}

		tx_data_remaining -= (tx_data_remaining < dev_conf->tx_buffer_size) ?
				     tx_data_remaining : dev_conf->tx_buffer_size;
	} while (tx_data_remaining > 0);

	/* Set the 'last' bit in the current BD's control word */
	reg_val |= ETH_XLNX_GEM_TXBD_LAST_BIT;

	/*
	 * Clear the 'used' bits of all BDs involved in the current
	 * transmission. In accordance with chapter 16.3.8 of the
	 * Zynq-7000 TRM, the 'used' bits shall be cleared in reverse
	 * order, so that the 'used' bit of the first BD is cleared
	 * last just before the transmission is started.
	 */
	reg_val &= ~ETH_XLNX_GEM_TXBD_USED_BIT;
	sys_write32(reg_val, reg_ctrl);

	while (curr_bd_idx != first_bd_idx) {
		curr_bd_idx = (curr_bd_idx != 0) ? (curr_bd_idx - 1) :
			      (dev_conf->txbd_count - 1);
		reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl);
		reg_val = sys_read32(reg_ctrl);
		reg_val &= ~ETH_XLNX_GEM_TXBD_USED_BIT;
		sys_write32(reg_val, reg_ctrl);
	}

	/* Set the start TX bit in the gem.net_ctrl register */
	reg_val = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);
	reg_val |= ETH_XLNX_GEM_NWCTRL_STARTTX_BIT;
	sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);

#ifdef CONFIG_NET_STATISTICS_ETHERNET
	dev_data->stats.bytes.sent += tx_data_length;
	dev_data->stats.pkts.tx++;
#endif

	/* Block until TX has completed */
	sem_status = k_sem_take(&dev_data->tx_done_sem, K_MSEC(100));
	if (sem_status < 0) {
		LOG_ERR("%s TX confirmation timed out", dev->name);
#ifdef CONFIG_NET_STATISTICS_ETHERNET
		dev_data->stats.tx_timeout_count++;
#endif
		return -EIO;
	}

	return 0;
}

/**
 * @brief GEM device start function
 * GEM device start function. Clears all status registers and any
 * pending interrupts, enables RX and TX, enables interrupts. If
 * no PHY is managed by the current driver instance, this function
 * also declares the physical link up at the configured nominal
 * link speed.
 *
 * @param dev Pointer to the device data
 * @retval 0 upon successful completion
 */
static int eth_xlnx_gem_start_device(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;
	uint32_t reg_val;

	if (dev_data->started) {
		return 0;
	}
	dev_data->started = true;

	/* Disable & clear all the MAC interrupts */
	sys_write32(ETH_XLNX_GEM_IXR_ALL_MASK,
		    dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET);
	sys_write32(ETH_XLNX_GEM_IXR_ALL_MASK,
		    dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET);

	/* Clear RX & TX status registers */
	sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_TXSR_OFFSET);
	sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_RXSR_OFFSET);

	/* RX and TX enable */
	reg_val = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);
	reg_val |= (ETH_XLNX_GEM_NWCTRL_RXEN_BIT | ETH_XLNX_GEM_NWCTRL_TXEN_BIT);
	sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);

	/* Enable all the MAC interrupts */
	sys_write32(ETH_XLNX_GEM_IXR_ALL_MASK,
		    dev_conf->base_addr + ETH_XLNX_GEM_IER_OFFSET);

	/* Submit the delayed work for polling the link state */
	if (k_work_delayable_remaining_get(&dev_data->phy_poll_delayed_work) == 0) {
		k_work_reschedule(&dev_data->phy_poll_delayed_work, K_NO_WAIT);
	}

	LOG_DBG("%s started", dev->name);
	return 0;
}

/**
 * @brief GEM device stop function
 * GEM device stop function. Disables all interrupts, disables
 * RX and TX, clears all status registers. If no PHY is managed
 * by the current driver instance, this function also declares
 * the physical link down.
 *
 * @param dev Pointer to the device data
 * @retval 0 upon successful completion
 */
static int eth_xlnx_gem_stop_device(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;
	uint32_t reg_val;

	if (!dev_data->started) {
		return 0;
	}
	dev_data->started = false;

	/* Cancel the delayed work that polls the link state */
	if (k_work_delayable_remaining_get(&dev_data->phy_poll_delayed_work) != 0) {
		k_work_cancel_delayable(&dev_data->phy_poll_delayed_work);
	}

	/* RX and TX disable */
	reg_val = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);
	reg_val &= (~(ETH_XLNX_GEM_NWCTRL_RXEN_BIT | ETH_XLNX_GEM_NWCTRL_TXEN_BIT));
	sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);

	/* Disable & clear all the MAC interrupts */
	sys_write32(ETH_XLNX_GEM_IXR_ALL_MASK,
		    dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET);
	sys_write32(ETH_XLNX_GEM_IXR_ALL_MASK,
		    dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET);

	/* Clear RX & TX status registers */
	sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_TXSR_OFFSET);
	sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_RXSR_OFFSET);

	LOG_DBG("%s stopped", dev->name);
	return 0;
}

/**
 * @brief GEM capability request function
 * Returns the capabilities of the GEM controller as an enumeration.
 * All of the data returned is derived from the device configuration
 * of the current GEM device instance.
 *
 * @param dev Pointer to the device data
 * @return Enumeration containing the current GEM device's capabilities
 */
static enum ethernet_hw_caps eth_xlnx_gem_get_capabilities(
	const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	enum ethernet_hw_caps caps = (enum ethernet_hw_caps)0;

	if (dev_conf->max_link_speed == LINK_1GBIT) {
		if (dev_conf->phy_advertise_lower) {
			caps |= (ETHERNET_LINK_1000BASE_T |
				 ETHERNET_LINK_100BASE_T |
				 ETHERNET_LINK_10BASE_T);
		} else {
			caps |= ETHERNET_LINK_1000BASE_T;
		}
	} else if (dev_conf->max_link_speed == LINK_100MBIT) {
		if (dev_conf->phy_advertise_lower) {
			caps |= (ETHERNET_LINK_100BASE_T |
				 ETHERNET_LINK_10BASE_T);
		} else {
			caps |= ETHERNET_LINK_100BASE_T;
		}
	} else {
		caps |= ETHERNET_LINK_10BASE_T;
	}

	if (dev_conf->enable_rx_chksum_offload) {
		caps |= ETHERNET_HW_RX_CHKSUM_OFFLOAD;
	}

	if (dev_conf->enable_tx_chksum_offload) {
		caps |= ETHERNET_HW_TX_CHKSUM_OFFLOAD;
	}

	if (dev_conf->enable_fdx) {
		caps |= ETHERNET_DUPLEX_SET;
	}

	if (dev_conf->copy_all_frames) {
		caps |= ETHERNET_PROMISC_MODE;
	}

	return caps;
}

/**
 * @brief GEM hardware configuration data request function
 * Returns hardware configuration details of the specified device
 * instance. Multiple hardware configuration items can be queried
 * depending on the type parameter. The range of configuration items
 * that can be queried is specified by the Ethernet subsystem.
 * The queried configuration data is returned via a struct which can
 * accommodate all supported configuration items, to which the
 * caller must provide a valid pointer.
 * Currently only supports querying the RX and TX hardware checksum
 * capabilities of the specified device instance.
 *
 * @param dev Pointer to the device data
 * @param type The hardware configuration item to be queried
 * @param config Pointer to the struct into which the queried
 *               configuration data is written.
 * @return 0 if the specified configuration item was successfully
 *         queried, -ENOTSUP if the specified configuration item
 *         is not supported by this function.
 */
static int eth_xlnx_gem_get_config(const struct device *dev,
				   enum ethernet_config_type type,
				   struct ethernet_config *config)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;

	switch (type) {
	case ETHERNET_CONFIG_TYPE_RX_CHECKSUM_SUPPORT:
		if (dev_conf->enable_rx_chksum_offload) {
			config->chksum_support = ETHERNET_CHECKSUM_SUPPORT_IPV4_HEADER |
						 ETHERNET_CHECKSUM_SUPPORT_IPV6_HEADER |
						 ETHERNET_CHECKSUM_SUPPORT_TCP |
						 ETHERNET_CHECKSUM_SUPPORT_UDP;
		} else {
			config->chksum_support = ETHERNET_CHECKSUM_SUPPORT_NONE;
		}
		return 0;
	case ETHERNET_CONFIG_TYPE_TX_CHECKSUM_SUPPORT:
		if (dev_conf->enable_tx_chksum_offload) {
			config->chksum_support = ETHERNET_CHECKSUM_SUPPORT_IPV4_HEADER |
						 ETHERNET_CHECKSUM_SUPPORT_IPV6_HEADER |
						 ETHERNET_CHECKSUM_SUPPORT_TCP |
						 ETHERNET_CHECKSUM_SUPPORT_UDP;
		} else {
			config->chksum_support = ETHERNET_CHECKSUM_SUPPORT_NONE;
		}
		return 0;
	default:
		return -ENOTSUP;
	}
}

#ifdef CONFIG_NET_STATISTICS_ETHERNET
/**
 * @brief GEM statistics data request function
 * Returns a pointer to the statistics data of the current GEM controller.
 *
 * @param dev Pointer to the device data
 * @return Pointer to the current GEM device's statistics data
 */
static struct net_stats_eth *eth_xlnx_gem_stats(const struct device *dev)
{
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;

	return &dev_data->stats;
}
#endif
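
/*
 * A minimal application-side sketch for retrieving these statistics via the
 * network management API (assuming CONFIG_NET_STATISTICS_ETHERNET and
 * CONFIG_NET_STATISTICS_USER_API are enabled):
 *
 *   struct net_stats_eth stats;
 *   struct net_if *iface = net_if_get_default();
 *
 *   net_mgmt(NET_REQUEST_STATS_GET_ETHERNET, iface,
 *            &stats, sizeof(stats));
 *
 * which ends up calling eth_xlnx_gem_stats() through the get_stats API
 * pointer registered above.
 */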

/**
 * @brief GEM Hardware reset function
 * Resets the current GEM device. Called from within the device
 * initialization function.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_reset_hw(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;

	/*
	 * Controller reset sequence as described in the Zynq-7000 TRM,
	 * chapter 16.3.1.
	 */

	/* Clear the NWCTRL register */
	sys_write32(0x00000000,
		    dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);

	/* Clear the statistics counters */
	sys_write32(ETH_XLNX_GEM_STATCLR_MASK,
		    dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);

	/* Clear the RX/TX status registers */
	sys_write32(ETH_XLNX_GEM_TXSRCLR_MASK,
		    dev_conf->base_addr + ETH_XLNX_GEM_TXSR_OFFSET);
	sys_write32(ETH_XLNX_GEM_RXSRCLR_MASK,
		    dev_conf->base_addr + ETH_XLNX_GEM_RXSR_OFFSET);

	/* Disable all interrupts */
	sys_write32(ETH_XLNX_GEM_IDRCLR_MASK,
		    dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET);

	/* Clear the buffer queues */
	sys_write32(0x00000000,
		    dev_conf->base_addr + ETH_XLNX_GEM_RXQBASE_OFFSET);
	sys_write32(0x00000000,
		    dev_conf->base_addr + ETH_XLNX_GEM_TXQBASE_OFFSET);
}

/**
 * @brief GEM clock configuration function
 * Calculates the pre-scalers for the TX clock to match the current
 * (if an associated PHY is managed) or nominal link speed. Called
 * from within the device initialization function.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_configure_clocks(const struct device *dev)
{
	/*
	 * Clock source configuration for the respective GEM as described
	 * in the Zynq-7000 TRM, chapter 16.3.3, is not tackled here. This
	 * is performed by the PS7Init code. Only the DIVISOR and DIVISOR1
	 * values for the respective GEM's TX clock are calculated here.
	 */

	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;

	uint32_t div0;
	uint32_t div1;
	uint32_t target = 2500000; /* default prevents 'may be uninitialized' warning */
	uint32_t tmp;
	uint32_t clk_ctrl_reg;

	if ((!dev_conf->init_phy) || dev_data->eff_link_speed == LINK_DOWN) {
		/*
		 * Run-time data indicates 'link down' or PHY management
		 * is disabled for the current device -> this indicates the
		 * initial device initialization. Once the PHY status polling
		 * delayed work handler has picked up the result of the auto-
		 * negotiation (if enabled), this if-statement will evaluate
		 * to false.
		 */
		if (dev_conf->max_link_speed == LINK_10MBIT) {
			target = 2500000;   /* Target frequency: 2.5 MHz */
		} else if (dev_conf->max_link_speed == LINK_100MBIT) {
			target = 25000000;  /* Target frequency: 25 MHz */
		} else if (dev_conf->max_link_speed == LINK_1GBIT) {
			target = 125000000; /* Target frequency: 125 MHz */
		}
	} else if (dev_data->eff_link_speed != LINK_DOWN) {
		/*
		 * Use the effective link speed instead of the maximum/nominal
		 * link speed for clock configuration.
		 */
		if (dev_data->eff_link_speed == LINK_10MBIT) {
			target = 2500000;   /* Target frequency: 2.5 MHz */
		} else if (dev_data->eff_link_speed == LINK_100MBIT) {
			target = 25000000;  /* Target frequency: 25 MHz */
		} else if (dev_data->eff_link_speed == LINK_1GBIT) {
			target = 125000000; /* Target frequency: 125 MHz */
		}
	}

	/*
	 * Calculate the divisors for the target frequency.
	 * The frequency of the PLL to which the divisors shall be applied is
	 * provided in the respective GEM's device tree data.
	 */
	for (div0 = 1; div0 < 64; div0++) {
		for (div1 = 1; div1 < 64; div1++) {
			tmp = ((dev_conf->pll_clock_frequency / div0) / div1);
			if (tmp >= (target - 10) && tmp <= (target + 10)) {
				break;
			}
		}
		if (tmp >= (target - 10) && tmp <= (target + 10)) {
			break;
		}
	}
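	/*
	 * Worked example: assuming pll_clock_frequency = 1000 MHz and a
	 * 1 GBit/s link (target = 125 MHz), the search terminates at
	 * div0 = 1, div1 = 8, since 1000000000 / 1 / 8 = 125000000 lies
	 * within the +/- 10 Hz tolerance window.
	 */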

#if defined(CONFIG_SOC_XILINX_ZYNQMP)
	/*
	 * ZynqMP register crl_apb.GEMx_REF_CTRL:
	 * RX_CLKACT bit [26]
	 * CLKACT bit [25]
	 * div0 bits [13..8], div1 bits [21..16]
	 */
	clk_ctrl_reg = sys_read32(dev_conf->clk_ctrl_reg_address);
	clk_ctrl_reg &= ~((ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR_MASK <<
			ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR0_SHIFT) |
			(ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR_MASK <<
			ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR1_SHIFT));
	clk_ctrl_reg |= ((div0 & ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR_MASK) <<
			ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR0_SHIFT) |
			((div1 & ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR_MASK) <<
			ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR1_SHIFT);
	clk_ctrl_reg |= ETH_XLNX_CRL_APB_GEMX_REF_CTRL_RX_CLKACT_BIT |
			ETH_XLNX_CRL_APB_GEMX_REF_CTRL_CLKACT_BIT;

	/*
	 * Unlock CRL_APB write access if the write protect bit
	 * is currently set, restore it afterwards.
	 */
	tmp = sys_read32(ETH_XLNX_CRL_APB_WPROT_REGISTER_ADDRESS);
	if ((tmp & ETH_XLNX_CRL_APB_WPROT_BIT) > 0) {
		sys_write32((tmp & ~ETH_XLNX_CRL_APB_WPROT_BIT),
			    ETH_XLNX_CRL_APB_WPROT_REGISTER_ADDRESS);
	}
	sys_write32(clk_ctrl_reg, dev_conf->clk_ctrl_reg_address);
	if ((tmp & ETH_XLNX_CRL_APB_WPROT_BIT) > 0) {
		sys_write32(tmp, ETH_XLNX_CRL_APB_WPROT_REGISTER_ADDRESS);
	}
#elif defined(CONFIG_SOC_FAMILY_XILINX_ZYNQ7000)
	clk_ctrl_reg = sys_read32(dev_conf->clk_ctrl_reg_address);
	clk_ctrl_reg &= ~((ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR_MASK <<
			ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR0_SHIFT) |
			(ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR_MASK <<
			ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR1_SHIFT));
	clk_ctrl_reg |= ((div0 & ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR_MASK) <<
			ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR0_SHIFT) |
			((div1 & ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR_MASK) <<
			ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR1_SHIFT);

	sys_write32(clk_ctrl_reg, dev_conf->clk_ctrl_reg_address);
#endif /* CONFIG_SOC_XILINX_ZYNQMP / CONFIG_SOC_FAMILY_XILINX_ZYNQ7000 */

	LOG_DBG("%s set clock dividers div0/1 %u/%u for target "
		"frequency %u Hz", dev->name, div0, div1, target);
}

/**
 * @brief GEM initial Network Configuration Register setup function
 * Writes the contents of the current GEM device's Network Configuration
 * Register (NWCFG / gem.net_cfg). Called from within the device
 * initialization function. Implementation differs depending on whether
 * the current target is a Zynq-7000 or a ZynqMP.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_set_initial_nwcfg(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	uint32_t reg_val = 0;

	if (dev_conf->ignore_ipg_rxer) {
		/* [30] ignore IPG rx_er */
		reg_val |= ETH_XLNX_GEM_NWCFG_IGNIPGRXERR_BIT;
	}
	if (dev_conf->disable_reject_nsp) {
		/* [29] disable rejection of non-standard preamble */
		reg_val |= ETH_XLNX_GEM_NWCFG_BADPREAMBEN_BIT;
	}
	if (dev_conf->enable_ipg_stretch) {
		/* [28] enable IPG stretch */
		reg_val |= ETH_XLNX_GEM_NWCFG_IPG_STRETCH_BIT;
	}
	if (dev_conf->enable_sgmii_mode) {
		/* [27] SGMII mode enable */
		reg_val |= ETH_XLNX_GEM_NWCFG_SGMIIEN_BIT;
	}
	if (dev_conf->disable_reject_fcs_crc_errors) {
		/* [26] disable rejection of FCS/CRC errors */
		reg_val |= ETH_XLNX_GEM_NWCFG_FCSIGNORE_BIT;
	}
	if (dev_conf->enable_rx_halfdup_while_tx) {
		/* [25] RX half duplex while TX enable */
		reg_val |= ETH_XLNX_GEM_NWCFG_HDRXEN_BIT;
	}
	if (dev_conf->enable_rx_chksum_offload) {
		/* [24] enable RX IP/TCP/UDP checksum offload */
		reg_val |= ETH_XLNX_GEM_NWCFG_RXCHKSUMEN_BIT;
	}
	if (dev_conf->disable_pause_copy) {
		/* [23] do not copy pause frames to memory */
		reg_val |= ETH_XLNX_GEM_NWCFG_PAUSECOPYDI_BIT;
	}
	/* [22..21] Data bus width */
	reg_val |= (((uint32_t)(dev_conf->amba_dbus_width) &
		   ETH_XLNX_GEM_NWCFG_DBUSW_MASK) <<
		   ETH_XLNX_GEM_NWCFG_DBUSW_SHIFT);
	/* [20..18] MDC clock divider */
	reg_val |= (((uint32_t)dev_conf->mdc_divider &
		   ETH_XLNX_GEM_NWCFG_MDC_MASK) <<
		   ETH_XLNX_GEM_NWCFG_MDC_SHIFT);
	if (dev_conf->discard_rx_fcs) {
		/* [17] discard FCS from received frames */
		reg_val |= ETH_XLNX_GEM_NWCFG_FCSREM_BIT;
	}
	if (dev_conf->discard_rx_length_errors) {
		/* [16] RX length error discard */
		reg_val |= ETH_XLNX_GEM_NWCFG_LENGTHERRDSCRD_BIT;
	}
	/* [15..14] RX buffer offset */
	reg_val |= (((uint32_t)dev_conf->hw_rx_buffer_offset &
		   ETH_XLNX_GEM_NWCFG_RXOFFS_MASK) <<
		   ETH_XLNX_GEM_NWCFG_RXOFFS_SHIFT);
	if (dev_conf->enable_pause) {
		/* [13] enable pause TX */
		reg_val |= ETH_XLNX_GEM_NWCFG_PAUSEEN_BIT;
	}
	if (dev_conf->enable_tbi) {
		/* [11] enable TBI instead of GMII/MII */
		reg_val |= ETH_XLNX_GEM_NWCFG_TBIINSTEAD_BIT;
	}
	if (dev_conf->ext_addr_match) {
		/* [09] external address match enable */
		reg_val |= ETH_XLNX_GEM_NWCFG_EXTADDRMATCHEN_BIT;
	}
	if (dev_conf->enable_1536_frames) {
		/* [08] enable reception of 1536 byte frames */
		reg_val |= ETH_XLNX_GEM_NWCFG_1536RXEN_BIT;
	}
	if (dev_conf->enable_ucast_hash) {
		/* [07] receive unicast hash frames */
		reg_val |= ETH_XLNX_GEM_NWCFG_UCASTHASHEN_BIT;
	}
	if (dev_conf->enable_mcast_hash) {
		/* [06] receive multicast hash frames */
		reg_val |= ETH_XLNX_GEM_NWCFG_MCASTHASHEN_BIT;
	}
	if (dev_conf->disable_bcast) {
		/* [05] do not receive broadcast frames */
		reg_val |= ETH_XLNX_GEM_NWCFG_BCASTDIS_BIT;
	}
	if (dev_conf->copy_all_frames) {
		/* [04] copy all frames */
		reg_val |= ETH_XLNX_GEM_NWCFG_COPYALLEN_BIT;
	}
	if (dev_conf->discard_non_vlan) {
		/* [02] receive only VLAN frames */
		reg_val |= ETH_XLNX_GEM_NWCFG_NVLANDISC_BIT;
	}
	if (dev_conf->enable_fdx) {
		/* [01] enable full duplex */
		reg_val |= ETH_XLNX_GEM_NWCFG_FDEN_BIT;
	}
	if (dev_conf->max_link_speed == LINK_100MBIT) {
		/* [00] 10 or 100 Mbps */
		reg_val |= ETH_XLNX_GEM_NWCFG_100_BIT;
	} else if (dev_conf->max_link_speed == LINK_1GBIT) {
		/* [10] Gigabit mode enable */
		reg_val |= ETH_XLNX_GEM_NWCFG_1000_BIT;
	}
	/*
	 * No else-branch for 10 Mbit/s mode:
	 * in 10 Mbit/s mode, both bits [00] and [10] remain 0
	 */

	/* Write the assembled register contents to gem.net_cfg */
	sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCFG_OFFSET);
}

/**
 * @brief GEM Network Configuration Register link speed update function
 * Updates only the link speed-related bits of the Network Configuration
 * register. This is called from within #eth_xlnx_gem_poll_phy.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_set_nwcfg_link_speed(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;
	uint32_t reg_val;

	/*
	 * Read the current gem.net_cfg register contents and mask out
	 * the link speed-related bits
	 */
	reg_val = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_NWCFG_OFFSET);
	reg_val &= ~(ETH_XLNX_GEM_NWCFG_1000_BIT | ETH_XLNX_GEM_NWCFG_100_BIT);

	/* No bits to set for 10 Mbps. 100 Mbps and 1 Gbps set one bit each. */
	if (dev_data->eff_link_speed == LINK_100MBIT) {
		reg_val |= ETH_XLNX_GEM_NWCFG_100_BIT;
	} else if (dev_data->eff_link_speed == LINK_1GBIT) {
		reg_val |= ETH_XLNX_GEM_NWCFG_1000_BIT;
	}

	/* Write the assembled register contents to gem.net_cfg */
	sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCFG_OFFSET);
}

/**
 * @brief GEM MAC address setup function
 * Acquires the MAC address to be assigned to the current GEM device
 * from the device configuration data which in turn acquires it from
 * the device tree data, then writes it to the gem.spec_addr1_bot/LADDR1L
 * and gem.spec_addr1_top/LADDR1H registers. Called from within the device
 * initialization function.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_set_mac_address(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;
	uint32_t regval_top;
	uint32_t regval_bot;

	regval_bot = (dev_data->mac_addr[0] & 0xFF);
	regval_bot |= (dev_data->mac_addr[1] & 0xFF) << 8;
	regval_bot |= (dev_data->mac_addr[2] & 0xFF) << 16;
	regval_bot |= (dev_data->mac_addr[3] & 0xFF) << 24;

	regval_top = (dev_data->mac_addr[4] & 0xFF);
	regval_top |= (dev_data->mac_addr[5] & 0xFF) << 8;

	sys_write32(regval_bot, dev_conf->base_addr + ETH_XLNX_GEM_LADDR1L_OFFSET);
	sys_write32(regval_top, dev_conf->base_addr + ETH_XLNX_GEM_LADDR1H_OFFSET);
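	/*
	 * Byte order example: for MAC 0A:0B:0C:0D:0E:0F, regval_bot ends up
	 * as 0x0D0C0B0A (LADDR1L) and regval_top as 0x00000F0E (LADDR1H),
	 * i.e. the first octet of the address occupies the least significant
	 * byte of the low register.
	 */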

	LOG_DBG("%s MAC %02X:%02X:%02X:%02X:%02X:%02X",
		dev->name,
		dev_data->mac_addr[0],
		dev_data->mac_addr[1],
		dev_data->mac_addr[2],
		dev_data->mac_addr[3],
		dev_data->mac_addr[4],
		dev_data->mac_addr[5]);
}

/**
 * @brief GEM initial DMA Control Register setup function
 * Writes the contents of the current GEM device's DMA Control Register
 * (DMACR / gem.dma_cfg). Called from within the device initialization
 * function.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_set_initial_dmacr(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	uint32_t reg_val = 0;

	/*
	 * gem.dma_cfg register bit (field) definitions:
	 * comp. Zynq-7000 TRM, p. 1278 ff.
	 */

	if (dev_conf->disc_rx_ahb_unavail) {
		/* [24] Discard RX packet when AHB unavailable */
		reg_val |= ETH_XLNX_GEM_DMACR_DISCNOAHB_BIT;
	}
	/*
	 * [23..16] DMA RX buffer size in AHB system memory
	 * e.g.: 0x02 = 128, 0x18 = 1536, 0xA0 = 10240
	 */
	reg_val |= (((dev_conf->rx_buffer_size / 64) &
		   ETH_XLNX_GEM_DMACR_RX_BUF_MASK) <<
		   ETH_XLNX_GEM_DMACR_RX_BUF_SHIFT);
	if (dev_conf->enable_tx_chksum_offload) {
		/* [11] TX TCP/UDP/IP checksum offload to GEM */
		reg_val |= ETH_XLNX_GEM_DMACR_TCP_CHKSUM_BIT;
	}
	if (dev_conf->tx_buffer_size_full) {
		/* [10] TX buffer memory size select */
		reg_val |= ETH_XLNX_GEM_DMACR_TX_SIZE_BIT;
	}
	/*
	 * [09..08] RX packet buffer memory size select
	 * 0 = 1kB, 1 = 2kB, 2 = 4kB, 3 = 8kB
	 */
	reg_val |= (((uint32_t)dev_conf->hw_rx_buffer_size <<
		   ETH_XLNX_GEM_DMACR_RX_SIZE_SHIFT) &
		   ETH_XLNX_GEM_DMACR_RX_SIZE_MASK);
	if (dev_conf->enable_ahb_packet_endian_swap) {
		/* [07] AHB packet data endian swap enable */
		reg_val |= ETH_XLNX_GEM_DMACR_ENDIAN_BIT;
	}
	if (dev_conf->enable_ahb_md_endian_swap) {
		/* [06] AHB mgmt descriptor endian swap enable */
		reg_val |= ETH_XLNX_GEM_DMACR_DESCR_ENDIAN_BIT;
	}
	/*
	 * [04..00] AHB fixed burst length for DMA ops.
	 * 00001 = single AHB bursts,
	 * 001xx = attempt to use INCR4 bursts,
	 * 01xxx = attempt to use INCR8 bursts,
	 * 1xxxx = attempt to use INCR16 bursts
	 */
	reg_val |= ((uint32_t)dev_conf->ahb_burst_length &
		   ETH_XLNX_GEM_DMACR_AHB_BURST_LENGTH_MASK);

	/* Write the assembled register contents */
	sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_DMACR_OFFSET);
}

/**
 * @brief GEM associated PHY detection and setup function
 * If the current GEM device shall manage an associated PHY, its detection
 * and configuration is performed from within this function. Called from
 * within the device initialization function. This function refers to
 * functionality implemented in the phy_xlnx_gem module.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_init_phy(const struct device *dev)
{
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;
	int detect_rc;

	LOG_DBG("%s attempting to initialize associated PHY", dev->name);

	/*
	 * The phy_xlnx_gem_detect function checks if a valid PHY
	 * ID is returned when reading the corresponding high / low
	 * ID registers for all valid MDIO addresses. If a compatible
	 * PHY is detected, the function writes a pointer to the
	 * vendor-specific implementations of the PHY management
	 * functions to the run-time device data struct, along with
	 * the ID and the MDIO address of the detected PHY (dev_data->
	 * phy_id, dev_data->phy_addr, dev_data->phy_access_api).
	 */
	detect_rc = phy_xlnx_gem_detect(dev);

	if (detect_rc == 0 && dev_data->phy_id != 0x00000000 &&
	    dev_data->phy_id != 0xFFFFFFFF &&
	    dev_data->phy_access_api != NULL) {
		/* A compatible PHY was detected -> reset & configure it */
		dev_data->phy_access_api->phy_reset_func(dev);
		dev_data->phy_access_api->phy_configure_func(dev);
	} else {
		LOG_WRN("%s no compatible PHY detected", dev->name);
	}
}

/**
 * @brief GEM associated PHY status polling function
 * This handler of a delayed work item is called from the context of
 * the system work queue. It is always scheduled at least once during the
 * interface initialization. If the current driver instance manages a
 * PHY, the delayed work item will be re-scheduled in order to continuously
 * monitor the link state and speed while the device is active. Link state
 * and link speed changes are polled, which may result in the link state
 * change being propagated (carrier on/off) and / or the TX clock being
 * reconfigured to match the current link speed. If PHY management is dis-
 * abled for the current driver instance or no compatible PHY was detected,
 * the work item will not be re-scheduled and default link speed and link
 * state values are applied. This function refers to functionality imple-
 * mented in the phy_xlnx_gem module.
 *
 * @param work Pointer to the delayed work item which facilitates
 *             access to the current device's configuration data
 */
static void eth_xlnx_gem_poll_phy(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct eth_xlnx_gem_dev_data *dev_data = CONTAINER_OF(dwork,
		struct eth_xlnx_gem_dev_data, phy_poll_delayed_work);
	const struct device *dev = net_if_get_device(dev_data->iface);
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;

	uint16_t phy_status;
	uint8_t link_status;

	if (dev_data->phy_access_api != NULL) {
		/* A supported PHY is managed by the driver */
		phy_status = dev_data->phy_access_api->phy_poll_status_change_func(dev);

		if ((phy_status & (
			PHY_XLNX_GEM_EVENT_LINK_SPEED_CHANGED |
			PHY_XLNX_GEM_EVENT_LINK_STATE_CHANGED |
			PHY_XLNX_GEM_EVENT_AUTONEG_COMPLETE)) != 0) {

			/*
			 * Get the PHY's link status. Handling a 'link down'
			 * event is the simplest possible case.
			 */
			link_status = dev_data->phy_access_api->phy_poll_link_status_func(dev);

			if (link_status == 0) {
				/*
				 * Link is down -> propagate to the Ethernet
				 * layer that the link has gone down.
				 */
				dev_data->eff_link_speed = LINK_DOWN;
				net_eth_carrier_off(dev_data->iface);

				LOG_WRN("%s link down", dev->name);
			} else {
				/*
				 * A link has been detected, which, depending
				 * on the driver's configuration, might have
				 * a different speed than the previous link.
				 * Therefore, the clock dividers must be ad-
				 * justed accordingly.
				 */
				dev_data->eff_link_speed =
					dev_data->phy_access_api->phy_poll_link_speed_func(dev);

				eth_xlnx_gem_configure_clocks(dev);
				eth_xlnx_gem_set_nwcfg_link_speed(dev);
				net_eth_carrier_on(dev_data->iface);

				LOG_INF("%s link up, %s", dev->name,
					(dev_data->eff_link_speed == LINK_1GBIT)
					? "1 GBit/s"
					: (dev_data->eff_link_speed == LINK_100MBIT)
					? "100 MBit/s"
					: (dev_data->eff_link_speed == LINK_10MBIT)
					? "10 MBit/s" : "undefined / link down");
			}
		}

		/*
		 * Re-submit the delayed work using the interval from the
		 * device configuration data.
		 */
		k_work_reschedule(&dev_data->phy_poll_delayed_work,
				  K_MSEC(dev_conf->phy_poll_interval));
	} else {
		/*
		 * The current driver instance doesn't manage a PHY or no
		 * supported PHY was detected -> pretend the configured max.
		 * link speed is the effective link speed and that the link
		 * is up. The delayed work item won't be re-scheduled, as
		 * there isn't anything to poll for.
		 */
		dev_data->eff_link_speed = dev_conf->max_link_speed;

		eth_xlnx_gem_configure_clocks(dev);
		eth_xlnx_gem_set_nwcfg_link_speed(dev);
		net_eth_carrier_on(dev_data->iface);

		LOG_WRN("%s PHY not managed by the driver or no compatible "
			"PHY detected, assuming link up at %s", dev->name,
			(dev_conf->max_link_speed == LINK_1GBIT)
			? "1 GBit/s"
			: (dev_conf->max_link_speed == LINK_100MBIT)
			? "100 MBit/s"
			: (dev_conf->max_link_speed == LINK_10MBIT)
			? "10 MBit/s" : "undefined");
	}
}

/**
 * @brief GEM DMA memory area setup function
 * Sets up the DMA memory area to be used by the current GEM device.
 * Called from within the device initialization function or from within
 * the context of the PHY status polling delayed work handler.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_configure_buffers(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;
	struct eth_xlnx_gem_bd *bdptr;
	uint32_t buf_iter;

	/* Initial configuration of the RX/TX BD rings */
	DT_INST_FOREACH_STATUS_OKAY(ETH_XLNX_GEM_INIT_BD_RING)
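	/*
	 * ETH_XLNX_GEM_INIT_BD_RING comes from eth_xlnx_gem_priv.h; it is
	 * assumed to expand, per enabled devicetree instance, to the code
	 * which points first_bd / first_rx_buffer / first_tx_buffer at that
	 * instance's DMA memory area.
	 */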

	/*
	 * Set initial RX BD data -> comp. Zynq-7000 TRM, Chapter 16.3.5,
	 * "Receive Buffer Descriptor List". The BD ring data other than
	 * the base RX/TX buffer pointers will be set in eth_xlnx_gem_-
	 * iface_init()
	 */
	bdptr = dev_data->rxbd_ring.first_bd;

	for (buf_iter = 0; buf_iter < (dev_conf->rxbd_count - 1); buf_iter++) {
		/* Clear 'used' bit -> BD is owned by the controller */
		bdptr->ctrl = 0;
		bdptr->addr = (uint32_t)dev_data->first_rx_buffer +
			      (buf_iter * (uint32_t)dev_conf->rx_buffer_size);
		++bdptr;
	}

	/*
	 * For the last BD, bit [1] must be OR'ed in the buffer memory
	 * address -> this is the 'wrap' bit indicating that this is the
	 * last BD in the ring. This location is used as bits [1..0] can't
	 * be part of the buffer address due to alignment requirements
	 * anyways. Watch out: TX BDs handle this differently, their wrap
	 * bit is located in the BD's control word!
	 */
	bdptr->ctrl = 0; /* BD is owned by the controller */
	bdptr->addr = ((uint32_t)dev_data->first_rx_buffer +
		      (buf_iter * (uint32_t)dev_conf->rx_buffer_size)) |
		      ETH_XLNX_GEM_RXBD_WRAP_BIT;

	/*
	 * Set initial TX BD data -> comp. Zynq-7000 TRM, Chapter 16.3.5,
	 * "Transmit Buffer Descriptor List". TX BD ring data has already
	 * been set up in eth_xlnx_gem_iface_init()
	 */
	bdptr = dev_data->txbd_ring.first_bd;

	for (buf_iter = 0; buf_iter < (dev_conf->txbd_count - 1); buf_iter++) {
		/* Set up the control word -> 'used' flag must be set. */
		bdptr->ctrl = ETH_XLNX_GEM_TXBD_USED_BIT;
		bdptr->addr = (uint32_t)dev_data->first_tx_buffer +
			      (buf_iter * (uint32_t)dev_conf->tx_buffer_size);
		++bdptr;
	}

	/*
	 * For the last BD, set the 'wrap' bit indicating to the controller
	 * that this BD is the last one in the ring. -> For TX BDs, the 'wrap'
	 * bit isn't located in the address word, but in the control word
	 * instead
	 */
	bdptr->ctrl = (ETH_XLNX_GEM_TXBD_WRAP_BIT | ETH_XLNX_GEM_TXBD_USED_BIT);
	bdptr->addr = (uint32_t)dev_data->first_tx_buffer +
		      (buf_iter * (uint32_t)dev_conf->tx_buffer_size);

	/* Set free count/current index in the RX/TX BD ring data */
	dev_data->rxbd_ring.next_to_process = 0;
	dev_data->rxbd_ring.next_to_use = 0;
	dev_data->rxbd_ring.free_bds = dev_conf->rxbd_count;
	dev_data->txbd_ring.next_to_process = 0;
	dev_data->txbd_ring.next_to_use = 0;
	dev_data->txbd_ring.free_bds = dev_conf->txbd_count;

	/* Write pointers to the first RX/TX BD to the controller */
	sys_write32((uint32_t)dev_data->rxbd_ring.first_bd,
		    dev_conf->base_addr + ETH_XLNX_GEM_RXQBASE_OFFSET);
	sys_write32((uint32_t)dev_data->txbd_ring.first_bd,
		    dev_conf->base_addr + ETH_XLNX_GEM_TXQBASE_OFFSET);
}

/**
 * @brief GEM RX data pending handler wrapper for the work queue
 * Wraps the RX data pending handler, eth_xlnx_gem_handle_rx_pending,
 * for the scenario in which the current GEM device is configured
 * to defer RX pending / TX done indication handling to the system
 * work queue. In this case, the work item received by this wrapper
 * function will be enqueued from within the ISR if the corresponding
 * bit is set within the controller's interrupt status register
 * (gem.intr_status).
 *
 * @param item Pointer to the work item enqueued by the ISR which
 *             facilitates access to the current device's data
 */
static void eth_xlnx_gem_rx_pending_work(struct k_work *item)
{
	struct eth_xlnx_gem_dev_data *dev_data = CONTAINER_OF(item,
		struct eth_xlnx_gem_dev_data, rx_pend_work);
	const struct device *dev = net_if_get_device(dev_data->iface);

	eth_xlnx_gem_handle_rx_pending(dev);
}

/**
 * @brief GEM RX data pending handler
 * This handler is called either from within the ISR or from the
 * context of the system work queue whenever the RX data pending bit
 * is set in the controller's interrupt status register (gem.intr_status).
 * No further RX data pending interrupts will be triggered until this
 * handler has been executed, which eventually clears the corresponding
 * interrupt status bit. This function acquires the incoming packet
 * data from the DMA memory area via the RX buffer descriptors and copies
 * the data to a packet which will then be handed over to the network
 * stack.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_handle_rx_pending(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;
	uint32_t reg_addr;
	uint32_t reg_ctrl;
	uint32_t reg_val;
	uint32_t reg_val_rxsr;
	uint8_t first_bd_idx;
	uint8_t last_bd_idx;
	uint8_t curr_bd_idx;
	uint32_t rx_data_length;
	uint32_t rx_data_remaining;
	struct net_pkt *pkt;

	/* Read the RX status register */
	reg_val_rxsr = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_RXSR_OFFSET);

	/*
	 * TODO: evaluate the error flags from the RX status register word
	 * here for proper error handling.
	 */

	while (1) {
		curr_bd_idx = dev_data->rxbd_ring.next_to_process;
		first_bd_idx = last_bd_idx = curr_bd_idx;
		reg_addr = (uint32_t)(&dev_data->rxbd_ring.first_bd[first_bd_idx].addr);
		reg_ctrl = (uint32_t)(&dev_data->rxbd_ring.first_bd[first_bd_idx].ctrl);

		/*
		 * Basic precondition checks for the current BD's
		 * address and control words
		 */
		reg_val = sys_read32(reg_addr);
		if ((reg_val & ETH_XLNX_GEM_RXBD_USED_BIT) == 0) {
			/*
			 * No new data contained in the current BD
			 * -> break out of the RX loop
			 */
			break;
		}
		reg_val = sys_read32(reg_ctrl);
		if ((reg_val & ETH_XLNX_GEM_RXBD_START_OF_FRAME_BIT) == 0) {
			/*
			 * Although the current BD is marked as 'used', it
			 * doesn't contain the SOF bit.
			 */
			LOG_ERR("%s unexpected missing SOF bit in RX BD [%u]",
				dev->name, first_bd_idx);
			break;
		}

		/*
		 * As long as the current BD doesn't have the EOF bit set,
		 * iterate forwards until the EOF bit is encountered. Only
		 * the BD containing the EOF bit also contains the length
		 * of the received packet which spans multiple buffers.
		 */
		do {
			reg_ctrl = (uint32_t)(&dev_data->rxbd_ring.first_bd[last_bd_idx].ctrl);
			reg_val = sys_read32(reg_ctrl);
			rx_data_length = rx_data_remaining =
					 (reg_val & ETH_XLNX_GEM_RXBD_FRAME_LENGTH_MASK);
			if ((reg_val & ETH_XLNX_GEM_RXBD_END_OF_FRAME_BIT) == 0) {
				last_bd_idx = (last_bd_idx + 1) % dev_conf->rxbd_count;
			}
		} while ((reg_val & ETH_XLNX_GEM_RXBD_END_OF_FRAME_BIT) == 0);

		/*
		 * Store the position of the first BD behind the end of the
		 * frame currently being processed as 'next to process'
		 */
		dev_data->rxbd_ring.next_to_process = (last_bd_idx + 1) %
						      dev_conf->rxbd_count;

		/*
		 * Allocate a destination packet from the network stack
		 * now that the total frame length is known.
		 */
		pkt = net_pkt_rx_alloc_with_buffer(dev_data->iface, rx_data_length,
						   AF_UNSPEC, 0, K_NO_WAIT);
1504 if (pkt == NULL) {
1505 LOG_ERR("RX packet buffer alloc failed: %u bytes",
1506 rx_data_length);
1507 #ifdef CONFIG_NET_STATISTICS_ETHERNET
1508 dev_data->stats.errors.rx++;
1509 dev_data->stats.error_details.rx_no_buffer_count++;
1510 #endif
1511 }

		/*
		 * Copy data from all involved RX buffers into the allocated
		 * packet's data buffer. If we don't have a packet buffer
		 * because none are available, we still have to iterate over
		 * all involved BDs in order to properly release them for
		 * re-use by the controller.
		 */
		do {
			if (pkt != NULL) {
				net_pkt_write(pkt, (const void *)
					      (dev_data->rxbd_ring.first_bd[curr_bd_idx].addr &
					      ETH_XLNX_GEM_RXBD_BUFFER_ADDR_MASK),
					      (rx_data_remaining < dev_conf->rx_buffer_size) ?
					      rx_data_remaining : dev_conf->rx_buffer_size);
			}
			rx_data_remaining -= (rx_data_remaining < dev_conf->rx_buffer_size) ?
					     rx_data_remaining : dev_conf->rx_buffer_size;

			/*
			 * The entire packet data of the current BD has been
			 * processed, on to the next BD -> preserve the RX BD's
			 * 'wrap' bit & address, but clear the 'used' bit.
			 */
			reg_addr = (uint32_t)(&dev_data->rxbd_ring.first_bd[curr_bd_idx].addr);
			reg_val = sys_read32(reg_addr);
			reg_val &= ~ETH_XLNX_GEM_RXBD_USED_BIT;
			sys_write32(reg_val, reg_addr);

			curr_bd_idx = (curr_bd_idx + 1) % dev_conf->rxbd_count;
		} while (curr_bd_idx != ((last_bd_idx + 1) % dev_conf->rxbd_count));

		/* Propagate the received packet to the network stack */
		if (pkt != NULL) {
			if (net_recv_data(dev_data->iface, pkt) < 0) {
				LOG_ERR("%s RX packet hand-over to IP stack failed",
					dev->name);
				net_pkt_unref(pkt);
			}
#ifdef CONFIG_NET_STATISTICS_ETHERNET
			else {
				dev_data->stats.bytes.received += rx_data_length;
				dev_data->stats.pkts.rx++;
			}
#endif
		}
	}

	/* Clear the RX status register */
	sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_RXSR_OFFSET);
	/* Re-enable the frame received interrupt source */
	sys_write32(ETH_XLNX_GEM_IXR_FRAME_RX_BIT,
		    dev_conf->base_addr + ETH_XLNX_GEM_IER_OFFSET);
}
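
/*
 * Worked example of the ring index arithmetic used above (illustration
 * only): with rxbd_count = 8, a frame whose SOF BD is index 6 and whose
 * EOF BD is index 1 occupies BDs 6, 7, 0 and 1. The EOF search advances
 * last_bd_idx as (6+1)%8 = 7, (7+1)%8 = 0, (0+1)%8 = 1, where the EOF bit
 * is found; next_to_process then becomes (1+1)%8 = 2. The copy/release
 * loop afterwards visits exactly BDs 6, 7, 0 and 1, since it stops once
 * curr_bd_idx reaches (last_bd_idx+1)%8 = 2.
 */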

/**
 * @brief GEM TX done handler wrapper for the work queue
 * Wraps the TX done handler, eth_xlnx_gem_handle_tx_done,
 * for the scenario in which the current GEM device is configured
 * to defer RX pending / TX done indication handling to the system
 * work queue. In this case, the work item received by this wrapper
 * function will be enqueued from within the ISR if the corresponding
 * bit is set within the controller's interrupt status register
 * (gem.intr_status).
 *
 * @param item Pointer to the work item enqueued by the ISR which
 *             facilitates access to the current device's data
 */
static void eth_xlnx_gem_tx_done_work(struct k_work *item)
{
	struct eth_xlnx_gem_dev_data *dev_data = CONTAINER_OF(item,
		struct eth_xlnx_gem_dev_data, tx_done_work);
	const struct device *dev = net_if_get_device(dev_data->iface);

	eth_xlnx_gem_handle_tx_done(dev);
}

/**
 * @brief GEM TX done handler
 * This handler is called either from within the ISR or from the
 * context of the system work queue whenever the TX done bit is set
 * in the controller's interrupt status register (gem.intr_status).
 * No further TX done interrupts will be triggered until this handler
 * has been executed, as it eventually clears the corresponding
 * interrupt status bit. At the end of its execution, this handler
 * unblocks the eth_xlnx_gem_send call that triggered it by posting
 * the current GEM's TX done semaphore, on which the send function
 * blocks.
 *
 * @param dev Pointer to the device
 */
static void eth_xlnx_gem_handle_tx_done(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;
	uint32_t reg_ctrl;
	uint32_t reg_val;
	uint32_t reg_val_txsr;
	uint8_t curr_bd_idx;
	uint8_t first_bd_idx;
	uint8_t bds_processed = 0;
	uint8_t bd_is_last;

	/* Read the TX status register */
	reg_val_txsr = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_TXSR_OFFSET);

	/*
	 * TODO Evaluate error flags from the TX status register word
	 * here for proper error handling
	 */

	if (dev_conf->defer_txd_to_queue) {
		k_sem_take(&(dev_data->txbd_ring.ring_sem), K_FOREVER);
	}

	curr_bd_idx = first_bd_idx = dev_data->txbd_ring.next_to_process;
	reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl);
	reg_val = sys_read32(reg_ctrl);

	do {
		++bds_processed;

		/*
		 * TODO Evaluate error flags from the current BD's control
		 * word here for proper error handling
		 */

		/*
		 * Check if the BD we're currently looking at is the last BD
		 * of the current transmission
		 */
		bd_is_last = ((reg_val & ETH_XLNX_GEM_TXBD_LAST_BIT) != 0) ? 1 : 0;

		/*
		 * Reset the control word of the current BD: clear everything
		 * but the 'wrap' bit, then set the 'used' bit
		 */
		reg_val &= ETH_XLNX_GEM_TXBD_WRAP_BIT;
		reg_val |= ETH_XLNX_GEM_TXBD_USED_BIT;
		sys_write32(reg_val, reg_ctrl);

		/* Move on to the next BD or break out of the loop */
		if (bd_is_last == 1) {
			break;
		}
		curr_bd_idx = (curr_bd_idx + 1) % dev_conf->txbd_count;
		reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl);
		reg_val = sys_read32(reg_ctrl);
	} while (bd_is_last == 0 && curr_bd_idx != first_bd_idx);

	if (curr_bd_idx == first_bd_idx && bd_is_last == 0) {
		LOG_WRN("%s TX done handling wrapped around", dev->name);
	}

	dev_data->txbd_ring.next_to_process =
		(dev_data->txbd_ring.next_to_process + bds_processed) %
		dev_conf->txbd_count;
	dev_data->txbd_ring.free_bds += bds_processed;

	if (dev_conf->defer_txd_to_queue) {
		k_sem_give(&(dev_data->txbd_ring.ring_sem));
	}

	/* Clear the TX status register */
	sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_TXSR_OFFSET);

	/* Re-enable the TX complete interrupt source */
	sys_write32(ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT,
		    dev_conf->base_addr + ETH_XLNX_GEM_IER_OFFSET);

	/* Indicate completion to a blocking eth_xlnx_gem_send() call */
	k_sem_give(&dev_data->tx_done_sem);
}

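/*
 * Illustrative pairing with the send path (a sketch, not the actual
 * eth_xlnx_gem_send implementation): after setting up the TX BDs and
 * triggering transmission, the send function blocks on the semaphore
 * posted above, e.g.:
 *
 *     k_sem_take(&dev_data->tx_done_sem, K_FOREVER); // or with a timeout
 *
 * Whether K_FOREVER or a finite timeout is used is up to the actual send
 * implementation; this handler posts the semaphore exactly once per
 * handled TX done interrupt.
 */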