Lines matching references to the macro argument port in the per-port instantiation macros of the Xilinx GEM (xlnx,gem) Ethernet driver in Zephyr. Only the lines that mention port are listed, so the source line numbers below are not contiguous.

407 #define ETH_XLNX_GEM_NET_DEV_INIT(port) \
408 ETH_NET_DEVICE_DT_INST_DEFINE(port,\
411 &eth_xlnx_gem##port##_dev_data,\
412 &eth_xlnx_gem##port##_dev_cfg,\
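For orientation: ETH_NET_DEVICE_DT_INST_DEFINE() is Zephyr's standard helper for registering one Ethernet device per devicetree instance. The arguments that do not mention port are elided in the listing; the sketch below only illustrates the usual argument order of such a call. The init function, PM argument, priority symbol, API struct and MTU named here are assumptions for illustration, not lines from the driver.

/* Illustrative sketch only; names marked "assumed" are not from the listing. */
#define ETH_XLNX_GEM_NET_DEV_INIT(port) \
	ETH_NET_DEVICE_DT_INST_DEFINE(port, \
		eth_xlnx_gem_dev_init,            /* init function (assumed name) */ \
		NULL,                             /* no PM device (assumed) */ \
		&eth_xlnx_gem##port##_dev_data, \
		&eth_xlnx_gem##port##_dev_cfg, \
		CONFIG_ETH_INIT_PRIORITY,         /* assumed priority symbol */ \
		&eth_xlnx_gem_apis,               /* ethernet_api struct (assumed name) */ \
		NET_ETH_MTU)                      /* assumed MTU */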
418 #define ETH_XLNX_GEM_DEV_CONFIG(port) \
419 static const struct eth_xlnx_gem_dev_cfg eth_xlnx_gem##port##_dev_cfg = {\
420 .base_addr = DT_REG_ADDR_BY_IDX(DT_INST(port, xlnx_gem), 0),\
421 .config_func = eth_xlnx_gem##port##_irq_config,\
422 .pll_clock_frequency = DT_INST_PROP(port, clock_frequency),\
423 .clk_ctrl_reg_address = DT_REG_ADDR_BY_IDX(DT_INST(port, xlnx_gem), 1),\
425 (DT_INST_PROP(port, mdc_divider)),\
427 (DT_INST_PROP(port, link_speed)),\
428 .init_phy = DT_INST_PROP(port, init_mdio_phy),\
429 .phy_mdio_addr_fix = DT_INST_PROP(port, mdio_phy_address),\
430 .phy_advertise_lower = DT_INST_PROP(port, advertise_lower_link_speeds),\
431 .phy_poll_interval = DT_INST_PROP(port, phy_poll_interval),\
432 .defer_rxp_to_queue = !DT_INST_PROP(port, handle_rx_in_isr),\
433 .defer_txd_to_queue = DT_INST_PROP(port, handle_tx_in_workq),\
435 (DT_INST_PROP(port, amba_ahb_dbus_width)),\
437 (DT_INST_PROP(port, amba_ahb_burst_length)),\
439 (DT_INST_PROP(port, hw_rx_buffer_size)),\
441 (DT_INST_PROP(port, hw_rx_buffer_offset)),\
443 (DT_INST_PROP(port, rx_buffer_descriptors)),\
445 (DT_INST_PROP(port, tx_buffer_descriptors)),\
446 .rx_buffer_size = (((uint16_t)(DT_INST_PROP(port, rx_buffer_size)) +\
448 .tx_buffer_size = (((uint16_t)(DT_INST_PROP(port, tx_buffer_size)) +\
450 .ignore_ipg_rxer = DT_INST_PROP(port, ignore_ipg_rxer),\
451 .disable_reject_nsp = DT_INST_PROP(port, disable_reject_nsp),\
452 .enable_ipg_stretch = DT_INST_PROP(port, ipg_stretch),\
453 .enable_sgmii_mode = DT_INST_PROP(port, sgmii_mode),\
454 .disable_reject_fcs_crc_errors = DT_INST_PROP(port, disable_reject_fcs_crc_errors),\
455 .enable_rx_halfdup_while_tx = DT_INST_PROP(port, rx_halfdup_while_tx),\
456 .enable_rx_chksum_offload = DT_INST_PROP(port, rx_checksum_offload),\
457 .disable_pause_copy = DT_INST_PROP(port, disable_pause_copy),\
458 .discard_rx_fcs = DT_INST_PROP(port, discard_rx_fcs),\
459 .discard_rx_length_errors = DT_INST_PROP(port, discard_rx_length_errors),\
460 .enable_pause = DT_INST_PROP(port, pause_frame),\
461 .enable_tbi = DT_INST_PROP(port, tbi),\
462 .ext_addr_match = DT_INST_PROP(port, ext_address_match),\
463 .enable_1536_frames = DT_INST_PROP(port, long_frame_rx_support),\
464 .enable_ucast_hash = DT_INST_PROP(port, unicast_hash),\
465 .enable_mcast_hash = DT_INST_PROP(port, multicast_hash),\
466 .disable_bcast = DT_INST_PROP(port, reject_broadcast),\
467 .copy_all_frames = DT_INST_PROP(port, promiscuous_mode),\
468 .discard_non_vlan = DT_INST_PROP(port, discard_non_vlan),\
469 .enable_fdx = DT_INST_PROP(port, full_duplex),\
470 .disc_rx_ahb_unavail = DT_INST_PROP(port, discard_rx_frame_ahb_unavail),\
471 .enable_tx_chksum_offload = DT_INST_PROP(port, tx_checksum_offload),\
472 .tx_buffer_size_full = DT_INST_PROP(port, hw_tx_buffer_size_full),\
473 .enable_ahb_packet_endian_swap = DT_INST_PROP(port, ahb_packet_endian_swap),\
474 .enable_ahb_md_endian_swap = DT_INST_PROP(port, ahb_md_endian_swap)\
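All of these initializers populate one read-only, per-port configuration struct. The member names come from the listing; the types below are assumptions (the listing itself only reveals the uint16_t casts on the buffer sizes), so this is just a sketch of the struct's shape.

/* Sketch only; requires <zephyr/device.h>, <stdint.h>, <stdbool.h>. */
struct eth_xlnx_gem_dev_cfg {
	uint32_t base_addr;                  /* GEM register base, reg index 0 */
	void (*config_func)(const struct device *dev); /* per-port IRQ attach hook */
	uint32_t pll_clock_frequency;        /* clock-frequency DT property */
	uint32_t clk_ctrl_reg_address;       /* clock control register, reg index 1 */
	bool init_phy;                       /* init-mdio-phy */
	uint16_t rx_buffer_size;             /* rounded-up rx-buffer-size */
	uint16_t tx_buffer_size;             /* rounded-up tx-buffer-size */
	bool enable_fdx;                     /* full-duplex */
	/* ... the remaining members mirror the DT properties listed above ... */
};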
478 #define ETH_XLNX_GEM_DEV_DATA(port) \
479 static struct eth_xlnx_gem_dev_data eth_xlnx_gem##port##_dev_data = {\
480 .mac_addr = DT_INST_PROP(port, local_mac_address),\
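The matching runtime data struct only needs the port's MAC address filled in at compile time; local-mac-address is a byte-array devicetree property, so DT_INST_PROP() expands to an array initializer. The sketch below shows the relevant member; the rest of the struct is an assumption based on the buffer-descriptor fields referenced further down.

/* Sketch only. */
struct eth_xlnx_gem_dev_data {
	uint8_t mac_addr[6];   /* from DT_INST_PROP(port, local_mac_address) */
	/* plus runtime state such as rxbd_ring, txbd_ring, first_rx_buffer and
	 * first_tx_buffer, which ETH_XLNX_GEM_INIT_BD_RING() wires up below */
};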
491 #define ETH_XLNX_GEM_DMA_AREA_DECL(port) \
492 struct eth_xlnx_dma_area_gem##port {\
493 struct eth_xlnx_gem_bd rx_bd[DT_INST_PROP(port, rx_buffer_descriptors)];\
494 struct eth_xlnx_gem_bd tx_bd[DT_INST_PROP(port, tx_buffer_descriptors)];\
496 [DT_INST_PROP(port, rx_buffer_descriptors)]\
497 [((DT_INST_PROP(port, rx_buffer_size)\
501 [DT_INST_PROP(port, tx_buffer_descriptors)]\
502 [((DT_INST_PROP(port, tx_buffer_size)\
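Both the per-descriptor buffer arrays here and the rx_buffer_size/tx_buffer_size config members above round the devicetree buffer size up before use; the exact divisor is cut off in this listing. Zephyr's ROUND_UP() helper from <zephyr/sys/util.h> expresses the usual pattern; the alignment value below is purely illustrative.

/* Illustrative only: the real rounding granularity is not visible above. */
#include <stdint.h>
#include <zephyr/sys/util.h>

/* ROUND_UP(1518, 64) == 1536: a 1518-byte frame buffer padded up to the
 * next multiple of 64 bytes. */
uint8_t rx_buffer_example[ROUND_UP(1518, 64)];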
508 #define ETH_XLNX_GEM_DMA_AREA_INST(port) \
509 static struct eth_xlnx_dma_area_gem##port eth_xlnx_gem##port##_dma_area\
513 #define ETH_XLNX_GEM_CONFIG_IRQ_FUNC(port) \
514 static void eth_xlnx_gem##port##_irq_config(const struct device *dev)\
517 IRQ_CONNECT(DT_INST_IRQN(port), DT_INST_IRQ(port, priority),\
518 eth_xlnx_gem_isr, DEVICE_DT_INST_GET(port), 0);\
519 irq_enable(DT_INST_IRQN(port));\
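The generated eth_xlnx_gem<port>_irq_config() function is what the .config_func member in the per-port config points at. A sketch of how such a hook is typically invoked from the common init path follows; the init function name is an assumption.

/* Sketch; eth_xlnx_gem_dev_init is an assumed name for the common init. */
static int eth_xlnx_gem_dev_init(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;

	/* ... controller and PHY setup elided ... */
	dev_conf->config_func(dev);   /* IRQ_CONNECT + irq_enable for this port */
	return 0;
}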
523 #define ETH_XLNX_GEM_INIT_BD_RING(port) \
524 if (dev_conf->base_addr == DT_REG_ADDR_BY_IDX(DT_INST(port, xlnx_gem), 0)) {\
525 dev_data->rxbd_ring.first_bd = &(eth_xlnx_gem##port##_dma_area.rx_bd[0]);\
526 dev_data->txbd_ring.first_bd = &(eth_xlnx_gem##port##_dma_area.tx_bd[0]);\
527 dev_data->first_rx_buffer = (uint8_t *)eth_xlnx_gem##port##_dma_area.rx_buffer;\
528 dev_data->first_tx_buffer = (uint8_t *)eth_xlnx_gem##port##_dma_area.tx_buffer;\
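Because ETH_XLNX_GEM_INIT_BD_RING() compares the runtime base address against each instance's register address, it is written to be expanded once per enabled instance inside a common setup function, with only the matching branch taking effect. A hedged sketch of that expansion site, assuming a DT_INST_FOREACH-style invocation and an illustrative function name:

/* Sketch only; the function name and expansion site are assumptions. */
static void eth_xlnx_gem_setup_bd_rings(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;

	/* Expands the if-block above once per enabled instance; only the
	 * instance whose base_addr matches dev_conf->base_addr is hooked up. */
	DT_INST_FOREACH_STATUS_OKAY(ETH_XLNX_GEM_INIT_BD_RING)
}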
532 #define ETH_XLNX_GEM_INITIALIZE(port) \
533 ETH_XLNX_GEM_CONFIG_IRQ_FUNC(port);\
534 ETH_XLNX_GEM_DEV_CONFIG(port);\
535 ETH_XLNX_GEM_DEV_DATA(port);\
536 ETH_XLNX_GEM_DMA_AREA_DECL(port);\
537 ETH_XLNX_GEM_DMA_AREA_INST(port);\
538 ETH_XLNX_GEM_NET_DEV_INIT(port);\
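ETH_XLNX_GEM_INITIALIZE() bundles all of the pieces above for one port. In the usual Zephyr driver pattern (an assumption about code not shown in this listing), the source file defines DT_DRV_COMPAT and expands the bundle once per enabled devicetree instance:

/* Sketch of the conventional expansion site; not shown in the listing above. */
#define DT_DRV_COMPAT xlnx_gem

DT_INST_FOREACH_STATUS_OKAY(ETH_XLNX_GEM_INITIALIZE)

Note that DT_DRV_COMPAT must be defined before any DT_INST_*() macro is used, so in practice it sits at the top of the file rather than next to the expansion.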