/Linux-v5.15/drivers/net/ethernet/mellanox/mlx5/core/lib/

sf.h
    11   return MLX5_CAP_GEN(dev, sf_base_id);  in mlx5_sf_start_function_id()
    18   return MLX5_CAP_GEN(dev, sf);  in mlx5_sf_supported()
    25   if (MLX5_CAP_GEN(dev, max_num_sf))  in mlx5_sf_max_functions()
    26   return MLX5_CAP_GEN(dev, max_num_sf);  in mlx5_sf_max_functions()
    28   return 1 << MLX5_CAP_GEN(dev, log_max_sf);  in mlx5_sf_max_functions()
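The sf.h hits show a sizing idiom that repeats throughout this listing: an explicit maximum capability is used when the device reports one, otherwise the value is derived from a log2 field. A minimal sketch of mlx5_sf_max_functions() reconstructed only from the lines above (the include, the exact signature, and any surrounding checks in the real file are assumptions):

    #include <linux/mlx5/driver.h>  /* struct mlx5_core_dev, MLX5_CAP_GEN() */

    /* Reconstructed from the sf.h hits above; illustrative, not verbatim kernel code. */
    static inline u16 mlx5_sf_max_functions(const struct mlx5_core_dev *dev)
    {
            if (MLX5_CAP_GEN(dev, max_num_sf))         /* explicit maximum advertised */
                    return MLX5_CAP_GEN(dev, max_num_sf);
            return 1 << MLX5_CAP_GEN(dev, log_max_sf); /* otherwise 2^log_max_sf */
    }

The same explicit-max-or-log2 fallback shows up again for EQ counts (pci_irq.c and eq.c below) and for the SF device table (sf/dev/dev.c).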
|
clock.h
    38   u8 rq_ts_format_cap = MLX5_CAP_GEN(mdev, rq_ts_format);  in mlx5_is_real_time_rq()
    47   u8 sq_ts_format_cap = MLX5_CAP_GEN(mdev, sq_ts_format);  in mlx5_is_real_time_sq()
|
gid.c
    130  if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)  in mlx5_core_roce_gid_set()
    145  if (MLX5_CAP_GEN(dev, num_vhca_ports) > 0)  in mlx5_core_roce_gid_set()
|
/Linux-v5.15/drivers/net/ethernet/mellanox/mlx5/core/

fw.c
    151  if (MLX5_CAP_GEN(dev, hca_cap_2)) {  in mlx5_query_hca_caps()
    157  if (MLX5_CAP_GEN(dev, eth_net_offloads)) {  in mlx5_query_hca_caps()
    163  if (MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {  in mlx5_query_hca_caps()
    169  if (MLX5_CAP_GEN(dev, pg)) {  in mlx5_query_hca_caps()
    175  if (MLX5_CAP_GEN(dev, atomic)) {  in mlx5_query_hca_caps()
    181  if (MLX5_CAP_GEN(dev, roce)) {  in mlx5_query_hca_caps()
    187  if (MLX5_CAP_GEN(dev, nic_flow_table) ||  in mlx5_query_hca_caps()
    188  MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {  in mlx5_query_hca_caps()
    194  if (MLX5_CAP_GEN(dev, vport_group_manager) &&  in mlx5_query_hca_caps()
    207  if (MLX5_CAP_GEN(dev, vector_calc)) {  in mlx5_query_hca_caps()
    [all …]
|
vport.c
    271  1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :  in mlx5_query_nic_vport_mac_list()
    272  1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);  in mlx5_query_nic_vport_mac_list()
    329  1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :  in mlx5_modify_nic_vport_mac_list()
    330  1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);  in mlx5_modify_nic_vport_mac_list()
    380  max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);  in mlx5_modify_nic_vport_vlans()
    468  if (!MLX5_CAP_GEN(mdev, vport_group_manager))  in mlx5_modify_nic_vport_node_guid()
    528  is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);  in mlx5_query_hca_vport_gid()
    529  tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));  in mlx5_query_hca_vport_gid()
    562  if (MLX5_CAP_GEN(dev, num_ports) == 2)  in mlx5_query_hca_vport_gid()
    595  is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);  in mlx5_query_hca_vport_pkey()
    [all …]
|
en_dcbnl.c
    60   #define MLX5_DSCP_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, qcam_reg) && \
    92   if (!MLX5_CAP_GEN(priv->mdev, dcbx))  in mlx5e_dcbnl_switch_to_host_mode()
    117  if (!MLX5_CAP_GEN(priv->mdev, ets))  in mlx5e_dcbnl_ieee_getets()
    326  if (!MLX5_CAP_GEN(priv->mdev, ets))  in mlx5e_dcbnl_ieee_setets()
    422  if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {  in mlx5e_dcbnl_setdcbx()
    454  if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||  in mlx5e_dcbnl_ieee_setapp()
    507  if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||  in mlx5e_dcbnl_ieee_delapp()
    629  if (!MLX5_CAP_GEN(mdev, ets))  in mlx5e_dcbnl_setall()
    738  if (!MLX5_CAP_GEN(priv->mdev, ets)) {  in mlx5e_dcbnl_getpgtccfgtx()
    1025 if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))  in mlx5e_dcbnl_build_netdev()
    [all …]
|
pci_irq.c
    70   min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size);  in mlx5_get_default_msix_vec_count()
    71   max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size);  in mlx5_get_default_msix_vec_count()
    100  if (!MLX5_CAP_GEN(dev, vport_group_manager) || !mlx5_core_is_pf(dev))  in mlx5_set_msix_vec_count()
    103  min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size);  in mlx5_set_msix_vec_count()
    104  max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size);  in mlx5_set_msix_vec_count()
    585  int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?  in mlx5_irq_table_create()
    586  MLX5_CAP_GEN(dev, max_num_eqs) :  in mlx5_irq_table_create()
    587  1 << MLX5_CAP_GEN(dev, log_max_eq);  in mlx5_irq_table_create()
    595  pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +  in mlx5_irq_table_create()
|
mlx5_core.h
    201  #define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \
    202  MLX5_CAP_GEN((mdev), pps_modify) && \
    231  return MLX5_CAP_GEN(dev, vport_group_manager) &&  in mlx5_lag_is_lacp_owner()
    232  (MLX5_CAP_GEN(dev, num_lag_ports) > 1) &&  in mlx5_lag_is_lacp_owner()
    233  MLX5_CAP_GEN(dev, lag_master);  in mlx5_lag_is_lacp_owner()
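Several entries combine more than one general capability into a single predicate. The mlx5_core.h hits at lines 231-233 show the whole condition of one such helper; reconstructed here as a standalone sketch (the signature is an assumption, the condition is exactly what the listing shows):

    /* A device owns LACP for a LAG bond only when it is the vport group
     * manager, more than one LAG port exists, and lag_master is set. */
    static inline bool mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
    {
            return MLX5_CAP_GEN(dev, vport_group_manager) &&
                   (MLX5_CAP_GEN(dev, num_lag_ports) > 1) &&
                   MLX5_CAP_GEN(dev, lag_master);
    }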
|
uar.c
    65   if (MLX5_CAP_GEN(mdev, uar_4k))  in uars_per_sys_page()
    66   return MLX5_CAP_GEN(mdev, num_of_uars_per_page);  in uars_per_sys_page()
    75   if (MLX5_CAP_GEN(mdev, uar_4k))  in uar2pfn()
    201  (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) + MLX5_BF_OFFSET;  in map_offset()
    281  bf_reg_size = 1 << MLX5_CAP_GEN(dev, log_bf_reg_size);  in addr_to_dbi_in_syspage()
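The uar.c hits at lines 65-66 show the UAR layout depending on the uar_4k capability: when 4 KB UARs are supported, the number of UARs per system page is itself a capability field. A sketch of uars_per_sys_page() built from those two lines; the value returned when uar_4k is not set is not visible in the listing, so that return is an assumption:

    /* Sketch only: the else-path value is assumed, not taken from the listing. */
    static int uars_per_sys_page(struct mlx5_core_dev *mdev)
    {
            if (MLX5_CAP_GEN(mdev, uar_4k))
                    return MLX5_CAP_GEN(mdev, num_of_uars_per_page);
            return 1;       /* assumed default: one UAR per system page */
    }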
|
dev.c
    61   if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)  in mlx5_eth_supported()
    64   if (!MLX5_CAP_GEN(dev, eth_net_offloads)) {  in mlx5_eth_supported()
    69   if (!MLX5_CAP_GEN(dev, nic_flow_table)) {  in mlx5_eth_supported()
    102  if (!MLX5_CAP_GEN(dev, cq_moderation))  in mlx5_eth_supported()
    186  if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)  in is_mp_supported()
|
eq.c
    311  if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx))  in create_map_eq()
    549  if (MLX5_CAP_GEN(dev, general_notification_event))  in gather_async_events_mask()
    552  if (MLX5_CAP_GEN(dev, port_module_event))  in gather_async_events_mask()
    560  if (MLX5_CAP_GEN(dev, fpga))  in gather_async_events_mask()
    566  if (MLX5_CAP_GEN(dev, temp_warn_event))  in gather_async_events_mask()
    572  if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))  in gather_async_events_mask()
    584  if (MLX5_CAP_GEN(dev, event_cap))  in gather_async_events_mask()
    997  int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?  in mlx5_eq_table_create()
    998  MLX5_CAP_GEN(dev, max_num_eqs) :  in mlx5_eq_table_create()
    999  1 << MLX5_CAP_GEN(dev, log_max_eq);  in mlx5_eq_table_create()
|
en_ethtool.c
    520  if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))  in mlx5e_ethtool_get_coalesce()
    593  if (!MLX5_CAP_GEN(mdev, cq_moderation))  in mlx5e_ethtool_set_coalesce()
    1410 if (!MLX5_CAP_GEN(mdev, vport_group_manager))  in mlx5e_ethtool_set_pauseparam()
    1442 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||  in mlx5e_ethtool_get_ts_info()
    1471 if (MLX5_CAP_GEN(mdev, wol_g))  in mlx5e_get_wol_supported()
    1474 if (MLX5_CAP_GEN(mdev, wol_s))  in mlx5e_get_wol_supported()
    1477 if (MLX5_CAP_GEN(mdev, wol_a))  in mlx5e_get_wol_supported()
    1480 if (MLX5_CAP_GEN(mdev, wol_b))  in mlx5e_get_wol_supported()
    1483 if (MLX5_CAP_GEN(mdev, wol_m))  in mlx5e_get_wol_supported()
    1486 if (MLX5_CAP_GEN(mdev, wol_u))  in mlx5e_get_wol_supported()
    [all …]
|
en_common.c
    42   bool ro_write = MLX5_CAP_GEN(mdev, relaxed_ordering_write);  in mlx5e_mkey_set_relaxed_ordering()
    43   bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read);  in mlx5e_mkey_set_relaxed_ordering()
|
/Linux-v5.15/drivers/infiniband/hw/mlx5/

main.c
    108  int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);  in mlx5_ib_port_link_layer()
    524  props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);  in mlx5_query_port_roce()
    629  if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)  in mlx5_use_mad_ifc()
    630  return !MLX5_CAP_GEN(dev->mdev, ib_virt);  in mlx5_use_mad_ifc()
    726  *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,  in mlx5_query_max_pkeys()
    810  u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);  in mlx5_ib_query_device()
    845  if (MLX5_CAP_GEN(mdev, pkv))  in mlx5_ib_query_device()
    847  if (MLX5_CAP_GEN(mdev, qkv))  in mlx5_ib_query_device()
    849  if (MLX5_CAP_GEN(mdev, apm))  in mlx5_ib_query_device()
    851  if (MLX5_CAP_GEN(mdev, xrc))  in mlx5_ib_query_device()
    [all …]
|
counters.c
    272  if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {  in mlx5_ib_get_hw_stats()
    387  if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {  in mlx5_ib_fill_counters()
    394  if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {  in mlx5_ib_fill_counters()
    401  if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) {  in mlx5_ib_fill_counters()
    408  if (MLX5_CAP_GEN(dev->mdev, roce_accl)) {  in mlx5_ib_fill_counters()
    415  if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {  in mlx5_ib_fill_counters()
    438  if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt))  in __mlx5_ib_alloc_counters()
    441  if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))  in __mlx5_ib_alloc_counters()
    444  if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters))  in __mlx5_ib_alloc_counters()
    447  if (MLX5_CAP_GEN(dev->mdev, roce_accl))  in __mlx5_ib_alloc_counters()
    [all …]
|
qp.c
    358  if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))  in set_rq_size()
    393  if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {  in set_rq_size()
    396  MLX5_CAP_GEN(dev->mdev,  in set_rq_size()
    523  if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {  in calc_sq_size()
    525  wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));  in calc_sq_size()
    535  if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {  in calc_sq_size()
    539  1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));  in calc_sq_size()
    562  if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {  in set_user_buf_size()
    564  desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));  in set_user_buf_size()
    576  if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {  in set_user_buf_size()
    [all …]
|
mlx5_ib.h
    1512 return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?  in get_uars_per_sys_page()
    1539 if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))  in mlx5_ib_can_load_pas_with_umr()
    1546 if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&  in mlx5_ib_can_load_pas_with_umr()
    1564 MLX5_CAP_GEN(dev->mdev, atomic) &&  in mlx5_ib_can_reconfig_with_umr()
    1565 MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))  in mlx5_ib_can_reconfig_with_umr()
    1569 MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&  in mlx5_ib_can_reconfig_with_umr()
    1570 !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))  in mlx5_ib_can_reconfig_with_umr()
    1574 MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&  in mlx5_ib_can_reconfig_with_umr()
    1575 !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))  in mlx5_ib_can_reconfig_with_umr()
    1609 (MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&  in mlx5_ib_lag_should_assign_affinity()
    [all …]
|
/Linux-v5.15/drivers/net/ethernet/mellanox/mlx5/core/en/

monitor_stats.c
    27   if (!MLX5_CAP_GEN(mdev, max_num_of_monitor_counters))  in mlx5e_monitor_counter_supported()
    30   MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters) <  in mlx5e_monitor_counter_supported()
    33   if (MLX5_CAP_GEN(mdev, num_q_monitor_counters) <  in mlx5e_monitor_counter_supported()
    103  int max_num_of_counters = MLX5_CAP_GEN(mdev, max_num_of_monitor_counters);  in mlx5e_set_monitor_counter()
    104  int num_q_counters = MLX5_CAP_GEN(mdev, num_q_monitor_counters);  in mlx5e_set_monitor_counter()
    106  MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters);  in mlx5e_set_monitor_counter()
|
/Linux-v5.15/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/

dev.c
    24   return MLX5_CAP_GEN(dev, sf) && mlx5_vhca_event_supported(dev);  in mlx5_sf_dev_supported()
    159  base_id = MLX5_CAP_GEN(table->dev, sf_base_id);  in mlx5_sf_dev_state_change_handler()
    198  function_id = MLX5_CAP_GEN(dev, sf_base_id);  in mlx5_sf_dev_vhca_arm_all()
    227  if (MLX5_CAP_GEN(dev, max_num_sf))  in mlx5_sf_dev_table_create()
    228  max_sfs = MLX5_CAP_GEN(dev, max_num_sf);  in mlx5_sf_dev_table_create()
    230  max_sfs = 1 << MLX5_CAP_GEN(dev, log_max_sf);  in mlx5_sf_dev_table_create()
    231  table->sf_bar_length = 1 << (MLX5_CAP_GEN(dev, log_min_sf_size) + 12);  in mlx5_sf_dev_table_create()
|
/Linux-v5.15/drivers/net/ethernet/mellanox/mlx5/core/steering/

dr_cmd.c
    115  caps->prio_tag_required = MLX5_CAP_GEN(mdev, prio_tag_required);  in mlx5dr_cmd_query_device()
    116  caps->eswitch_manager = MLX5_CAP_GEN(mdev, eswitch_manager);  in mlx5dr_cmd_query_device()
    117  caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id);  in mlx5dr_cmd_query_device()
    118  caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols);  in mlx5dr_cmd_query_device()
    119  caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version);  in mlx5dr_cmd_query_device()
    121  if (MLX5_CAP_GEN(mdev, roce)) {  in mlx5dr_cmd_query_device()
    133  caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new);  in mlx5dr_cmd_query_device()
    136  caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);  in mlx5dr_cmd_query_device()
    137  caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);  in mlx5dr_cmd_query_device()
    142  MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw0);  in mlx5dr_cmd_query_device()
    [all …]
|
mlx5dr.h
    135  return MLX5_CAP_GEN(dev, roce) &&  in mlx5dr_is_supported()
    138  (MLX5_CAP_GEN(dev, steering_format_version) <=  in mlx5dr_is_supported()
|
/Linux-v5.15/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/

egress_ofld.c
    63   if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {  in esw_acl_egress_ofld_rules_create()
    111  if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {  in esw_acl_egress_ofld_groups_create()
    178  !MLX5_CAP_GEN(esw->dev, prio_tag_required))  in esw_acl_egress_ofld_setup()
    188  if (MLX5_CAP_GEN(esw->dev, prio_tag_required))  in esw_acl_egress_ofld_setup()
    244  fwd_dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);  in mlx5_esw_acl_egress_vport_bond()
|
/Linux-v5.15/drivers/net/ethernet/mellanox/mlx5/core/accel/

ipsec_offload.h
    15   if (!MLX5_CAP_GEN(mdev, ipsec_offload))  in mlx5_is_ipsec_device()
    18   if (!MLX5_CAP_GEN(mdev, log_max_dek))  in mlx5_is_ipsec_device()
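The accel headers gate whole offload features on general capabilities before any feature-specific setup runs. For IPsec, the two MLX5_CAP_GEN() checks above are only the first part of the decision; they are sketched below with the remaining, non-GEN checks elided (the trailing return value stands in for checks this listing does not show):

    /* Partial sketch of mlx5_is_ipsec_device(): only the MLX5_CAP_GEN() checks
     * from the hits above; the kernel function also tests other capability
     * groups before declaring IPsec offload usable. */
    static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev)
    {
            if (!MLX5_CAP_GEN(mdev, ipsec_offload))
                    return false;
            if (!MLX5_CAP_GEN(mdev, log_max_dek))
                    return false;
            /* ... additional capability checks elided ... */
            return true;
    }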
|
tls.h
    48   return MLX5_CAP_GEN(mdev, tls_tx);  in mlx5_accel_is_ktls_tx()
    53   return MLX5_CAP_GEN(mdev, tls_rx);  in mlx5_accel_is_ktls_rx()
    62   if (!MLX5_CAP_GEN(mdev, log_max_dek))  in mlx5_accel_is_ktls_device()
|
/Linux-v5.15/include/linux/mlx5/

vport.h
    40   (MLX5_CAP_GEN(mdev, vport_group_manager) && \
    41   (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
|