Searched refs: MLX5_MAX_PORTS (Results 1 – 9 of 9) sorted by relevance
47    struct netdev_lag_lower_state_info netdev_state[MLX5_MAX_PORTS];
76    u8 v2p_map[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS];
78    struct lag_func pf[MLX5_MAX_PORTS];
95    MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS) in mlx5_is_lag_supported()
70    u8 enabled_ports[MLX5_MAX_PORTS] = {}; in lag_active_port_bits()
185   char buf[MLX5_MAX_PORTS * 10 + 1] = {}; in mlx5_lag_print_mapping()
186   u8 enabled_ports[MLX5_MAX_PORTS] = {}; in mlx5_lag_print_mapping()
318   int disabled[MLX5_MAX_PORTS] = {}; in mlx5_infer_tx_affinity_mapping()
319   int enabled[MLX5_MAX_PORTS] = {}; in mlx5_infer_tx_affinity_mapping()
385   u8 disabled_ports[MLX5_MAX_PORTS] = {}; in mlx5_lag_drop_rule_setup()
452   u8 ports[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS] = {}; in mlx5_modify_lag()
1243  (MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS || in mlx5_lag_add_mdev()
1539  mdev = kvzalloc(sizeof(mdev[0]) * MLX5_MAX_PORTS, GFP_KERNEL); in mlx5_lag_query_cong_counters()
16    struct mlx5_flow_handle *rules[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS];
104   u8 ports[MLX5_MAX_PORTS] = {}; in mapping_show()
606   (MLX5_CAP_GEN(mdev, num_lag_ports) > MLX5_MAX_PORTS || in next_phys_dev_lag()
185   return clamp_t(u8, MLX5_CAP_GEN(mdev, num_lag_ports), 1, MLX5_MAX_PORTS); in mlx5e_get_num_lag_ports()
910   u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
1658  count = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ); in esw_create_send_to_vport_group()
1856  table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) + in esw_create_offloads_fdb_tables()
87    MLX5_MAX_PORTS = 4, enumerator
1270  if (idx >= 1 && idx <= MLX5_MAX_PORTS) in mlx5_get_dev_index()
1106  struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
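
The hits above share one pattern: a compile-time port limit (MLX5_MAX_PORTS = 4) sizes the driver's per-port arrays, and the firmware-reported num_lag_ports capability is rejected when it exceeds that limit. Below is a minimal, self-contained C sketch of that pattern; it is not taken from the mlx5 sources, and every name other than the mirrored MLX5_MAX_PORTS value is hypothetical.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's MLX5_MAX_PORTS = 4 enumerator; the name is hypothetical. */
enum {
	EXAMPLE_MAX_PORTS = 4,
};

/* Per-port state sized by the compile-time limit, like netdev_state[] and pf[] above. */
struct example_lag {
	uint8_t active[EXAMPLE_MAX_PORTS];
};

/*
 * Reject devices that report more LAG ports than the fixed-size arrays can
 * hold -- the same shape of check as the "num_lag_ports > MLX5_MAX_PORTS"
 * tests seen in mlx5_is_lag_supported() and mlx5_lag_add_mdev().
 */
static bool example_lag_supported(uint8_t num_lag_ports)
{
	return num_lag_ports <= EXAMPLE_MAX_PORTS;
}

int main(void)
{
	struct example_lag lag = { .active = { 1, 1 } };

	printf("ports 0/1 active: %u %u\n", lag.active[0], lag.active[1]);
	printf("2 reported ports supported: %d\n", example_lag_supported(2));
	printf("8 reported ports supported: %d\n", example_lag_supported(8));
	return 0;
}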