/Linux-v5.15/drivers/net/ethernet/mellanox/mlx5/core/steering/ |
D | dr_domain.c |
       7  #define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \  [argument]
       8  ((dmn)->info.caps.dmn_type##_sw_owner || \
       9  ((dmn)->info.caps.dmn_type##_sw_owner_v2 && \
      10  (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX))
      12  static int dr_domain_init_cache(struct mlx5dr_domain *dmn)  [in dr_domain_init_cache(), argument]
      17  dmn->cache.recalc_cs_ft = kcalloc(dmn->info.caps.num_vports,  [in dr_domain_init_cache()]
      18  sizeof(dmn->cache.recalc_cs_ft[0]),  [in dr_domain_init_cache()]
      20  if (!dmn->cache.recalc_cs_ft)  [in dr_domain_init_cache()]
      26  static void dr_domain_uninit_cache(struct mlx5dr_domain *dmn)  [in dr_domain_uninit_cache(), argument]
      30  for (i = 0; i < dmn->info.caps.num_vports; i++) {  [in dr_domain_uninit_cache()]
      [all …]
|
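A note on the DR_DOMAIN_SW_STEERING_SUPPORTED() macro in dr_domain.c above: it uses token pasting, so the dmn_type argument (rx, tx or fdb in the callers) is glued onto the _sw_owner / _sw_owner_v2 capability field names. Below is a minimal, standalone sketch of that expansion; the struct layout, the rx_* field names and the format value are simplified stand-ins inferred from the pasting, not the driver's real definitions.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for dmn->info.caps; the rx_* field names are an
     * assumption implied by the dmn_type##_sw_owner pasting, and the format
     * value below is a placeholder, not the kernel enum's real value.
     */
    #define MLX5_STEERING_FORMAT_CONNECTX_6DX 1

    struct caps {
        bool rx_sw_owner;
        bool rx_sw_owner_v2;
        int sw_format_ver;
    };

    struct domain {
        struct { struct caps caps; } info;
    };

    /* Same shape as the macro in dr_domain.c: SW steering is available either
     * through the v1 ownership bit, or through the v2 bit when the STE format
     * is ConnectX-6DX or older.
     */
    #define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type)                     \
        ((dmn)->info.caps.dmn_type##_sw_owner ||                               \
         ((dmn)->info.caps.dmn_type##_sw_owner_v2 &&                           \
          (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX))

    int main(void)
    {
        struct domain d = {
            .info.caps = { .rx_sw_owner_v2 = true, .sw_format_ver = 1 },
        };

        /* Expands to: d.info.caps.rx_sw_owner ||
         *             (d.info.caps.rx_sw_owner_v2 && d.info.caps.sw_format_ver <= 1)
         */
        printf("rx sw steering supported: %d\n",
               (int)DR_DOMAIN_SW_STEERING_SUPPORTED(&d, rx));
        return 0;
    }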
D | dr_fw.c |
       8  mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num)  [in mlx5dr_fw_create_recalc_cs_ft(), argument]
      21  ft_attr.level = dmn->info.caps.max_ft_level - 1;  [in mlx5dr_fw_create_recalc_cs_ft()]
      24  ret = mlx5dr_cmd_create_flow_table(dmn->mdev,  [in mlx5dr_fw_create_recalc_cs_ft()]
      29  mlx5dr_err(dmn, "Failed creating TTL W/A FW flow table %d\n", ret);  [in mlx5dr_fw_create_recalc_cs_ft()]
      33  ret = mlx5dr_cmd_create_empty_flow_group(dmn->mdev,  [in mlx5dr_fw_create_recalc_cs_ft()]
      37  mlx5dr_err(dmn, "Failed creating TTL W/A FW flow group %d\n", ret);  [in mlx5dr_fw_create_recalc_cs_ft()]
      46  ret = mlx5dr_cmd_alloc_modify_header(dmn->mdev, MLX5_FLOW_TABLE_TYPE_FDB, 1,  [in mlx5dr_fw_create_recalc_cs_ft()]
      50  mlx5dr_err(dmn, "Failed modify header TTL %d\n", ret);  [in mlx5dr_fw_create_recalc_cs_ft()]
      54  ret = mlx5dr_cmd_set_fte_modify_and_vport(dmn->mdev,  [in mlx5dr_fw_create_recalc_cs_ft()]
      59  mlx5dr_err(dmn, "Failed setting TTL W/A flow table entry %d\n", ret);  [in mlx5dr_fw_create_recalc_cs_ft()]
      [all …]
|
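The dr_fw.c hits above outline the TTL checksum-recalculation workaround: create a FW flow table at max_ft_level - 1, add an empty flow group, allocate a modify-header action, then install a single FTE, logging through mlx5dr_err() and unwinding on each failure. The standalone sketch below models only that goto-based unwind ladder; the step and cleanup functions are stubs named after (not taken from) the mlx5dr_cmd_* helpers in the listing.

    #include <stdio.h>

    /* Hypothetical stubs standing in for mlx5dr_cmd_create_flow_table(),
     * mlx5dr_cmd_create_empty_flow_group(), mlx5dr_cmd_alloc_modify_header()
     * and mlx5dr_cmd_set_fte_modify_and_vport(); 0 means success.
     */
    static int create_flow_table(void)        { return 0; }
    static int create_empty_flow_group(void)  { return 0; }
    static int alloc_modify_header(void)      { return 0; }
    static int set_fte_modify_and_vport(void) { return 0; }

    /* Hypothetical teardown stubs for the unwind path. */
    static void free_modify_header(void) { }
    static void destroy_flow_group(void) { }
    static void destroy_flow_table(void) { }

    /* Approximate shape of mlx5dr_fw_create_recalc_cs_ft(): each step that can
     * fail jumps to a label that releases only what was created before it.
     */
    static int create_recalc_cs_ft(void)
    {
        int ret;

        ret = create_flow_table();
        if (ret) {
            fprintf(stderr, "Failed creating TTL W/A FW flow table %d\n", ret);
            return ret;
        }

        ret = create_empty_flow_group();
        if (ret) {
            fprintf(stderr, "Failed creating TTL W/A FW flow group %d\n", ret);
            goto free_ft;
        }

        ret = alloc_modify_header();
        if (ret) {
            fprintf(stderr, "Failed modify header TTL %d\n", ret);
            goto free_fg;
        }

        ret = set_fte_modify_and_vport();
        if (ret) {
            fprintf(stderr, "Failed setting TTL W/A flow table entry %d\n", ret);
            goto free_modify_hdr;
        }
        return 0;

    free_modify_hdr:
        free_modify_header();
    free_fg:
        destroy_flow_group();
    free_ft:
        destroy_flow_table();
        return ret;
    }

    int main(void)
    {
        return create_recalc_cs_ft();
    }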
D | dr_action.c |
     436  static void dr_actions_apply(struct mlx5dr_domain *dmn,  [in dr_actions_apply(), argument]
     443  struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;  [in dr_actions_apply()]
     447  mlx5dr_ste_set_actions_rx(ste_ctx, dmn, action_type_set,  [in dr_actions_apply()]
     450  mlx5dr_ste_set_actions_tx(ste_ctx, dmn, action_type_set,  [in dr_actions_apply()]
     491  static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,  [in dr_action_handle_cs_recalc(), argument]
     506  mlx5dr_dbg(dmn,  [in dr_action_handle_cs_recalc()]
     516  ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport->dmn,  [in dr_action_handle_cs_recalc()]
     520  mlx5dr_err(dmn, "Failed to get FW cs recalc flow table\n");  [in dr_action_handle_cs_recalc()]
     532  static void dr_action_print_sequence(struct mlx5dr_domain *dmn,  [in dr_action_print_sequence(), argument]
     539  mlx5dr_err(dmn, "< %s (%d) > ",  [in dr_action_print_sequence()]
      [all …]
|
D | dr_matcher.c |
     124  struct mlx5dr_domain *dmn)  [in dr_mask_is_tnl_vxlan_gpe(), argument]
     127  dr_matcher_supp_vxlan_gpe(&dmn->info.caps);  [in dr_mask_is_tnl_vxlan_gpe()]
     152  struct mlx5dr_domain *dmn)  [in dr_mask_is_tnl_geneve(), argument]
     155  dr_matcher_supp_tnl_geneve(&dmn->info.caps);  [in dr_mask_is_tnl_geneve()]
     169  struct mlx5dr_domain *dmn)  [in dr_mask_is_tnl_gtpu(), argument]
     172  dr_matcher_supp_tnl_gtpu(&dmn->info.caps);  [in dr_mask_is_tnl_gtpu()]
     181  struct mlx5dr_domain *dmn)  [in dr_mask_is_tnl_gtpu_dw_0(), argument]
     184  dr_matcher_supp_tnl_gtpu_dw_0(&dmn->info.caps);  [in dr_mask_is_tnl_gtpu_dw_0()]
     193  struct mlx5dr_domain *dmn)  [in dr_mask_is_tnl_gtpu_teid(), argument]
     196  dr_matcher_supp_tnl_gtpu_teid(&dmn->info.caps);  [in dr_mask_is_tnl_gtpu_teid()]
      [all …]
|
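Each dr_mask_is_tnl_*() helper in dr_matcher.c pairs a check on the match mask with a capability check on dmn->info.caps (the dr_matcher_supp_*() calls visible above), so a tunnel-specific STE builder is only selected when the device can actually parse that header. The mask-side half of the condition is not visible in this listing; the standalone sketch below models it with a simplified field check and simplified structs.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for the match mask and dmn->info.caps. */
    struct caps   { bool vxlan_gpe_supported; };
    struct domain { struct { struct caps caps; } info; };
    struct mask   { unsigned int outer_vxlan_gpe_vni; };

    /* Plays the role of dr_matcher_supp_vxlan_gpe(&dmn->info.caps). */
    static bool supp_vxlan_gpe(const struct caps *caps)
    {
        return caps->vxlan_gpe_supported;
    }

    /* Same two-part test as dr_mask_is_tnl_vxlan_gpe(): the field must be
     * masked on AND the device capabilities must allow matching on it.
     */
    static bool mask_is_tnl_vxlan_gpe(const struct mask *mask,
                                      const struct domain *dmn)
    {
        return mask->outer_vxlan_gpe_vni && supp_vxlan_gpe(&dmn->info.caps);
    }

    int main(void)
    {
        struct domain dmn = { .info.caps.vxlan_gpe_supported = true };
        struct mask m = { .outer_vxlan_gpe_vni = 0xffffff };

        printf("vxlan-gpe builder needed: %d\n",
               (int)mask_is_tnl_vxlan_gpe(&m, &dmn));
        return 0;
    }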
D | dr_table.c |
      17  mlx5dr_domain_lock(tbl->dmn);  [in mlx5dr_table_set_miss_action()]
      24  if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX ||  [in mlx5dr_table_set_miss_action()]
      25  tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {  [in mlx5dr_table_set_miss_action()]
      38  ret = mlx5dr_ste_htbl_init_and_postsend(tbl->dmn,  [in mlx5dr_table_set_miss_action()]
      43  mlx5dr_dbg(tbl->dmn, "Failed to set RX miss action, ret %d\n", ret);  [in mlx5dr_table_set_miss_action()]
      48  if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX ||  [in mlx5dr_table_set_miss_action()]
      49  tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {  [in mlx5dr_table_set_miss_action()]
      62  ret = mlx5dr_ste_htbl_init_and_postsend(tbl->dmn,  [in mlx5dr_table_set_miss_action()]
      66  mlx5dr_dbg(tbl->dmn, "Failed to set TX miss action, ret %d\n", ret);  [in mlx5dr_table_set_miss_action()]
      81  mlx5dr_domain_unlock(tbl->dmn);  [in mlx5dr_table_set_miss_action()]
      [all …]
|
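mlx5dr_table_set_miss_action() in dr_table.c wraps its work in mlx5dr_domain_lock()/mlx5dr_domain_unlock() and re-posts the start hash table for the RX side on NIC_RX/FDB domains and for the TX side on NIC_TX/FDB domains. The sketch below models only that lock/branch/unlock skeleton; the early-exit-on-error path and the per-direction update are stubbed assumptions.

    #include <stdio.h>

    enum domain_type { DOMAIN_TYPE_NIC_RX, DOMAIN_TYPE_NIC_TX, DOMAIN_TYPE_FDB };

    struct table { enum domain_type type; };

    /* Stubs for mlx5dr_domain_lock()/mlx5dr_domain_unlock() and for the
     * mlx5dr_ste_htbl_init_and_postsend() call that re-posts the start htbl.
     */
    static void domain_lock(struct table *tbl)   { (void)tbl; }
    static void domain_unlock(struct table *tbl) { (void)tbl; }
    static int repost_start_htbl(const char *dir)
    {
        printf("re-post %s start htbl with new miss action\n", dir);
        return 0;
    }

    /* Skeleton of mlx5dr_table_set_miss_action(): FDB domains have both an RX
     * and a TX side, so they take both branches; the goto-on-error path is an
     * assumption.
     */
    static int set_miss_action(struct table *tbl)
    {
        int ret = 0;

        domain_lock(tbl);

        if (tbl->type == DOMAIN_TYPE_NIC_RX || tbl->type == DOMAIN_TYPE_FDB) {
            ret = repost_start_htbl("RX");
            if (ret)
                goto out;
        }

        if (tbl->type == DOMAIN_TYPE_NIC_TX || tbl->type == DOMAIN_TYPE_FDB) {
            ret = repost_start_htbl("TX");
            if (ret)
                goto out;
        }

    out:
        domain_unlock(tbl);
        return ret;
    }

    int main(void)
    {
        struct table fdb = { .type = DOMAIN_TYPE_FDB };

        return set_miss_action(&fdb);
    }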
D | dr_send.c |
     312  static int dr_handle_pending_wc(struct mlx5dr_domain *dmn,  [in dr_handle_pending_wc(), argument]
     323  dmn->send_ring->signal_th * TH_NUMS_TO_DRAIN)  [in dr_handle_pending_wc()]
     329  mlx5_core_warn_once(dmn->mdev, "SMFS QPN 0x%x is disabled/limited",  [in dr_handle_pending_wc()]
     361  static int dr_postsend_icm_data(struct mlx5dr_domain *dmn,  [in dr_postsend_icm_data(), argument]
     364  struct mlx5dr_send_ring *send_ring = dmn->send_ring;  [in dr_postsend_icm_data()]
     368  if (unlikely(dmn->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||  [in dr_postsend_icm_data()]
     370  mlx5_core_dbg_once(dmn->mdev,  [in dr_postsend_icm_data()]
     372  send_ring->err_state, dmn->mdev->state);  [in dr_postsend_icm_data()]
     378  ret = dr_handle_pending_wc(dmn, send_ring);  [in dr_postsend_icm_data()]
     382  if (send_info->write.length > dmn->info.max_inline_size) {  [in dr_postsend_icm_data()]
      [all …]
|
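dr_postsend_icm_data() in dr_send.c refuses to post to the send ring once the device has hit an internal error or the ring itself is marked as being in an error state, logging just once via mlx5_core_dbg_once(); dr_handle_pending_wc() similarly downgrades to a one-time warning when the QP stops completing work. The sketch below models the early-exit guard only; the exact fields and the soft (non-failing) return value are assumptions based on the visible fragments.

    #include <stdbool.h>
    #include <stdio.h>

    enum dev_state { DEV_STATE_UP, DEV_STATE_INTERNAL_ERROR };

    struct send_ring { bool err_state; };
    struct dev       { enum dev_state state; };

    /* Stand-in for mlx5_core_dbg_once(): report only the first occurrence. */
    static void dbg_once(const char *msg)
    {
        static bool printed;

        if (!printed) {
            printed = true;
            fprintf(stderr, "%s\n", msg);
        }
    }

    /* Models the guard at the top of dr_postsend_icm_data(): never post to a
     * QP that can no longer complete work.  Returning 0 (silent no-op) is an
     * assumption, not taken from the listing.
     */
    static int postsend_icm_data(struct dev *mdev, struct send_ring *send_ring)
    {
        if (mdev->state == DEV_STATE_INTERNAL_ERROR || send_ring->err_state) {
            dbg_once("Skipping post send: QP err state or device internal error");
            return 0;
        }

        /* ... drain pending completions, then post the WQE ... */
        return 0;
    }

    int main(void)
    {
        struct dev mdev = { .state = DEV_STATE_INTERNAL_ERROR };
        struct send_ring ring = { .err_state = false };

        return postsend_icm_data(&mdev, &ring);
    }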
D | dr_rule.c |
      45  struct mlx5dr_domain *dmn = matcher->tbl->dmn;  [in dr_rule_create_collision_htbl(), local]
      46  struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;  [in dr_rule_create_collision_htbl()]
      51  new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,  [in dr_rule_create_collision_htbl()]
      56  mlx5dr_dbg(dmn, "Failed allocating collision table\n");  [in dr_rule_create_collision_htbl()]
      79  mlx5dr_dbg(matcher->tbl->dmn, "Failed creating collision entry\n");  [in dr_rule_create_collision_entry()]
      92  mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");  [in dr_rule_create_collision_entry()]
     105  struct mlx5dr_domain *dmn)  [in dr_rule_handle_one_ste_in_update_list(), argument]
     119  ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,  [in dr_rule_handle_one_ste_in_update_list()]
     130  struct mlx5dr_domain *dmn,  [in dr_rule_send_update_list(), argument]
     140  dmn);  [in dr_rule_send_update_list()]
      [all …]
|
D | dr_icm_pool.c |
      12  struct mlx5dr_domain *dmn;  [member]
      29  struct mlx5dr_domain *dmn;  [member]
      64  struct mlx5_core_dev *mdev = pool->dmn->mdev;  [in dr_icm_pool_mr_create()]
      74  icm_mr->dmn = pool->dmn;  [in dr_icm_pool_mr_create()]
      93  mlx5dr_err(pool->dmn, "Failed to allocate SW ICM memory, err (%d)\n", err);  [in dr_icm_pool_mr_create()]
      98  err = dr_icm_create_dm_mkey(mdev, pool->dmn->pdn,  [in dr_icm_pool_mr_create()]
     104  mlx5dr_err(pool->dmn, "Failed to create SW ICM MKEY, err (%d)\n", err);  [in dr_icm_pool_mr_create()]
     111  mlx5dr_err(pool->dmn, "Failed to get Aligned ICM mem (asked: %zu)\n",  [in dr_icm_pool_mr_create()]
     130  struct mlx5_core_dev *mdev = icm_mr->dmn->mdev;  [in dr_icm_pool_mr_destroy()]
     266  mlx5dr_err(pool->dmn,  [in dr_icm_chunk_create()]
      [all …]
|
D | dr_types.h |
      25  #define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg)  [argument]
      26  #define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg)  [argument]
      27  #define mlx5dr_dbg(dmn, arg...) mlx5_core_dbg((dmn)->mdev, ##arg)  [argument]
     212  struct mlx5dr_domain *dmn;  [member]
     283  struct mlx5dr_domain *dmn,
     289  struct mlx5dr_domain *dmn,
     356  int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
     473  struct mlx5dr_domain *dmn,
     855  struct mlx5dr_domain *dmn;  [member]
     901  struct mlx5dr_domain *dmn;  [member]
      [all …]
|
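The three wrappers at dr_types.h lines 25-27 forward to the corresponding mlx5_core_* logging helpers on the domain's mdev. The ", ##arg" form is the GNU extension that drops the trailing comma when no variadic arguments are given, which is why calls elsewhere in this listing such as mlx5dr_err(dmn, "Failed to get FW cs recalc flow table\n") work with a bare format string. Below is a compilable model of the same forwarding, with printf-family calls standing in for mlx5_core_err().

    #include <stdio.h>

    struct mdev   { const char *name; };
    struct domain { struct mdev *mdev; };

    /* Stand-in for mlx5_core_err(mdev, fmt, ...). */
    #define core_err(mdev, fmt, ...) \
        fprintf(stderr, "%s: " fmt, (mdev)->name, ##__VA_ARGS__)

    /* Same shape as mlx5dr_err(): prepend the owning device, forward the rest.
     * "arg..." / "##arg" in dr_types.h is the named-varargs spelling of the
     * ##__VA_ARGS__ used here; both are GNU extensions.
     */
    #define dr_err(dmn, arg...) core_err((dmn)->mdev, ##arg)

    int main(void)
    {
        struct mdev mdev = { .name = "mlx5_core0" };
        struct domain dmn = { .mdev = &mdev };

        dr_err(&dmn, "Failed creating TTL W/A FW flow table %d\n", -12);
        /* No varargs: the ## swallows the extra comma. */
        dr_err(&dmn, "Failed to get FW cs recalc flow table\n");
        return 0;
    }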
D | mlx5dr.h |
      50  void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
      81  mlx5dr_action_create_dest_table_num(struct mlx5dr_domain *dmn, u32 table_num);
      96  mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
     106  mlx5dr_action_create_flow_sampler(struct mlx5dr_domain *dmn, u32 sampler_id);
     112  mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
|
D | dr_ste.c |
     294  struct mlx5dr_domain *dmn = matcher->tbl->dmn;  [in mlx5dr_ste_free(), local]
     295  struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;  [in mlx5dr_ste_free()]
     346  mlx5dr_send_postsend_ste(dmn, cur_ste_info->ste,  [in mlx5dr_ste_free()]
     399  int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,  [in mlx5dr_ste_htbl_init_and_postsend(), argument]
     407  mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,  [in mlx5dr_ste_htbl_init_and_postsend()]
     408  dmn->info.caps.gvmi,  [in mlx5dr_ste_htbl_init_and_postsend()]
     414  return mlx5dr_send_postsend_formatted_htbl(dmn, htbl, formatted_ste, update_hw_ste);  [in mlx5dr_ste_htbl_init_and_postsend()]
     424  struct mlx5dr_domain *dmn = matcher->tbl->dmn;  [in mlx5dr_ste_create_next_htbl(), local]
     425  struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;  [in mlx5dr_ste_create_next_htbl()]
     436  next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,  [in mlx5dr_ste_create_next_htbl()]
      [all …]
|
D | dr_ste.h |
     160  void (*set_actions_rx)(struct mlx5dr_domain *dmn,
     165  void (*set_actions_tx)(struct mlx5dr_domain *dmn,
|
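The set_actions_rx / set_actions_tx members at dr_ste.h lines 160 and 165 are function pointers in the per-format STE context: dr_actions_apply() in dr_action.c above reaches them through dmn->ste_ctx, and the dr_ste_v0.c / dr_ste_v1.c entries below provide the implementations for the older and newer STE formats. A small standalone model of that ops-table dispatch follows; all names and signatures here are illustrative, not the driver's.

    #include <stdio.h>

    struct domain;

    /* Per-STE-format ops table, in the spirit of struct mlx5dr_ste_ctx. */
    struct ste_ctx {
        void (*set_actions_rx)(struct domain *dmn, const char *rule);
        void (*set_actions_tx)(struct domain *dmn, const char *rule);
    };

    struct domain { const struct ste_ctx *ste_ctx; };

    static void v0_actions_rx(struct domain *dmn, const char *rule) { (void)dmn; printf("v0 rx: %s\n", rule); }
    static void v0_actions_tx(struct domain *dmn, const char *rule) { (void)dmn; printf("v0 tx: %s\n", rule); }
    static void v1_actions_rx(struct domain *dmn, const char *rule) { (void)dmn; printf("v1 rx: %s\n", rule); }
    static void v1_actions_tx(struct domain *dmn, const char *rule) { (void)dmn; printf("v1 tx: %s\n", rule); }

    static const struct ste_ctx ste_ctx_v0 = { v0_actions_rx, v0_actions_tx };
    static const struct ste_ctx ste_ctx_v1 = { v1_actions_rx, v1_actions_tx };

    /* In the spirit of dr_actions_apply(): the caller never knows which STE
     * format is in use, it only goes through the ops table on the domain.
     */
    static void actions_apply(struct domain *dmn, int rx_side, const char *rule)
    {
        if (rx_side)
            dmn->ste_ctx->set_actions_rx(dmn, rule);
        else
            dmn->ste_ctx->set_actions_tx(dmn, rule);
    }

    int main(void)
    {
        struct domain d_v0 = { .ste_ctx = &ste_ctx_v0 };
        struct domain d_v1 = { .ste_ctx = &ste_ctx_v1 };

        actions_apply(&d_v0, 1, "rule A");
        actions_apply(&d_v1, 0, "rule B");
        return 0;
    }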
D | dr_ste_v0.c |
     408  dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,  [in dr_ste_v0_set_actions_tx(), argument]
     465  if (MLX5_CAP_GEN(dmn->mdev, prio_tag_required))  [in dr_ste_v0_set_actions_tx()]
     477  dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,  [in dr_ste_v0_set_actions_rx(), argument]
    1647  struct mlx5dr_domain *dmn = sb->dmn;  [in dr_ste_v0_build_src_gvmi_qpn_tag(), local]
    1656  if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)  [in dr_ste_v0_build_src_gvmi_qpn_tag()]
    1657  caps = &dmn->info.caps;  [in dr_ste_v0_build_src_gvmi_qpn_tag()]
    1658  else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==  [in dr_ste_v0_build_src_gvmi_qpn_tag()]
    1659  dmn->peer_dmn->info.caps.gvmi))  [in dr_ste_v0_build_src_gvmi_qpn_tag()]
    1660  caps = &dmn->peer_dmn->info.caps;  [in dr_ste_v0_build_src_gvmi_qpn_tag()]
    1666  caps = &dmn->info.caps;  [in dr_ste_v0_build_src_gvmi_qpn_tag()]
      [all …]
|
D | dr_ste_v1.c |
     513  static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,  [in dr_ste_v1_set_actions_tx(), argument]
     631  static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,  [in dr_ste_v1_set_actions_rx(), argument]
    1778  struct mlx5dr_domain *dmn = sb->dmn;  [in dr_ste_v1_build_src_gvmi_qpn_tag(), local]
    1786  if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)  [in dr_ste_v1_build_src_gvmi_qpn_tag()]
    1787  caps = &dmn->info.caps;  [in dr_ste_v1_build_src_gvmi_qpn_tag()]
    1788  else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==  [in dr_ste_v1_build_src_gvmi_qpn_tag()]
    1789  dmn->peer_dmn->info.caps.gvmi))  [in dr_ste_v1_build_src_gvmi_qpn_tag()]
    1790  caps = &dmn->peer_dmn->info.caps;  [in dr_ste_v1_build_src_gvmi_qpn_tag()]
    1796  caps = &dmn->info.caps;  [in dr_ste_v1_build_src_gvmi_qpn_tag()]
    1804  mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",  [in dr_ste_v1_build_src_gvmi_qpn_tag()]
|
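dr_ste_v0.c and dr_ste_v1.c share the capability-selection logic shown in their build_src_gvmi_qpn_tag() hits: use the local caps when source_eswitch_owner_vhca_id matches this domain's gvmi, use the peer domain's caps when it matches a peer set up via mlx5dr_domain_set_peer(), and otherwise fall back to the local caps before validating the vport (the condition guarding the line-1666/1796 assignment is not visible here). The sketch below models that comparison chain with simplified structs; treat the fallback branch as an assumption.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified layout; the real fields live under dmn->info.caps. */
    struct caps   { unsigned int gvmi; bool vport_valid; };
    struct domain { struct caps caps; struct domain *peer_dmn; };

    /* Comparison chain from dr_ste_v{0,1}_build_src_gvmi_qpn_tag(): pick the
     * caps of whichever domain owns the source vhca id.  The unconditional
     * fallback to the local caps is an assumption (the real branch condition
     * is not visible in the listing).
     */
    static const struct caps *pick_caps(const struct domain *dmn,
                                        unsigned int source_eswitch_owner_vhca_id)
    {
        if (source_eswitch_owner_vhca_id == dmn->caps.gvmi)
            return &dmn->caps;
        if (dmn->peer_dmn &&
            source_eswitch_owner_vhca_id == dmn->peer_dmn->caps.gvmi)
            return &dmn->peer_dmn->caps;

        return &dmn->caps;
    }

    int main(void)
    {
        struct domain peer  = { .caps = { .gvmi = 0x11, .vport_valid = true } };
        struct domain local = { .caps = { .gvmi = 0x10, .vport_valid = true },
                                .peer_dmn = &peer };
        unsigned int vhca_id = 0x11;
        const struct caps *caps = pick_caps(&local, vhca_id);

        if (!caps->vport_valid)
            fprintf(stderr, "selected vport is disabled or invalid\n");

        printf("selected caps with gvmi 0x%x\n", caps->gvmi);
        return 0;
    }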
/Linux-v5.15/drivers/powercap/ |
D | intel_rapl_common.c |
    1080  int dmn, prim;  [in rapl_update_domain_data(), local]
    1083  for (dmn = 0; dmn < rp->nr_domains; dmn++) {  [in rapl_update_domain_data()]
    1085  rp->domains[dmn].name);  [in rapl_update_domain_data()]
    1088  if (!rapl_read_data_raw(&rp->domains[dmn], prim,  [in rapl_update_domain_data()]
    1090  rp->domains[dmn].rdd.primitives[prim] = val;  [in rapl_update_domain_data()]
|
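The single powercap hit uses dmn as a plain domain index rather than a steering domain: rapl_update_domain_data() walks every RAPL domain in a package and, for each primitive, stores the raw counter into rdd.primitives[] whenever rapl_read_data_raw() returns success. The standalone sketch below models that refresh loop; the primitive count, the extra arguments of the real rapl_read_data_raw() call, and the data types are placeholders.

    #include <stdio.h>
    #include <stdint.h>

    #define NR_PRIMITIVES 4   /* placeholder, not the driver's primitive count */

    struct rapl_domain {
        const char *name;
        struct { uint64_t primitives[NR_PRIMITIVES]; } rdd;
    };

    struct rapl_package {
        int nr_domains;
        struct rapl_domain domains[2];
    };

    /* Stand-in for rapl_read_data_raw(); like the driver, 0 means success and
     * the value comes back through *val.  Extra arguments of the real call
     * are omitted.
     */
    static int read_data_raw(struct rapl_domain *rd, int prim, uint64_t *val)
    {
        (void)rd;
        *val = (uint64_t)prim * 1000;   /* fake counter */
        return 0;
    }

    /* Same loop shape as rapl_update_domain_data(): refresh every primitive of
     * every domain, keeping the old value whenever a read fails.
     */
    static void update_domain_data(struct rapl_package *rp)
    {
        int dmn, prim;
        uint64_t val;

        for (dmn = 0; dmn < rp->nr_domains; dmn++) {
            printf("update domain %s data\n", rp->domains[dmn].name);
            for (prim = 0; prim < NR_PRIMITIVES; prim++)
                if (!read_data_raw(&rp->domains[dmn], prim, &val))
                    rp->domains[dmn].rdd.primitives[prim] = val;
        }
    }

    int main(void)
    {
        struct rapl_package rp = {
            .nr_domains = 2,
            .domains = { { .name = "package-0" }, { .name = "core" } },
        };

        update_domain_data(&rp);
        return 0;
    }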