/Linux-v5.4/net/tipc/
  bcast.c
     66  int dests[MAX_BEARERS];   member
    119  if (!bb->dests[i])   in tipc_bcbase_select_primary()
    126  if (bb->dests[i] < all_dests)   in tipc_bcbase_select_primary()
    145  bb->dests[bearer_id]++;   in tipc_bcast_inc_bearer_dst_cnt()
    155  bb->dests[bearer_id]--;   in tipc_bcast_dec_bearer_dst_cnt()
    190  if (!bb->dests[bearer_id])   in tipc_bcbase_xmit()
    205  static void tipc_bcast_select_xmit_method(struct net *net, int dests,   argument of tipc_bcast_select_xmit_method()
    242  method->rcast = dests <= bb->bc_threshold;   in tipc_bcast_select_xmit_method()
    282  struct tipc_nlist *dests, u16 *cong_link_cnt)   argument of tipc_rcast_xmit()
    291  list_for_each_entry_safe(dst, tmp, &dests->list, list) {   in tipc_rcast_xmit()
    [all …]
    (the replicast/broadcast decision at line 242 is illustrated in the sketch after this TIPC listing)
|
  group.c
     84  struct tipc_nlist dests;   member
    174  tipc_nlist_init(&grp->dests, tipc_own_addr(net));   in tipc_group_create()
    226  tipc_nlist_purge(&grp->dests);   in tipc_group_delete()
    319  tipc_nlist_add(&grp->dests, m->node);   in tipc_group_create_member()
    346  tipc_nlist_del(&grp->dests, m->node);   in tipc_group_delete_member()
    353  return &grp->dests;   in tipc_group_dests()
|
  bcast.h
     90  struct tipc_mc_method *method, struct tipc_nlist *dests,
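
A note on the bcast.c hits above: line 242 is where TIPC chooses between "replicast" (one unicast copy per destination) and true broadcast, by comparing the destination count against a threshold. The sketch below is a simplified, self-contained illustration of just that comparison; it does not use TIPC's internal types, the names mc_method/select_xmit_method are stand-ins rather than kernel symbols, and the real tipc_bcast_select_xmit_method() additionally takes forced modes and broadcast support into account.

    /*
     * Illustration only: mirrors the decision at bcast.c:242
     * (method->rcast = dests <= bb->bc_threshold;) without TIPC's
     * internal state. All names here are hypothetical.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct mc_method {
            bool rcast;     /* true: replicate as unicasts; false: broadcast */
    };

    static void select_xmit_method(struct mc_method *method, int dests,
                                   int bc_threshold)
    {
            /*
             * With few destinations, sending one unicast copy per node is
             * cheaper than exercising the broadcast link; past the
             * threshold, broadcast wins.
             */
            method->rcast = dests <= bc_threshold;
    }

    int main(void)
    {
            struct mc_method m;

            select_xmit_method(&m, 3, 10);
            printf("3 dests  -> %s\n", m.rcast ? "replicast" : "broadcast");
            select_xmit_method(&m, 64, 10);
            printf("64 dests -> %s\n", m.rcast ? "replicast" : "broadcast");
            return 0;
    }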
|
/Linux-v5.4/drivers/md/
  dm-kcopyd.c
    362  struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];   member
    579  r = dm_io(&io_req, job->num_dests, job->dests, NULL);   in run_io_job()
    587  unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);   in run_pages_job()
    734  sub_job->dests[i].sector += progress;   in segment_complete()
    735  sub_job->dests[i].count = count;   in segment_complete()
    775  unsigned int num_dests, struct dm_io_region *dests,   argument of dm_kcopyd_copy()
    797  memcpy(&job->dests, dests, sizeof(*dests) * num_dests);   in dm_kcopyd_copy()
    806  if (bdev_zoned_model(dests[i].bdev) == BLK_ZONED_HM) {   in dm_kcopyd_copy()
    826  job->source.count = job->dests[0].count;   in dm_kcopyd_copy()
    834  if (!bdev_write_zeroes_sectors(job->dests[i].bdev)) {   in dm_kcopyd_copy()
    [all …]
|
/Linux-v5.4/include/linux/
  dm-kcopyd.h
     66  unsigned num_dests, struct dm_io_region *dests,
     85  unsigned num_dests, struct dm_io_region *dests,
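
dm-kcopyd.h is the public face of the dests[] array seen in dm-kcopyd.c above: dm_kcopyd_copy() (and dm_kcopyd_zero()) fan one source region out to up to DM_KCOPYD_MAX_REGIONS destination regions and report completion through a callback. Below is a hedged sketch of how a device-mapper target might issue a one-source, two-destination copy against the v5.4 API; the block devices, sector range, helper names and the use of a struct completion as callback context are illustrative assumptions, not code from an in-tree target.

    #include <linux/blkdev.h>
    #include <linux/completion.h>
    #include <linux/dm-io.h>
    #include <linux/dm-kcopyd.h>
    #include <linux/kernel.h>

    /* write_err is a bitmask: bit i set means dests[i] failed */
    static void my_copy_done(int read_err, unsigned long write_err, void *context)
    {
            if (read_err || write_err)
                    pr_err("kcopyd: copy failed (read_err=%d write_err=0x%lx)\n",
                           read_err, write_err);
            complete(context);      /* assumption: context is a struct completion * */
    }

    static void copy_one_to_two(struct dm_kcopyd_client *kc,
                                struct block_device *src,
                                struct block_device *dst0,
                                struct block_device *dst1,
                                sector_t sector, sector_t len,
                                struct completion *done)
    {
            struct dm_io_region from = {
                    .bdev = src, .sector = sector, .count = len,
            };
            /*
             * Same shape as job->dests[] in dm-kcopyd.c, here using two of
             * the DM_KCOPYD_MAX_REGIONS slots.
             */
            struct dm_io_region dests[] = {
                    { .bdev = dst0, .sector = sector, .count = len },
                    { .bdev = dst1, .sector = sector, .count = len },
            };

            /* in v5.4 dm_kcopyd_copy() returns void; completion arrives via the callback */
            dm_kcopyd_copy(kc, &from, ARRAY_SIZE(dests), dests, 0, my_copy_done, done);
    }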
|
/Linux-v5.4/drivers/i3c/master/
  dw-i3c-master.c
    669  pos = dw_i3c_master_get_addr_pos(master, ccc->dests[0].addr);   in dw_i3c_ccc_set()
    679  cmd->tx_buf = ccc->dests[0].payload.data;   in dw_i3c_ccc_set()
    680  cmd->tx_len = ccc->dests[0].payload.len;   in dw_i3c_ccc_set()
    682  cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(ccc->dests[0].payload.len) |   in dw_i3c_ccc_set()
    710  pos = dw_i3c_master_get_addr_pos(master, ccc->dests[0].addr);   in dw_i3c_ccc_get()
    719  cmd->rx_buf = ccc->dests[0].payload.data;   in dw_i3c_ccc_get()
    720  cmd->rx_len = ccc->dests[0].payload.len;   in dw_i3c_ccc_get()
    722  cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(ccc->dests[0].payload.len) |   in dw_i3c_ccc_get()
|
  i3c-master-cdns.c
    701  CMD0_FIFO_PL_LEN(cmd->dests[0].payload.len);   in cdns_i3c_master_send_ccc_cmd()
    704  ccmd->cmd0 |= CMD0_FIFO_DEV_ADDR(cmd->dests[0].addr);   in cdns_i3c_master_send_ccc_cmd()
    708  ccmd->rx_buf = cmd->dests[0].payload.data;   in cdns_i3c_master_send_ccc_cmd()
    709  ccmd->rx_len = cmd->dests[0].payload.len;   in cdns_i3c_master_send_ccc_cmd()
    711  ccmd->tx_buf = cmd->dests[0].payload.data;   in cdns_i3c_master_send_ccc_cmd()
    712  ccmd->tx_len = cmd->dests[0].payload.len;   in cdns_i3c_master_send_ccc_cmd()
|
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx5/core/
  eswitch_offloads.c
    185  dest[i].vport.num = attr->dests[j].rep->vport;   in mlx5_eswitch_add_offloaded_rule()
    187  MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);   in mlx5_eswitch_add_offloaded_rule()
    191  if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {   in mlx5_eswitch_add_offloaded_rule()
    193  flow_act.pkt_reformat = attr->dests[j].pkt_reformat;   in mlx5_eswitch_add_offloaded_rule()
    196  attr->dests[j].pkt_reformat;   in mlx5_eswitch_add_offloaded_rule()
    272  dest[i].vport.num = attr->dests[i].rep->vport;   in mlx5_eswitch_add_fwd_rule()
    274  MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);   in mlx5_eswitch_add_fwd_rule()
    277  if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {   in mlx5_eswitch_add_fwd_rule()
    279  dest[i].vport.pkt_reformat = attr->dests[i].pkt_reformat;   in mlx5_eswitch_add_fwd_rule()
    320  if (attr->dests[i].termtbl)   in __mlx5_eswitch_del_rule()
    [all …]
    (the dests[] to mlx5_flow_destination translation in these two functions is sketched after this mlx5 listing)
|
  eswitch_offloads_termtbl.c
    241  attr->dests[num_vport_dests].termtbl = tt;   in mlx5_eswitch_add_termtbl_rule()
    268  struct mlx5_termtbl_handle *tt = attr->dests[curr_dest].termtbl;   in mlx5_eswitch_add_termtbl_rule()
|
  en_tc.c
    1184  if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))   in mlx5e_tc_add_fdb_flow()
    1197  attr->dests[out_index].rep = rpriv->rep;   in mlx5e_tc_add_fdb_flow()
    1198  attr->dests[out_index].mdev = out_priv->mdev;   in mlx5e_tc_add_fdb_flow()
    1281  if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {   in mlx5e_tc_del_fdb_flow()
    1326  esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat;   in mlx5e_tc_encap_flows_add()
    1327  esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;   in mlx5e_tc_encap_flows_add()
    1333  if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))   in mlx5e_tc_encap_flows_add()
    1335  if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {   in mlx5e_tc_encap_flows_add()
    1378  flow->esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;   in mlx5e_tc_encap_flows_del()
    3075  attr->dests[out_index].pkt_reformat = e->pkt_reformat;   in mlx5e_attach_encap()
    [all …]
|
  eswitch.h
    392  } dests[MLX5_MAX_FLOW_FWD_VPORTS];   member
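
The eswitch.h line above is the member declaration behind all of the attr->dests[...] accesses listed for this directory: each offloaded rule carries an array of forward destinations (representor, its mdev, encap flags and pkt_reformat, optional termination table). The sketch below condenses the translation step visible in mlx5_eswitch_add_offloaded_rule() and mlx5_eswitch_add_fwd_rule() into one helper; it assumes the driver-internal struct mlx5_esw_flow_attr from eswitch.h, omits the vport-flag and action-bit bookkeeping the real functions do, and the helper name esw_attr_to_fs_dests is made up.

    #include <linux/mlx5/device.h>
    #include <linux/mlx5/fs.h>

    #include "eswitch.h"    /* driver-internal: struct mlx5_esw_flow_attr, MLX5_ESW_DEST_ENCAP */

    /* Hypothetical helper condensing the pattern from the listing above. */
    static void esw_attr_to_fs_dests(const struct mlx5_esw_flow_attr *attr,
                                     struct mlx5_flow_destination *dest,
                                     struct mlx5_flow_act *flow_act,
                                     int count)
    {
            int i;

            for (i = 0; i < count; i++) {
                    /* every forward destination here is a vport (a representor) */
                    dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
                    dest[i].vport.num = attr->dests[i].rep->vport;
                    dest[i].vport.vhca_id =
                            MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);

                    /* encap metadata travels with the destination when requested */
                    if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
                            flow_act->pkt_reformat = attr->dests[i].pkt_reformat;
                            dest[i].vport.pkt_reformat =
                                    attr->dests[i].pkt_reformat;
                    }
            }
    }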
|
/Linux-v5.4/include/linux/i3c/
  ccc.h
    381  struct i3c_ccc_cmd_dest *dests;   member
|
/Linux-v5.4/Documentation/admin-guide/device-mapper/
  kcopyd.rst
     33  unsigned int num_dests, struct io_region *dests,
|
/Linux-v5.4/drivers/i3c/
  master.c
    628  struct i3c_ccc_cmd_dest *dests,   argument of i3c_ccc_cmd_init()
    633  cmd->dests = dests;   in i3c_ccc_cmd_init()
    653  if ((cmd->id & I3C_CCC_DIRECT) && (!cmd->dests || !cmd->ndests))   in i3c_master_send_ccc_cmd_locked()
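
Tying the i3c hits together: ccc.h line 381 is the dests pointer inside struct i3c_ccc_cmd, master.c fills it through its static i3c_ccc_cmd_init()/i3c_ccc_cmd_dest_init() helpers, and the DesignWare and Cadence controller drivers above consume only dests[0] when a directed CCC goes out on the wire. Below is a hedged sketch of filling one destination for a directed, read-type CCC (GETSTATUS-style); the function name, target address and buffer are illustrative, and real master-side code reaches this path through the core's CCC helpers rather than hand-rolling the command.

    #include <linux/i3c/ccc.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /* Hypothetical helper: fill a single-destination, directed CCC command. */
    static void fill_directed_ccc(struct i3c_ccc_cmd *cmd,
                                  struct i3c_ccc_cmd_dest *dest,
                                  u8 target_addr, void *buf, u16 len)
    {
            memset(dest, 0, sizeof(*dest));
            dest->addr = target_addr;       /* dynamic address of the addressed device */
            dest->payload.data = buf;       /* controller drivers use this as the rx/tx buffer */
            dest->payload.len = len;

            memset(cmd, 0, sizeof(*cmd));
            cmd->rnw = true;                /* a GET-type CCC: the master reads back data */
            cmd->id = I3C_CCC_GETSTATUS;    /* direct CCC, so the I3C_CCC_DIRECT bit is set */
            cmd->dests = dest;              /* dw-i3c-master.c / i3c-master-cdns.c read dests[0] */
            cmd->ndests = 1;                /* checked in i3c_master_send_ccc_cmd_locked() */
    }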
|