/Linux-v5.4/drivers/infiniband/hw/mlx5/

ib_rep.c
    11  mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)   in mlx5_ib_set_vport_rep() argument
    17  vport_index = rep->vport_index;   in mlx5_ib_set_vport_rep()
    19  ibdev->port[vport_index].rep = rep;   in mlx5_ib_set_vport_rep()
    20  rep->rep_data[REP_IB].priv = ibdev;   in mlx5_ib_set_vport_rep()
    23  mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);   in mlx5_ib_set_vport_rep()
    30  mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)   in mlx5_ib_vport_rep_load() argument
    37  if (rep->vport == MLX5_VPORT_UPLINK)   in mlx5_ib_vport_rep_load()
    40  return mlx5_ib_set_vport_rep(dev, rep);   in mlx5_ib_vport_rep_load()
    54  vport_index = rep->vport_index;   in mlx5_ib_vport_rep_load()
    55  ibdev->port[vport_index].rep = rep;   in mlx5_ib_vport_rep_load()
    [all …]

ib_virt.c
    56  struct mlx5_hca_vport_context *rep;   in mlx5_ib_get_vf_config() local
    59  rep = kzalloc(sizeof(*rep), GFP_KERNEL);   in mlx5_ib_get_vf_config()
    60  if (!rep)   in mlx5_ib_get_vf_config()
    63  err = mlx5_query_hca_vport_context(mdev, 1, 1, vf + 1, rep);   in mlx5_ib_get_vf_config()
    70  info->linkstate = mlx_to_net_policy(rep->policy);   in mlx5_ib_get_vf_config()
    75  kfree(rep);   in mlx5_ib_get_vf_config()

/Linux-v5.4/tools/perf/

builtin-report.c
    107  struct report *rep = cb;   in report__config() local
    116  rep->min_percent = pcnt;   in report__config()
    125  return perf_config_u64(&rep->queue_size, var, value);   in report__config()
    140  struct report *rep = arg;   in hist_iter__report_callback() local
    147  if (!ui__has_annotation() && !rep->symbol_ipc)   in hist_iter__report_callback()
    158  } else if (rep->mem_mode) {   in hist_iter__report_callback()
    183  struct report *rep = arg;   in hist_iter__branch_callback() local
    189  if (!ui__has_annotation() && !rep->symbol_ipc)   in hist_iter__branch_callback()
    199  branch_type_count(&rep->brtype_stat, &bi->flags,   in hist_iter__branch_callback()
    216  struct report *rep = container_of(session->tool, struct report, tool);   in process_feature_event() local
    [all …]

/Linux-v5.4/net/tipc/

netlink_compat.c
    60  struct sk_buff *rep;   member
    220  if (tipc_skb_tailroom(msg->rep) <= 1) {   in __tipc_nl_compat_dumpit()
    241  if ((TIPC_SKB_MAX - msg->rep->len) <= 1) {   in __tipc_nl_compat_dumpit()
    242  char *tail = skb_tail_pointer(msg->rep);   in __tipc_nl_compat_dumpit()
    265  msg->rep = tipc_tlv_alloc(msg->rep_size);   in tipc_nl_compat_dumpit()
    266  if (!msg->rep)   in tipc_nl_compat_dumpit()
    270  tipc_tlv_init(msg->rep, msg->rep_type);   in tipc_nl_compat_dumpit()
    275  kfree_skb(msg->rep);   in tipc_nl_compat_dumpit()
    276  msg->rep = NULL;   in tipc_nl_compat_dumpit()
    283  kfree_skb(msg->rep);   in tipc_nl_compat_dumpit()
    [all …]

/Linux-v5.4/net/ceph/crush/

mapper.c
    467  int rep;   in crush_choose_firstn() local
    484  for (rep = stable ? 0 : outpos; rep < numrep && count > 0 ; rep++) {   in crush_choose_firstn()
    497  r = rep + parent_r;   in crush_choose_firstn()
    658  int rep;   in crush_choose_indep() local
    670  for (rep = outpos; rep < endpos; rep++) {   in crush_choose_indep()
    671  out[rep] = CRUSH_ITEM_UNDEF;   in crush_choose_indep()
    673  out2[rep] = CRUSH_ITEM_UNDEF;   in crush_choose_indep()
    680  for (rep = outpos; rep < endpos; rep++) {   in crush_choose_indep()
    681  dprintk(" %d", out[rep]);   in crush_choose_indep()
    685  for (rep = outpos; rep < endpos; rep++) {   in crush_choose_indep()
    [all …]

/Linux-v5.4/net/vmw_vsock/

diag.c
    18  struct vsock_diag_msg *rep;   in sk_diag_fill() local
    21  nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),   in sk_diag_fill()
    26  rep = nlmsg_data(nlh);   in sk_diag_fill()
    27  rep->vdiag_family = AF_VSOCK;   in sk_diag_fill()
    34  rep->vdiag_type = sk->sk_type;   in sk_diag_fill()
    35  rep->vdiag_state = sk->sk_state;   in sk_diag_fill()
    36  rep->vdiag_shutdown = sk->sk_shutdown;   in sk_diag_fill()
    37  rep->vdiag_src_cid = vsk->local_addr.svm_cid;   in sk_diag_fill()
    38  rep->vdiag_src_port = vsk->local_addr.svm_port;   in sk_diag_fill()
    39  rep->vdiag_dst_cid = vsk->remote_addr.svm_cid;   in sk_diag_fill()
    [all …]

/Linux-v5.4/lib/zstd/

zstd_opt.h
    426  U32 offset, rep[ZSTD_REP_NUM];   in ZSTD_compressBlock_opt_generic() local
    435  rep[i] = ctx->rep[i];   in ZSTD_compressBlock_opt_generic()
    450  const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];   in ZSTD_compressBlock_opt_generic()
    509  opt[0].rep[i] = rep[i];   in ZSTD_compressBlock_opt_generic()
    540  opt[cur].rep[2] = opt[cur - mlen].rep[1];   in ZSTD_compressBlock_opt_generic()
    541  opt[cur].rep[1] = opt[cur - mlen].rep[0];   in ZSTD_compressBlock_opt_generic()
    542  opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT;   in ZSTD_compressBlock_opt_generic()
    544  opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur - mlen].rep[1] : opt[cur - mlen].rep[2];   in ZSTD_compressBlock_opt_generic()
    545  opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur - mlen].rep[0] : opt[cur - mlen].rep[1];   in ZSTD_compressBlock_opt_generic()
    546  opt[cur].rep[0] =   in ZSTD_compressBlock_opt_generic()
    [all …]

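The opt[cur].rep[...] assignments listed above maintain zstd's small history of recently used match offsets ("repcodes") so that a repeated offset can be encoded cheaply. The sketch below is a simplified, generic most-recently-used update of such a history; the function name, the fixed size of three entries, and the omission of the ZSTD_REP_MOVE_OPT bookkeeping are assumptions made for illustration, not the zstd implementation.

```c
#include <stdint.h>

/* Illustrative only: keep the three most recently used match offsets.
 * A repeated offset is promoted to the front; a new offset ages out
 * the oldest entry. */
static void update_rep_history(uint32_t rep[3], uint32_t new_offset)
{
	if (new_offset == rep[0])
		return;                  /* already the most recent offset */
	if (new_offset != rep[1])
		rep[2] = rep[1];         /* genuinely new: drop the oldest entry */
	rep[1] = rep[0];
	rep[0] = new_offset;             /* new or promoted offset goes to the front */
}
```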
/Linux-v5.4/net/sunrpc/xprtrdma/

verbs.c
    154  struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,   in rpcrdma_wc_receive() local
    156  struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;   in rpcrdma_wc_receive()
    165  rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);   in rpcrdma_wc_receive()
    166  rep->rr_wc_flags = wc->wc_flags;   in rpcrdma_wc_receive()
    167  rep->rr_inv_rkey = wc->ex.invalidate_rkey;   in rpcrdma_wc_receive()
    169  ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),   in rpcrdma_wc_receive()
    170  rdmab_addr(rep->rr_rdmabuf),   in rpcrdma_wc_receive()
    174  rpcrdma_reply_handler(rep);   in rpcrdma_wc_receive()
    178  rpcrdma_recv_buffer_put(rep);   in rpcrdma_wc_receive()
    1048  struct rpcrdma_rep *rep;   in rpcrdma_rep_create() local
    [all …]

rpc_rdma.c
    555  struct rpcrdma_rep *rep = req->rl_reply;   in rpcrdma_sendctx_done() local
    557  rpcrdma_complete_rqst(rep);   in rpcrdma_sendctx_done()
    558  rep->rr_rxprt->rx_stats.reply_waits_for_send++;   in rpcrdma_sendctx_done()
    1020  rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)   in rpcrdma_is_bcall() argument
    1023  struct xdr_stream *xdr = &rep->rr_stream;   in rpcrdma_is_bcall()
    1026  if (rep->rr_proc != rdma_msg)   in rpcrdma_is_bcall()
    1041  if (*p++ != rep->rr_xid)   in rpcrdma_is_bcall()
    1053  rpcrdma_bc_receive_call(r_xprt, rep);   in rpcrdma_is_bcall()
    1163  rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,   in rpcrdma_decode_msg() argument
    1166  struct xdr_stream *xdr = &rep->rr_stream;   in rpcrdma_decode_msg()
    [all …]

/Linux-v5.4/block/

blk-zoned.c
    104  static bool blkdev_report_zone(struct block_device *bdev, struct blk_zone *rep)   in blkdev_report_zone() argument
    108  if (rep->start < offset)   in blkdev_report_zone()
    111  rep->start -= offset;   in blkdev_report_zone()
    112  if (rep->start + rep->len > bdev->bd_part->nr_sects)   in blkdev_report_zone()
    115  if (rep->type == BLK_ZONE_TYPE_CONVENTIONAL)   in blkdev_report_zone()
    116  rep->wp = rep->start + rep->len;   in blkdev_report_zone()
    118  rep->wp -= offset;   in blkdev_report_zone()
    319  struct blk_zone_report rep;   in blkdev_report_zones_ioctl() local
    336  if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))   in blkdev_report_zones_ioctl()
    339  if (!rep.nr_zones)   in blkdev_report_zones_ioctl()
    [all …]

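The blkdev_report_zone() hits above clip and shift zone descriptors so they are reported relative to a partition rather than the whole device. Below is a minimal sketch of that remapping; struct zone_desc, remap_zone, and the field names are made up for the example and are not the kernel's struct blk_zone API.

```c
#include <stdbool.h>
#include <stdint.h>

/* Illustrative zone descriptor, all values in 512-byte sectors. */
struct zone_desc {
	uint64_t start;         /* first sector of the zone */
	uint64_t len;           /* zone length */
	uint64_t wp;            /* write pointer (sequential zones) */
	bool conventional;      /* conventional zones have no write pointer */
};

/* Remap a whole-device zone into partition-relative coordinates,
 * mirroring the checks in the listed blkdev_report_zone() lines. */
static bool remap_zone(struct zone_desc *z, uint64_t part_offset,
		       uint64_t part_nr_sects)
{
	if (z->start < part_offset)
		return false;                    /* zone begins before the partition */
	z->start -= part_offset;
	if (z->start + z->len > part_nr_sects)
		return false;                    /* zone extends past the partition */
	if (z->conventional)
		z->wp = z->start + z->len;       /* report a "full" write pointer */
	else
		z->wp -= part_offset;
	return true;
}
```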
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx5/core/

en_rep.c
    139  struct mlx5_eswitch_rep *rep = rpriv->rep;   in mlx5e_rep_update_hw_counters() local
    144  err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);   in mlx5e_rep_update_hw_counters()
    146  pr_warn("vport %d error %d reading stats\n", rep->vport, err);   in mlx5e_rep_update_hw_counters()
    242  struct mlx5_eswitch_rep *rep = rpriv->rep;   in mlx5e_replace_rep_vport_rx_rule() local
    246  rep->vport,   in mlx5e_replace_rep_vport_rx_rule()
    409  struct mlx5_eswitch_rep *rep)   in mlx5e_sqs2vport_stop() argument
    417  rpriv = mlx5e_rep_to_rep_priv(rep);   in mlx5e_sqs2vport_stop()
    426  struct mlx5_eswitch_rep *rep,   in mlx5e_sqs2vport_start() argument
    438  rpriv = mlx5e_rep_to_rep_priv(rep);   in mlx5e_sqs2vport_start()
    448  rep->vport,   in mlx5e_sqs2vport_start()
    [all …]

vport.c
    664  struct mlx5_hca_vport_context *rep)   in mlx5_query_hca_vport_context() argument
    699  rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);   in mlx5_query_hca_vport_context()
    700  rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);   in mlx5_query_hca_vport_context()
    701  rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);   in mlx5_query_hca_vport_context()
    702  rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);   in mlx5_query_hca_vport_context()
    703  rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);   in mlx5_query_hca_vport_context()
    704  rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,   in mlx5_query_hca_vport_context()
    706  rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);   in mlx5_query_hca_vport_context()
    707  rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,   in mlx5_query_hca_vport_context()
    709  rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);   in mlx5_query_hca_vport_context()
    [all …]

eswitch_offloads.c
    185  dest[i].vport.num = attr->dests[j].rep->vport;   in mlx5_eswitch_add_offloaded_rule()
    272  dest[i].vport.num = attr->dests[i].rep->vport;   in mlx5_eswitch_add_fwd_rule()
    354  struct mlx5_eswitch_rep *rep;   in esw_set_global_vlan_pop() local
    358  mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {   in esw_set_global_vlan_pop()
    359  if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)   in esw_set_global_vlan_pop()
    362  err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);   in esw_set_global_vlan_pop()
    377  out_rep = attr->dests[0].rep;   in esw_vlan_action_get_vport()
    398  out_rep = attr->dests[0].rep;   in esw_add_vlan_action_check()
    452  if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {   in mlx5_eswitch_add_vlan_action()
    516  if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)   in mlx5_eswitch_del_vlan_action()
    [all …]

/Linux-v5.4/net/unix/

diag.c
    126  struct unix_diag_msg *rep;   in sk_diag_fill() local
    128  nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),   in sk_diag_fill()
    133  rep = nlmsg_data(nlh);   in sk_diag_fill()
    134  rep->udiag_family = AF_UNIX;   in sk_diag_fill()
    135  rep->udiag_type = sk->sk_type;   in sk_diag_fill()
    136  rep->udiag_state = sk->sk_state;   in sk_diag_fill()
    137  rep->pad = 0;   in sk_diag_fill()
    138  rep->udiag_ino = sk_ino;   in sk_diag_fill()
    139  sock_diag_save_cookie(sk, rep->udiag_cookie);   in sk_diag_fill()
    263  struct sk_buff *rep;   in unix_diag_get_exact() local
    [all …]

/Linux-v5.4/net/netlink/

diag.c
    49  struct netlink_diag_msg *rep;   in sk_diag_fill() local
    52  nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),   in sk_diag_fill()
    57  rep = nlmsg_data(nlh);   in sk_diag_fill()
    58  rep->ndiag_family = AF_NETLINK;   in sk_diag_fill()
    59  rep->ndiag_type = sk->sk_type;   in sk_diag_fill()
    60  rep->ndiag_protocol = sk->sk_protocol;   in sk_diag_fill()
    61  rep->ndiag_state = sk->sk_state;   in sk_diag_fill()
    63  rep->ndiag_ino = sk_ino;   in sk_diag_fill()
    64  rep->ndiag_portid = nlk->portid;   in sk_diag_fill()
    65  rep->ndiag_dst_portid = nlk->dst_portid;   in sk_diag_fill()
    [all …]

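The three diag.c entries in these results (net/vmw_vsock, net/unix, net/netlink) share the same sock_diag fill pattern: reserve a netlink message with nlmsg_put(), point rep at its payload via nlmsg_data(), populate the per-family fields, then close the message with nlmsg_end(). The sketch below shows that shape with a hypothetical payload struct and function name (example_diag_msg, example_diag_fill); the real per-family structs carry more fields than this.

```c
#include <net/sock.h>
#include <net/netlink.h>
#include <linux/sock_diag.h>

/* Hypothetical fixed-size report header, for illustration only. */
struct example_diag_msg {
	__u8  family;
	__u8  type;
	__u8  state;
	__u32 ino;
};

static int example_diag_fill(struct sock *sk, struct sk_buff *skb,
			     u32 portid, u32 seq, u32 flags)
{
	struct example_diag_msg *rep;
	struct nlmsghdr *nlh;

	/* Reserve headroom for the netlink header plus our payload. */
	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	/* Fill the fixed-size report that follows the netlink header. */
	rep = nlmsg_data(nlh);
	rep->family = sk->sk_family;
	rep->type   = sk->sk_type;
	rep->state  = sk->sk_state;
	rep->ino    = sock_i_ino(sk);

	nlmsg_end(skb, nlh);
	return 0;
}
```

In the real implementations the fixed header stays deliberately small; variable-length details are appended as netlink attributes after it.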
/Linux-v5.4/drivers/gpu/drm/vmwgfx/

vmwgfx_surface.c
    91  struct drm_vmw_gb_surface_create_rep *rep,
    96  struct drm_vmw_gb_surface_ref_ext_rep *rep,
    711  struct drm_vmw_surface_arg *rep = &arg->rep;   in vmw_surface_define_ioctl() local
    881  rep->sid = user_srf->prime.base.handle;   in vmw_surface_define_ioctl()
    984  struct drm_vmw_surface_create_req *rep = &arg->rep;   in vmw_surface_reference_ioctl() local
    1001  rep->flags = (uint32_t)srf->flags;   in vmw_surface_reference_ioctl()
    1002  rep->format = srf->format;   in vmw_surface_reference_ioctl()
    1003  memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));   in vmw_surface_reference_ioctl()
    1005  rep->size_addr;   in vmw_surface_reference_ioctl()
    1292  struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;   in vmw_gb_surface_define_ioctl() local
    [all …]

/Linux-v5.4/drivers/gpu/drm/nouveau/nvkm/engine/disp/

conn.c
    39  struct nvif_notify_conn_rep_v0 rep;   in nvkm_conn_hpd() local
    45  rep.mask = NVIF_NOTIFY_CONN_V0_UNPLUG;   in nvkm_conn_hpd()
    47  rep.mask = NVIF_NOTIFY_CONN_V0_PLUG;   in nvkm_conn_hpd()
    48  rep.version = 0;   in nvkm_conn_hpd()
    50  nvkm_event_send(&disp->hpd, rep.mask, index, &rep, sizeof(rep));   in nvkm_conn_hpd()

/Linux-v5.4/include/trace/events/

rpcrdma.h
    23  const struct rpcrdma_rep *rep
    26  TP_ARGS(rep),
    29  __field(const void *, rep)
    37  __entry->rep = rep;
    38  __entry->r_xprt = rep->rr_rxprt;
    39  __entry->xid = be32_to_cpu(rep->rr_xid);
    40  __entry->version = be32_to_cpu(rep->rr_vers);
    41  __entry->proc = be32_to_cpu(rep->rr_proc);
    45  __entry->r_xprt, __entry->xid, __entry->rep,
    53  const struct rpcrdma_rep *rep \
    [all …]

/Linux-v5.4/drivers/hid/intel-ish-hid/

ishtp-hid.c
    104  static void ishtp_hid_request(struct hid_device *hid, struct hid_report *rep,   in ishtp_hid_request() argument
    109  unsigned int len = ((rep->size - 1) >> 3) + 1 + (rep->id > 0);   in ishtp_hid_request()
    119  hid_ishtp_get_report(hid, rep->id, rep->type);   in ishtp_hid_request()
    130  hid_output_report(rep, buf + header_size);   in ishtp_hid_request()
    131  hid_ishtp_set_feature(hid, buf, len, rep->id);   in ishtp_hid_request()

/Linux-v5.4/arch/x86/boot/

copy.S
    25  rep; movsl
    28  rep; movsb
    41  rep; stosl
    44  rep; stosb

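Here "rep" is the x86 repeat prefix rather than a variable: "rep; movsl" copies CX/ECX 4-byte words from the source to the destination while advancing SI/DI, "rep; movsb" handles any remaining bytes, and "rep; stosl"/"rep; stosb" do the analogous store-fill from the accumulator (memset-style). As a rough user-space illustration of the copy half (an assumption-laden sketch with a made-up helper name, not the boot code, which is plain assembly):

```c
#include <stddef.h>

/* Copy the bulk of the buffer four bytes at a time with "rep movsl",
 * then finish the 0-3 leftover bytes with "rep movsb". x86/x86-64,
 * GCC/Clang inline-assembly syntax. */
static void rep_copy(void *dst, const void *src, size_t len)
{
	size_t longs = len >> 2;  /* number of 4-byte chunks */
	size_t bytes = len & 3;   /* trailing bytes */

	asm volatile("rep; movsl"
		     : "+D" (dst), "+S" (src), "+c" (longs)
		     : : "memory");
	/* dst and src were advanced by the first rep; continue from there. */
	asm volatile("rep; movsb"
		     : "+D" (dst), "+S" (src), "+c" (bytes)
		     : : "memory");
}
```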
/Linux-v5.4/lib/raid6/

unroll.awk
    12  if (/\$\$/) { rep = n } else { rep = 1 }
    13  for (i = 0; i < rep; ++i) {

/Linux-v5.4/include/uapi/drm/

vmwgfx_drm.h
    243  struct drm_vmw_surface_arg rep;   member
    271  struct drm_vmw_surface_create_req rep;   member
    430  struct drm_vmw_bo_rep rep;   member
    962  struct drm_vmw_gb_surface_create_rep rep;   member
    1004  struct drm_vmw_gb_surface_ref_rep rep;   member
    1089  struct drm_vmw_context_arg rep;   member
    1167  struct drm_vmw_gb_surface_create_rep rep;   member
    1210  struct drm_vmw_gb_surface_ref_ext_rep rep;   member

/Linux-v5.4/drivers/net/wireless/ath/ath5k/

eeprom.c
    1601  struct ath5k_edge_power *rep;   in ath5k_eeprom_read_ctl_info() local
    1625  rep = ee->ee_ctl_pwr;   in ath5k_eeprom_read_ctl_info()
    1641  rep += AR5K_EEPROM_N_EDGES;   in ath5k_eeprom_read_ctl_info()
    1647  rep[j].freq = (val >> 8) & fmask;   in ath5k_eeprom_read_ctl_info()
    1648  rep[j + 1].freq = val & fmask;   in ath5k_eeprom_read_ctl_info()
    1652  rep[j].edge = (val >> 8) & pmask;   in ath5k_eeprom_read_ctl_info()
    1653  rep[j].flag = (val >> 14) & 1;   in ath5k_eeprom_read_ctl_info()
    1654  rep[j + 1].edge = val & pmask;   in ath5k_eeprom_read_ctl_info()
    1655  rep[j + 1].flag = (val >> 6) & 1;   in ath5k_eeprom_read_ctl_info()
    1659  rep[0].freq = (val >> 9) & fmask;   in ath5k_eeprom_read_ctl_info()
    [all …]

/Linux-v5.4/drivers/media/i2c/

ir-kbd-i2c.c
    500  int rep, i, l, p = 0, s, c = 0;   in zilog_ir_format() local
    564  for (rep = c / 3; rep >= 1; rep--) {   in zilog_ir_format()
    565  if (!memcmp(&codes[c - rep * 3], &codes[c - rep * 2], rep) &&   in zilog_ir_format()
    566  !cmp_no_trail(&codes[c - rep], &codes[c - rep * 2], rep)) {   in zilog_ir_format()
    574  int leading = c - rep * 3;   in zilog_ir_format()
    576  if (leading >= ARRAY_SIZE(code_block->codes) - 3 - rep) {   in zilog_ir_format()
    581  dev_dbg(&rcdev->dev, "found trailing %d repeat\n", rep);   in zilog_ir_format()
    585  rep);   in zilog_ir_format()
    586  c = leading + 1 + rep;   in zilog_ir_format()

/Linux-v5.4/fs/cachefiles/

namei.c
    290  struct dentry *rep,   in cachefiles_bury_object() argument
    299  _enter(",'%pd','%pd'", dir, rep);   in cachefiles_bury_object()
    301  _debug("remove %p from %p", rep, dir);   in cachefiles_bury_object()
    304  if (!d_is_dir(rep)) {   in cachefiles_bury_object()
    309  ret = security_path_unlink(&path, rep);   in cachefiles_bury_object()
    313  trace_cachefiles_unlink(object, rep, why);   in cachefiles_bury_object()
    314  ret = vfs_unlink(d_inode(dir), rep, NULL);   in cachefiles_bury_object()
    317  cachefiles_mark_object_buried(cache, rep, why);   in cachefiles_bury_object()
    343  if (rep->d_parent != dir || IS_DEADDIR(d_inode(rep))) {   in cachefiles_bury_object()
    357  if (trap == rep) {   in cachefiles_bury_object()
    [all …]