Searched refs:mw (Results 1 – 25 of 40) sorted by relevance

/Linux-v6.1/drivers/infiniband/sw/rxe/
rxe_mw.c 18 struct rxe_mw *mw = to_rmw(ibmw); in rxe_alloc_mw() local
25 ret = rxe_add_to_pool(&rxe->mw_pool, mw); in rxe_alloc_mw()
31 mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1); in rxe_alloc_mw()
32 mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ? in rxe_alloc_mw()
34 spin_lock_init(&mw->lock); in rxe_alloc_mw()
36 rxe_finalize(mw); in rxe_alloc_mw()
43 struct rxe_mw *mw = to_rmw(ibmw); in rxe_dealloc_mw() local
45 rxe_cleanup(mw); in rxe_dealloc_mw()
51 struct rxe_mw *mw, struct rxe_mr *mr) in rxe_check_bind_mw() argument
53 if (mw->ibmw.type == IB_MW_TYPE_1) { in rxe_check_bind_mw()
[all …]
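Note on the rxe_alloc_mw() hits above: the MW rkey is built by packing the pool element index into the upper bits and an 8-bit rolling key into the low byte. A minimal standalone sketch of that packing (next_key() and make_rkey() are stand-ins, not the kernel's rxe helpers):

#include <stdint.h>
#include <stdio.h>

/* Sketch of the rkey layout from rxe_alloc_mw(): pool index in the upper
 * bits, an 8-bit rolling key in the low byte. next_key() stands in for the
 * kernel's rxe_get_next_key(). */
static uint8_t next_key(void)
{
	static uint8_t key;

	return key++;
}

static uint32_t make_rkey(uint32_t pool_index)
{
	return (pool_index << 8) | next_key();
}

int main(void)
{
	uint32_t rkey = make_rkey(42);

	printf("rkey=0x%08x index=%u key=0x%02x\n",
	       rkey, (unsigned)(rkey >> 8), (unsigned)(rkey & 0xffu));
	return 0;
}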
rxe_resp.c 409 struct rxe_mw *mw = NULL; in check_rkey() local
451 mw = rxe_lookup_mw(qp, access, rkey); in check_rkey()
452 if (!mw) { in check_rkey()
459 mr = mw->mr; in check_rkey()
466 if (mw->access & IB_ZERO_BASED) in check_rkey()
467 qp->resp.offset = mw->addr; in check_rkey()
469 rxe_put(mw); in check_rkey()
515 if (mw) in check_rkey()
516 rxe_put(mw); in check_rkey()
725 struct rxe_mw *mw; in rxe_recheck_mr() local
[all …]
rxe_verbs.h 453 static inline struct rxe_mw *to_rmw(struct ib_mw *mw) in to_rmw() argument
455 return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL; in to_rmw()
468 static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw) in rxe_mw_pd() argument
470 return to_rpd(mw->ibmw.pd); in rxe_mw_pd()
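to_rmw() and rxe_mw_pd() above are the usual container_of() accessors: the driver struct embeds the core ib_mw, and a pointer to the embedded member is converted back to its container. A userspace rendering of the idiom with simplified stand-in types:

#include <stddef.h>
#include <stdio.h>

/* Userspace sketch of the container_of() pattern behind to_rmw(): subtract
 * the member offset from the member pointer to recover the containing
 * struct. The struct layouts are simplified stand-ins for the kernel types. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_mw { int rkey; };

struct rxe_mw {
	struct ib_mw ibmw;	/* embedded core object */
	int state;
};

static struct rxe_mw *to_rmw(struct ib_mw *mw)
{
	return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
}

int main(void)
{
	struct rxe_mw mw = { .ibmw = { .rkey = 0x1234 }, .state = 1 };
	struct rxe_mw *back = to_rmw(&mw.ibmw);

	printf("recovered state=%d rkey=0x%x\n", back->state, back->ibmw.rkey);
	return 0;
}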
/Linux-v6.1/net/netfilter/ipvs/
ip_vs_wrr.c 63 int mw; /* maximum weight */ member
119 mark->mw = ip_vs_wrr_max_weight(svc) - (mark->di - 1); in ip_vs_wrr_init_svc()
120 mark->cw = mark->mw; in ip_vs_wrr_init_svc()
146 mark->mw = ip_vs_wrr_max_weight(svc) - (mark->di - 1); in ip_vs_wrr_dest_changed()
147 if (mark->cw > mark->mw || !mark->cw) in ip_vs_wrr_dest_changed()
148 mark->cw = mark->mw; in ip_vs_wrr_dest_changed()
172 if (mark->mw == 0) in ip_vs_wrr_schedule()
188 mark->cw = mark->mw; in ip_vs_wrr_schedule()
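In ip_vs_wrr.c, mw is the maximum destination weight, cw the current weight, and di the GCD of all weights; together they drive the classic interleaved weighted round-robin selection. A generic userspace sketch of that loop (not the kernel's list handling or locking):

#include <stdio.h>

/* Interleaved weighted round-robin (the scheme ip_vs_wrr implements):
 * cycle through servers, lowering the current weight cw by the GCD of all
 * weights (di) on each full pass, and pick a server whenever its weight >= cw. */
static int wrr_next(const int *weight, int n, int *idx, int *cw, int mw, int di)
{
	if (mw == 0)		/* all weights zero: nothing schedulable */
		return -1;

	for (;;) {
		*idx = (*idx + 1) % n;
		if (*idx == 0) {
			*cw -= di;
			if (*cw <= 0)
				*cw = mw;
		}
		if (weight[*idx] >= *cw)
			return *idx;
	}
}

int main(void)
{
	int weight[] = { 4, 2, 1 };	/* example server weights */
	int idx = -1, cw = 0, mw = 4, di = 1;	/* mw = max weight, di = gcd */

	for (int i = 0; i < 7; i++)
		printf("%d ", wrr_next(weight, 3, &idx, &cw, mw, di));
	printf("\n");	/* prints: 0 0 0 1 0 1 2 -- picks proportional to 4:2:1 */
	return 0;
}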
ip_vs_mh.c 347 int mw, shift; in ip_vs_mh_shift_weight() local
365 mw = weight / gcd; in ip_vs_mh_shift_weight()
368 shift = fls(mw) - IP_VS_MH_TAB_BITS; in ip_vs_mh_shift_weight()
/Linux-v6.1/drivers/ntb/
ntb_transport.c 617 struct ntb_transport_mw *mw; in ntb_transport_setup_qp_mw() local
629 mw = &nt->mw_vec[mw_num]; in ntb_transport_setup_qp_mw()
631 if (!mw->virt_addr) in ntb_transport_setup_qp_mw()
639 rx_size = (unsigned int)mw->xlat_size / num_qps_mw; in ntb_transport_setup_qp_mw()
640 qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count); in ntb_transport_setup_qp_mw()
793 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; in ntb_free_mw() local
796 if (!mw->virt_addr) in ntb_free_mw()
800 dma_free_coherent(&pdev->dev, mw->alloc_size, in ntb_free_mw()
801 mw->alloc_addr, mw->dma_addr); in ntb_free_mw()
802 mw->xlat_size = 0; in ntb_free_mw()
[all …]
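ntb_transport_setup_qp_mw() above stripes queue pairs across the NTB memory windows and gives each QP an equal slice of the window it lands on. A simplified sketch of that carve-up, assuming the QP count divides evenly across the windows (names are illustrative, not the driver's):

#include <stdio.h>

/* Each QP uses MW (qp_num % mw_count); QPs sharing an MW split it into equal
 * rx buffers, and the QP's slot within its MW is (qp_num / mw_count).
 * Assumes qp_count is a multiple of mw_count to keep the sketch short. */
struct mw_slice {
	unsigned long offset;	/* byte offset of the QP's rx buffer inside its MW */
	unsigned long size;	/* rx buffer size */
};

static struct mw_slice qp_rx_slice(unsigned int qp_num, unsigned int qp_count,
				   unsigned int mw_count, unsigned long mw_size)
{
	unsigned int num_qps_mw = qp_count / mw_count;
	unsigned long rx_size = mw_size / num_qps_mw;

	return (struct mw_slice){
		.offset = rx_size * (qp_num / mw_count),
		.size = rx_size,
	};
}

int main(void)
{
	for (unsigned int qp = 0; qp < 4; qp++) {
		struct mw_slice s = qp_rx_slice(qp, 4, 2, 1UL << 20);

		printf("qp%u: mw=%u offset=%lu size=%lu\n",
		       qp, qp % 2, s.offset, s.size);
	}
	return 0;
}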
/Linux-v6.1/fs/ocfs2/
dlmglue.c 434 struct ocfs2_mask_waiter *mw, int ret) in ocfs2_update_lock_stats() argument
447 kt = ktime_sub(ktime_get(), mw->mw_lock_start); in ocfs2_update_lock_stats()
474 struct ocfs2_mask_waiter *mw; in ocfs2_track_lock_wait() local
481 mw = list_first_entry(&lockres->l_mask_waiters, in ocfs2_track_lock_wait()
484 ktime_to_us(ktime_mono_to_real(mw->mw_lock_start)); in ocfs2_track_lock_wait()
487 static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw) in ocfs2_init_start_time() argument
489 mw->mw_lock_start = ktime_get(); in ocfs2_init_start_time()
496 int level, struct ocfs2_mask_waiter *mw, int ret) in ocfs2_update_lock_stats() argument
505 static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw) in ocfs2_init_start_time() argument
891 struct ocfs2_mask_waiter *mw, *tmp; in lockres_set_flags() local
[all …]
/Linux-v6.1/arch/mips/txx9/generic/
mem_tx4927.c 46 unsigned int mw = 0; in tx4927_process_sdccr() local
61 mw = 8 >> sdccr_mw; in tx4927_process_sdccr()
64 return rs * cs * mw * bs; in tx4927_process_sdccr()
/Linux-v6.1/drivers/infiniband/hw/hns/
hns_roce_mr.c 459 struct hns_roce_mw *mw) in hns_roce_mw_free() argument
464 if (mw->enabled) { in hns_roce_mw_free()
466 key_to_hw_index(mw->rkey) & in hns_roce_mw_free()
472 key_to_hw_index(mw->rkey)); in hns_roce_mw_free()
476 (int)key_to_hw_index(mw->rkey)); in hns_roce_mw_free()
480 struct hns_roce_mw *mw) in hns_roce_mw_enable() argument
485 unsigned long mtpt_idx = key_to_hw_index(mw->rkey); in hns_roce_mw_enable()
499 ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw); in hns_roce_mw_enable()
512 mw->enabled = 1; in hns_roce_mw_enable()
532 struct hns_roce_mw *mw = to_hr_mw(ibmw); in hns_roce_alloc_mw() local
[all …]
/Linux-v6.1/drivers/net/ethernet/mellanox/mlx4/
mr.c 815 struct mlx4_mw *mw) in mlx4_mw_alloc() argument
829 mw->key = hw_index_to_key(index); in mlx4_mw_alloc()
830 mw->pd = pd; in mlx4_mw_alloc()
831 mw->type = type; in mlx4_mw_alloc()
832 mw->enabled = MLX4_MPT_DISABLED; in mlx4_mw_alloc()
838 int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw) in mlx4_mw_enable() argument
844 err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key)); in mlx4_mw_enable()
858 mpt_entry->key = cpu_to_be32(key_to_hw_index(mw->key)); in mlx4_mw_enable()
859 mpt_entry->pd_flags = cpu_to_be32(mw->pd); in mlx4_mw_enable()
860 if (mw->type == MLX4_MW_TYPE_2) { in mlx4_mw_enable()
[all …]
/Linux-v6.1/drivers/clk/rockchip/
clk.h 482 #define COMPOSITE(_id, cname, pnames, f, mo, ms, mw, mf, ds, dw,\ argument
493 .mux_width = mw, \
503 #define COMPOSITE_DIV_OFFSET(_id, cname, pnames, f, mo, ms, mw, \ argument
514 .mux_width = mw, \
562 #define COMPOSITE_NODIV(_id, cname, pnames, f, mo, ms, mw, mf, \ argument
573 .mux_width = mw, \
580 #define COMPOSITE_NOGATE(_id, cname, pnames, f, mo, ms, mw, mf, \ argument
591 .mux_width = mw, \
600 mw, mf, ds, dw, df, dt) \ argument
610 .mux_width = mw, \
[all …]
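In these Rockchip composite-clock macros, mw is the mux bit-field width (mo and ms being the register offset and shift); selecting a clock parent is a read-modify-write of that field. A generic sketch of the field update (not the Rockchip driver's register helpers):

#include <stdint.h>
#include <stdio.h>

/* How a mux_shift (ms) plus mux_width (mw) describe a parent-select field:
 * clear (mw) bits at offset (ms) and write the new parent index there. */
static uint32_t mux_set_parent(uint32_t reg, unsigned int ms, unsigned int mw,
			       uint32_t parent)
{
	uint32_t mask = ((1u << mw) - 1) << ms;

	return (reg & ~mask) | ((parent << ms) & mask);
}

int main(void)
{
	uint32_t reg = 0xffffffff;

	/* 2-bit mux at bit 6: select parent index 1 */
	printf("0x%08x\n", mux_set_parent(reg, 6, 2, 1));	/* -> 0xffffff7f */
	return 0;
}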
/Linux-v6.1/drivers/net/ethernet/marvell/octeontx2/af/
rvu.c 33 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
2116 struct mbox_wq_info *mw; in __rvu_mbox_handler() local
2121 mw = &rvu->afpf_wq_info; in __rvu_mbox_handler()
2124 mw = &rvu->afvf_wq_info; in __rvu_mbox_handler()
2130 devid = mwork - mw->mbox_wrk; in __rvu_mbox_handler()
2131 mbox = &mw->mbox; in __rvu_mbox_handler()
2136 if (mw->mbox_wrk[devid].num_msgs == 0) in __rvu_mbox_handler()
2141 for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) { in __rvu_mbox_handler()
2174 mw->mbox_wrk[devid].num_msgs = 0; in __rvu_mbox_handler()
2200 struct mbox_wq_info *mw; in __rvu_mbox_up_handler() local
[all …]
/Linux-v6.1/include/linux/usb/
pd.h 249 #define PDO_BATT_MAX_POWER(mw) ((((mw) / 250) & PDO_PWR_MASK) << PDO_BATT_MAX_PWR_SHIFT) argument
374 #define RDO_BATT_OP_PWR(mw) ((((mw) / 250) & RDO_PWR_MASK) << RDO_BATT_OP_PWR_SHIFT) argument
375 #define RDO_BATT_MAX_PWR(mw) ((((mw) / 250) & RDO_PWR_MASK) << RDO_BATT_MAX_PWR_SHIFT) argument
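Here mw is milliwatts: battery PDO/RDO power is carried as a field counted in 250 mW units, which is why the macros divide by 250 before masking and shifting. A small arithmetic sketch (the mask and shift values below are illustrative, not copied from pd.h):

#include <stdio.h>

/* Battery power fields are encoded in 250 mW units; the mask/shift mirror
 * the shape of the pd.h macros but are illustrative constants only. */
#define PWR_MASK	0x3ff
#define MAX_PWR_SHIFT	0

#define BATT_MAX_POWER(mw)	((((mw) / 250) & PWR_MASK) << MAX_PWR_SHIFT)

int main(void)
{
	/* 45 W = 45000 mW -> 180 units of 250 mW */
	printf("45 W encodes as %u\n", BATT_MAX_POWER(45000));
	/* granularity: anything below 250 mW rounds down to 0 */
	printf("200 mW encodes as %u\n", BATT_MAX_POWER(200));
	return 0;
}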
/Linux-v6.1/drivers/infiniband/hw/mlx4/
mr.c 616 struct mlx4_ib_mw *mw = to_mmw(ibmw); in mlx4_ib_alloc_mw() local
620 to_mlx4_type(ibmw->type), &mw->mmw); in mlx4_ib_alloc_mw()
624 err = mlx4_mw_enable(dev->dev, &mw->mmw); in mlx4_ib_alloc_mw()
628 ibmw->rkey = mw->mmw.key; in mlx4_ib_alloc_mw()
632 mlx4_mw_free(dev->dev, &mw->mmw); in mlx4_ib_alloc_mw()
638 struct mlx4_ib_mw *mw = to_mmw(ibmw); in mlx4_ib_dealloc_mw() local
640 mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw); in mlx4_ib_dealloc_mw()
/Linux-v6.1/drivers/mtd/maps/
physmap-core.c 147 map_word mw; in physmap_addr_gpios_read() local
155 mw.x[0] = word; in physmap_addr_gpios_read()
156 return mw; in physmap_addr_gpios_read()
181 static void physmap_addr_gpios_write(struct map_info *map, map_word mw, in physmap_addr_gpios_write() argument
192 word = mw.x[0]; in physmap_addr_gpios_write()
/Linux-v6.1/drivers/pci/endpoint/functions/
pci-epf-ntb.c 236 enum pci_epc_interface_type type, u32 mw) in epf_ntb_configure_mw() argument
252 peer_barno = peer_ntb_epc->epf_ntb_bar[mw + NTB_MW_OFFSET]; in epf_ntb_configure_mw()
259 if (mw + NTB_MW_OFFSET == BAR_DB_MW1) in epf_ntb_configure_mw()
262 if (size > ntb->mws_size[mw]) { in epf_ntb_configure_mw()
265 pci_epc_interface_string(type), mw, size, in epf_ntb_configure_mw()
266 ntb->mws_size[mw]); in epf_ntb_configure_mw()
278 pci_epc_interface_string(type), mw); in epf_ntb_configure_mw()
295 enum pci_epc_interface_type type, u32 mw) in epf_ntb_teardown_mw() argument
309 peer_barno = peer_ntb_epc->epf_ntb_bar[mw + NTB_MW_OFFSET]; in epf_ntb_teardown_mw()
314 if (mw + NTB_MW_OFFSET == BAR_DB_MW1) in epf_ntb_teardown_mw()
pci-epf-vntb.c 199 static int epf_ntb_configure_mw(struct epf_ntb *ntb, u32 mw) in epf_ntb_configure_mw() argument
206 phys_addr = ntb->vpci_mw_phy[mw]; in epf_ntb_configure_mw()
216 "Failed to map memory window %d address\n", mw); in epf_ntb_configure_mw()
228 static void epf_ntb_teardown_mw(struct epf_ntb *ntb, u32 mw) in epf_ntb_teardown_mw() argument
233 ntb->vpci_mw_phy[mw]); in epf_ntb_teardown_mw()
/Linux-v6.1/drivers/infiniband/hw/bnxt_re/
ib_verbs.h 53 struct ib_mw *mw; member
206 int bnxt_re_dealloc_mw(struct ib_mw *mw);
ib_verbs.c 409 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey); in bnxt_re_create_fence_wqe()
447 if (fence->mw) { in bnxt_re_destroy_fence_mr()
448 bnxt_re_dealloc_mw(fence->mw); in bnxt_re_destroy_fence_mr()
449 fence->mw = NULL; in bnxt_re_destroy_fence_mr()
475 struct ib_mw *mw; in bnxt_re_create_fence_mr() local
519 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL); in bnxt_re_create_fence_mr()
520 if (IS_ERR(mw)) { in bnxt_re_create_fence_mr()
523 rc = PTR_ERR(mw); in bnxt_re_create_fence_mr()
526 fence->mw = mw; in bnxt_re_create_fence_mr()
3724 struct bnxt_re_mw *mw; in bnxt_re_alloc_mw() local
[all …]
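The bnxt_re fence path above refreshes its memory window's rkey with ib_inc_rkey(), the core helper that increments only the low 8-bit key portion of an rkey and leaves the index bits untouched. A standalone rendering of that behaviour:

#include <stdint.h>
#include <stdio.h>

/* Behaviour of ib_inc_rkey(): bump only the low 8 "key" bits, wrapping within
 * that byte, while the upper index bits stay fixed. (Standalone copy for
 * illustration; the kernel helper lives in include/rdma/ib_verbs.h.) */
static uint32_t inc_rkey(uint32_t rkey)
{
	const uint32_t mask = 0x000000ff;

	return ((rkey + 1) & mask) | (rkey & ~mask);
}

int main(void)
{
	uint32_t rkey = 0x00002aff;	/* index 0x2a, key byte at its maximum */

	printf("0x%08x -> 0x%08x\n", rkey, inc_rkey(rkey));	/* key wraps to 0x00 */
	return 0;
}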
/Linux-v6.1/include/dt-bindings/usb/
pd.h 44 #define PDO_BATT_MAX_POWER(mw) ((((mw) / 250) & PDO_PWR_MASK) << PDO_BATT_MAX_PWR_SHIFT) argument
/Linux-v6.1/drivers/infiniband/hw/mlx5/
mr.c 1906 struct mlx5_ib_mw *mw = to_mmw(ibmw); in mlx5_ib_alloc_mw() local
1948 err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen); in mlx5_ib_alloc_mw()
1952 mw->mmkey.type = MLX5_MKEY_MW; in mlx5_ib_alloc_mw()
1953 ibmw->rkey = mw->mmkey.key; in mlx5_ib_alloc_mw()
1954 mw->mmkey.ndescs = ndescs; in mlx5_ib_alloc_mw()
1965 err = mlx5r_store_odp_mkey(dev, &mw->mmkey); in mlx5_ib_alloc_mw()
1974 mlx5_core_destroy_mkey(dev->mdev, mw->mmkey.key); in mlx5_ib_alloc_mw()
1980 int mlx5_ib_dealloc_mw(struct ib_mw *mw) in mlx5_ib_dealloc_mw() argument
1982 struct mlx5_ib_dev *dev = to_mdev(mw->device); in mlx5_ib_dealloc_mw()
1983 struct mlx5_ib_mw *mmw = to_mmw(mw); in mlx5_ib_dealloc_mw()
/Linux-v6.1/drivers/media/platform/nxp/dw100/
dw100.c 382 u32 sw, sh, mw, mh, idx; in dw100_ctrl_dewarping_map_init() local
389 mw = ctrl->dims[0]; in dw100_ctrl_dewarping_map_init()
394 qdx = qsw / (mw - 1); in dw100_ctrl_dewarping_map_init()
397 ctx->map_width = mw; in dw100_ctrl_dewarping_map_init()
399 ctx->map_size = mh * mw * sizeof(u32); in dw100_ctrl_dewarping_map_init()
402 qy = min_t(u32, (idx / mw) * qdy, qsh); in dw100_ctrl_dewarping_map_init()
403 qx = min_t(u32, (idx % mw) * qdx, qsw); in dw100_ctrl_dewarping_map_init()
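dw100_ctrl_dewarping_map_init() above fills an mw x mh grid with evenly spaced source coordinates, i.e. an identity dewarping map, stepping qdx = qsw / (mw - 1) per column and clamping at the frame edge. A simplified sketch of that grid generation (the packing of x and y into one u32 is an assumption of this sketch, not taken from the driver):

#include <stdint.h>
#include <stdlib.h>

/* Build an identity dewarping map: an mw x mh grid of (x, y) source
 * coordinates spaced evenly across the source frame. qsw/qsh are the source
 * width/height in the hardware's fixed-point units; packing x in the low and
 * y in the high half-word is an assumption of this sketch. */
static uint32_t *identity_map(uint32_t mw, uint32_t mh, uint32_t qsw, uint32_t qsh)
{
	uint32_t qdx = qsw / (mw - 1);	/* horizontal step between map columns */
	uint32_t qdy = qsh / (mh - 1);	/* vertical step between map rows */
	uint32_t *map = malloc((size_t)mw * mh * sizeof(*map));

	if (!map)
		return NULL;

	for (uint32_t idx = 0; idx < mw * mh; idx++) {
		uint32_t qx = (idx % mw) * qdx;
		uint32_t qy = (idx / mw) * qdy;

		if (qx > qsw)	/* clamp to the frame edge */
			qx = qsw;
		if (qy > qsh)
			qy = qsh;
		map[idx] = (qy << 16) | (qx & 0xffff);
	}
	return map;	/* caller frees */
}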
/Linux-v6.1/include/uapi/rdma/
rdma_user_rxe.h 113 } mw; member
/Linux-v6.1/drivers/infiniband/core/
uverbs_main.c 105 int uverbs_dealloc_mw(struct ib_mw *mw) in uverbs_dealloc_mw() argument
107 struct ib_pd *pd = mw->pd; in uverbs_dealloc_mw()
110 ret = mw->device->ops.dealloc_mw(mw); in uverbs_dealloc_mw()
115 kfree(mw); in uverbs_dealloc_mw()
uverbs.h 245 int uverbs_dealloc_mw(struct ib_mw *mw);
