/Linux-v5.4/drivers/net/ethernet/mellanox/mlx5/core/
  qp.c
      44  struct mlx5_core_dct *dct);
     128  struct mlx5_core_dct *dct;  in rsc_event_notifier() local
     138  rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;  in rsc_event_notifier()
     183  dct = (struct mlx5_core_dct *)common;  in rsc_event_notifier()
     185  complete(&dct->drained);  in rsc_event_notifier()
     234  struct mlx5_core_dct *dct, bool need_cleanup)  in _mlx5_core_destroy_dct() argument
     238  struct mlx5_core_qp *qp = &dct->mqp;  in _mlx5_core_destroy_dct()
     241  err = mlx5_core_drain_dct(dev, dct);  in _mlx5_core_destroy_dct()
     252  wait_for_completion(&dct->drained);  in _mlx5_core_destroy_dct()
     255  destroy_resource_common(dev, &dct->mqp);  in _mlx5_core_destroy_dct()
     [all …]
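Read together, the qp.c hits outline the core driver's DCT teardown handshake: _mlx5_core_destroy_dct() first asks firmware to drain the DCT, then sleeps on dct->drained, which rsc_event_notifier() completes once the matching drained event arrives carrying the 24-bit DCT number. A condensed sketch of that flow; the lookup helper name is assumed, and error paths plus the final DESTROY_DCT command are omitted, so treat it as an illustration rather than the verbatim v5.4 code:

    /* EQE path (rsc_event_notifier): find the DCT named in the event, wake the waiter. */
    u32 rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;            /* low 24 bits = DCT number */
    struct mlx5_core_rsc_common *common = mlx5_get_rsc(table, rsn);  /* lookup helper name assumed */
    struct mlx5_core_dct *dct = (struct mlx5_core_dct *)common;

    complete(&dct->drained);

    /* Teardown path: drain, wait for the event above, then release the resource. */
    static int _mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
                                      struct mlx5_core_dct *dct, bool need_cleanup)
    {
            int err;

            err = mlx5_core_drain_dct(dev, dct);
            if (!err)
                    wait_for_completion(&dct->drained);   /* signalled by the EQE path */
            destroy_resource_common(dev, &dct->mqp);
            /* ... DESTROY_DCT command and need_cleanup handling omitted ... */
            return err;
    }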
  main.c
     571  if (MLX5_CAP_GEN_MAX(dev, dct))  in handle_hca_cap()
     572  MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1);  in handle_hca_cap()

  eq.c
     544  if (MLX5_CAP_GEN_MAX(dev, dct))  in gather_async_events_mask()
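These two files show the usual capability-gating pattern: when firmware reports DCT in its maximum-capability page (MLX5_CAP_GEN_MAX), handle_hca_cap() requests the feature in the SET_HCA_CAP command and gather_async_events_mask() subscribes to the DCT-drained async event. A hedged sketch of both call sites; each sits inside a much larger function, and the event-mask update is my reading of eq.c rather than part of the truncated hit:

    /* handle_hca_cap(): ask firmware to enable DCT if it can be granted. */
    if (MLX5_CAP_GEN_MAX(dev, dct))
            MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1);

    /* gather_async_events_mask(): only listen for drained events when DCT may be on.
     * The mask line below is recalled, not shown above; verify against eq.c. */
    if (MLX5_CAP_GEN_MAX(dev, dct))
            async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);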
/Linux-v5.4/drivers/edac/

  amd64_edac.c
      90  static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)  in f15h_select_dct() argument
      96  reg |= dct;  in f15h_select_dct()
     114  static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,  in amd64_read_dct_pci_cfg() argument
     119  if (dct || offset >= 0x100)  in amd64_read_dct_pci_cfg()
     124  if (dct) {  in amd64_read_dct_pci_cfg()
     142  dct = (dct && pvt->model == 0x30) ? 3 : dct;  in amd64_read_dct_pci_cfg()
     143  f15h_select_dct(pvt, dct);  in amd64_read_dct_pci_cfg()
     147  if (dct)  in amd64_read_dct_pci_cfg()
     391  static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,  in get_cs_base_and_mask() argument
     398  csbase = pvt->csels[dct].csbases[csrow];  in get_cs_base_and_mask()
     [all …]
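In this driver `dct` is a DRAM controller (DCT) index, not the mlx5 transport. The hits show that on family 15h the per-DCT registers are not addressed directly: amd64_read_dct_pci_cfg() first programs a select field through f15h_select_dct() (on model 0x30 a nonzero controller index maps to select value 3) and then reads the shared window. A small sketch of the select helper, reconstructed from the hits and my recollection of the file; the register name and masking are assumptions to verify against amd64_edac.c:

    static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
    {
            u32 reg = 0;

            /* DCT_CFG_SEL lives in NB function 1; constant name recalled, not in the hits. */
            amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
            reg &= (pvt->model == 0x30) ? ~3 : ~1;   /* model 0x30 has a wider select field */
            reg |= dct;
            amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
    }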
  amd64_edac.h
     174  #define csrow_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE)  argument
     175  #define csrow_sec_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases_sec[(i)] & DCSB_CS_ENABLE)  argument
     475  int (*dbam_to_cs) (struct amd64_pvt *pvt, u8 dct,
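The two macros test the enable bit of a chip-select base register on a given DRAM controller. A hypothetical usage sketch; the b_cnt field name is recalled from the same header and is not shown in the hits:

    /* Count the enabled chip selects on DRAM controller 'dct'. */
    static int enabled_csrows(struct amd64_pvt *pvt, u8 dct)
    {
            int i, nr = 0;

            for (i = 0; i < pvt->csels[dct].b_cnt; i++)
                    if (csrow_enabled(i, dct, pvt))
                            nr++;
            return nr;
    }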
/Linux-v5.4/drivers/soc/fsl/dpio/

  qbman-portal.h
     157  enum qbman_pull_type_e dct);
     159  enum qbman_pull_type_e dct);

  qbman-portal.c
     626  enum qbman_pull_type_e dct)  in qbman_pull_desc_set_wq() argument
     628  d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;  in qbman_pull_desc_set_wq()
     640  enum qbman_pull_type_e dct)  in qbman_pull_desc_set_channel() argument
     642  d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;  in qbman_pull_desc_set_channel()
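Here `dct` is the QBMan dequeue command type: both qbman_pull_desc_set_wq() and qbman_pull_desc_set_channel() shift it into the verb byte of a volatile-dequeue (pull) descriptor. A hedged usage sketch; helper names other than the two setters above are recalled from the same driver and may differ slightly, and 'swp' and 'ch_id' are assumed to be a software portal and channel id obtained elsewhere:

    /* Build and issue a volatile dequeue from channel 'ch_id' on software portal 'swp'. */
    struct qbman_pull_desc pd;

    qbman_pull_desc_clear(&pd);                      /* zero the descriptor     */
    qbman_pull_desc_set_numframes(&pd, 8);           /* pull at most 8 frames   */
    qbman_pull_desc_set_channel(&pd, ch_id, qbman_pull_type_prio);
    if (qbman_swp_pull(swp, &pd))
            pr_debug("portal busy, retry the pull\n");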
/Linux-v5.4/include/linux/mlx5/

  qp.h
     575  struct mlx5_core_dct *dct);
     578  int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,

  device.h
     707  struct mlx5_eqe_dct dct;  member

  mlx5_ifc.h
    1283  u8 dct[0x1];  member
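These headers tie the mlx5 pieces together: qp.h exports the mlx5_core_*_dct() entry points, device.h embeds struct mlx5_eqe_dct in the event-queue-entry union, and mlx5_ifc.h carries the single `dct` capability bit that MLX5_CAP_GEN()/MLX5_CAP_GEN_MAX() read back. A minimal usage check in the spirit of the set_mlx_qp_type() hit further down:

    /* Refuse a DC transport QP if the HCA does not expose the capability. */
    if (!MLX5_CAP_GEN(dev->mdev, dct))
            return -EOPNOTSUPP;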
/Linux-v5.4/drivers/infiniband/hw/mlx5/

  qp.c
    2563  qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL);  in mlx5_ib_create_dct()
    2564  if (!qp->dct.in) {  in mlx5_ib_create_dct()
    2569  MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid);  in mlx5_ib_create_dct()
    2570  dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);  in mlx5_ib_create_dct()
    2619  if (!MLX5_CAP_GEN(dev->mdev, dct)) {  in set_mlx_qp_type()
    2758  err = mlx5_core_destroy_dct(dev->mdev, &mqp->dct.mdct);  in mlx5_ib_destroy_dct()
    2765  kfree(mqp->dct.in);  in mlx5_ib_destroy_dct()
    3815  dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);  in mlx5_ib_modify_dct()
    3869  err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in,  in mlx5_ib_modify_dct()
    3874  resp.dctn = qp->dct.mdct.mqp.qpn;  in mlx5_ib_modify_dct()
    [all …]
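The InfiniBand driver creates DCTs in two phases: mlx5_ib_create_dct() only allocates and pre-fills the CREATE_DCT command input, while the firmware object is created later in mlx5_ib_modify_dct(), when the DCT is moved toward RTR, and the resulting DCT number is handed back to userspace. A condensed sketch of both phases; the trailing arguments of mlx5_core_create_dct() are not visible in the truncated hit and are recalled, so check them against qp.h:

    /* Phase 1, create time: stage the command input, no firmware call yet. */
    qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL);
    if (!qp->dct.in)
            return ERR_PTR(-ENOMEM);
    MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid);
    dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
    /* ... remaining DCT context fields filled from the verbs attributes ... */

    /* Phase 2, modify to RTR: issue CREATE_DCT and report the DCT number. */
    err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in,
                               MLX5_ST_SZ_BYTES(create_dct_in), out, sizeof(out));
    if (!err)
            resp.dctn = qp->dct.mdct.mqp.qpn;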
  mlx5_ib.h
     405  struct mlx5_ib_dct dct;  member

  devx.c
     630  qp->dct.mdct.mqp.qpn) == obj_id;  in devx_is_valid_obj_id()
    2290  obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;  in devx_get_obj_id_from_event()
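devx.c recovers the object number from an event the same way core/qp.c does at its line 138: the DCT number is the low 24 bits of the big-endian dctn word in the event queue entry. As a one-line illustration:

    /* DCT numbers are 24-bit; mask off the high byte of the EQE field. */
    u32 dctn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;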
/Linux-v5.4/arch/ia64/include/asm/

  pal.h
    1293  dct :4,  /* Date cache tracking */  member