/Linux-v5.4/tools/testing/selftests/kvm/x86_64/

D  hyperv_cpuid.c
      54  TEST_ASSERT(hv_cpuid_entries->nent == 6,  in test_hv_cpuid()
      57  hv_cpuid_entries->nent);  in test_hv_cpuid()
      59  TEST_ASSERT(hv_cpuid_entries->nent == 7,  in test_hv_cpuid()
      62  hv_cpuid_entries->nent);  in test_hv_cpuid()
      64  for (i = 0; i < hv_cpuid_entries->nent; i++) {  in test_hv_cpuid()
     102  static struct kvm_cpuid2 cpuid = {.nent = 0};  in test_hv_cpuid_e2big()
     115  int nent = 20; /* should be enough */  in kvm_get_supported_hv_cpuid() local
     118  cpuid = malloc(sizeof(*cpuid) + nent * sizeof(struct kvm_cpuid_entry2));  in kvm_get_supported_hv_cpuid()
     125  cpuid->nent = nent;  in kvm_get_supported_hv_cpuid()

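The selftest hits above all revolve around one userspace idiom: struct kvm_cpuid2 ends in a flexible array of kvm_cpuid_entry2, so the buffer is allocated as header plus nent entries, and nent announces the capacity to the kernel before the ioctl fills it in. A minimal sketch of that pattern (the helper name and error handling are mine; the capacity of 20 comes from the test):

    #include <stdlib.h>
    #include <linux/kvm.h>  /* struct kvm_cpuid2, struct kvm_cpuid_entry2 */

    /* Sketch of the allocation idiom from kvm_get_supported_hv_cpuid():
     * size the buffer for the header plus nent trailing entries, then
     * record the capacity in ->nent for the ioctl to consume. */
    static struct kvm_cpuid2 *alloc_cpuid2(int nent)
    {
            struct kvm_cpuid2 *cpuid;

            cpuid = malloc(sizeof(*cpuid) +
                           nent * sizeof(struct kvm_cpuid_entry2));
            if (!cpuid)
                    return NULL;
            cpuid->nent = nent;     /* capacity offered to the kernel */
            return cpuid;
    }
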
/Linux-v5.4/arch/x86/kvm/

D  cpuid.c
     206  if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)  in kvm_vcpu_ioctl_set_cpuid()
     209  if (cpuid->nent) {  in kvm_vcpu_ioctl_set_cpuid()
     212  cpuid->nent));  in kvm_vcpu_ioctl_set_cpuid()
     217  cpuid->nent * sizeof(struct kvm_cpuid_entry)))  in kvm_vcpu_ioctl_set_cpuid()
     220  for (i = 0; i < cpuid->nent; i++) {  in kvm_vcpu_ioctl_set_cpuid()
     232  vcpu->arch.cpuid_nent = cpuid->nent;  in kvm_vcpu_ioctl_set_cpuid()
     250  if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)  in kvm_vcpu_ioctl_set_cpuid2()
     254  cpuid->nent * sizeof(struct kvm_cpuid_entry2)))  in kvm_vcpu_ioctl_set_cpuid2()
     256  vcpu->arch.cpuid_nent = cpuid->nent;  in kvm_vcpu_ioctl_set_cpuid2()
     271  if (cpuid->nent < vcpu->arch.cpuid_nent)  in kvm_vcpu_ioctl_get_cpuid2()
     [all …]

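On the SET side, nent arrives from userspace and must be bounded before it is trusted to size a copy. A condensed sketch of the shape of kvm_vcpu_ioctl_set_cpuid2() (not the verbatim v5.4 code):

    /* Condensed sketch: bound nent against the ABI limit before
     * using it to size the copy from userspace. */
    static int set_cpuid2_sketch(struct kvm_vcpu *vcpu,
                                 struct kvm_cpuid2 *cpuid,
                                 struct kvm_cpuid_entry2 __user *entries)
    {
            if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                    return -E2BIG;
            if (copy_from_user(vcpu->arch.cpuid_entries, entries,
                               cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
                    return -EFAULT;
            vcpu->arch.cpuid_nent = cpuid->nent;
            return 0;
    }
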
D  hyperv.c
    1797  int i, nent = ARRAY_SIZE(cpuid_entries);  in kvm_vcpu_ioctl_get_hv_cpuid() local
    1804  --nent;  in kvm_vcpu_ioctl_get_hv_cpuid()
    1806  if (cpuid->nent < nent)  in kvm_vcpu_ioctl_get_hv_cpuid()
    1809  if (cpuid->nent > nent)  in kvm_vcpu_ioctl_get_hv_cpuid()
    1810  cpuid->nent = nent;  in kvm_vcpu_ioctl_get_hv_cpuid()
    1812  for (i = 0; i < nent; i++) {  in kvm_vcpu_ioctl_get_hv_cpuid()
    1908  nent * sizeof(struct kvm_cpuid_entry2)))  in kvm_vcpu_ioctl_get_hv_cpuid()

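The GET side shows the complementary handshake: the caller advertises its buffer capacity in cpuid->nent, the kernel rejects a too-small buffer with -E2BIG, and shrinks cpuid->nent to the number of entries it actually returns. In outline (a sketch of the control flow, not the verbatim function):

    /* nent       = entries the kernel wants to return
     * cpuid->nent = capacity the caller provided */
    if (cpuid->nent < nent)
            return -E2BIG;          /* caller's buffer is too small */
    if (cpuid->nent > nent)
            cpuid->nent = nent;     /* report how many entries are valid */
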
/Linux-v5.4/drivers/gpu/drm/nouveau/nvkm/engine/gr/

D  gk20a.c
      42  int nent;  in gk20a_gr_av_to_init() local
      50  nent = (fuc.size / sizeof(struct gk20a_fw_av));  in gk20a_gr_av_to_init()
      51  pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));  in gk20a_gr_av_to_init()
      60  for (i = 0; i < nent; i++) {  in gk20a_gr_av_to_init()
      91  int nent;  in gk20a_gr_aiv_to_init() local
      99  nent = (fuc.size / sizeof(struct gk20a_fw_aiv));  in gk20a_gr_aiv_to_init()
     100  pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));  in gk20a_gr_aiv_to_init()
     109  for (i = 0; i < nent; i++) {  in gk20a_gr_aiv_to_init()
     136  int nent;  in gk20a_gr_av_to_method() local
     144  nent = (fuc.size / sizeof(struct gk20a_fw_av));  in gk20a_gr_av_to_method()
     [all …]

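Here the firmware image is treated as an array of fixed-size records, so the entry count falls straight out of the blob size, and the pack is sized nent + 1 so the zeroed element from vzalloc() terminates the list. A sketch under that reading (struct layout as in gk20a.c):

    struct gk20a_fw_av {
            u32 addr;
            u32 data;
    };

    nent = fuc.size / sizeof(struct gk20a_fw_av);   /* records in the blob */
    pack = vzalloc(sizeof(*pack) * 2 +
                   sizeof(*init) * (nent + 1));     /* +1 zero terminator */
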
/Linux-v5.4/scripts/

D  conmakehash.c
      82  int i, nuni, nent;  in main() local
     272  nent = 0;  in main()
     275  while ( nent >= unicount[fp0] )  in main()
     278  nent = 0;  in main()
     280  printf("0x%04x", unitable[fp0][nent++]);  in main()

/Linux-v5.4/drivers/net/ethernet/mellanox/mlx5/core/

D  eq.c
     236  for (i = 0; i < eq->nent; i++) {  in init_eq_buf()
     262  eq->nent = roundup_pow_of_two(param->nent + MLX5_NUM_SPARE_EQE);  in create_map_eq()
     264  err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);  in create_map_eq()
     291  MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));  in create_map_eq()
     578  .nent = MLX5_NUM_CMD_EQE,  in create_async_eqs()
     597  .nent = MLX5_NUM_ASYNC_EQE,  in create_async_eqs()
     616  .nent = /* TODO: sriov max_vf + */ 1,  in create_async_eqs()
     738  eqe = get_eqe(eq, ci & (eq->nent - 1));  in mlx5_eq_get_eqe()
     739  eqe = ((eqe->owner & 1) ^ !!(ci & eq->nent)) ? NULL : eqe;  in mlx5_eq_get_eqe()
     785  int nent;  in create_comp_eqs() local
     [all …]

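The hits at lines 738-739 (and next_eqe_sw() in lib/eq.h below) are the heart of the EQ ring protocol: nent is rounded up to a power of two, so ci & (nent - 1) selects the slot while ci & nent flips on every lap around the ring, and an entry counts as software-owned only while its owner bit matches that lap parity. A self-contained toy model of just that check (NENT and the struct are made up for illustration; mlx4's variant further down tests the top bit, owner & 0x80, instead of bit 0):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NENT 8  /* ring size; must be a power of two */

    struct eqe { uint8_t owner; };

    /* Software owns the entry while its owner bit matches the lap
     * parity carried in bit log2(NENT) of the consumer index. */
    static bool eqe_is_sw_owned(const struct eqe *ring, uint32_t ci)
    {
            const struct eqe *e = &ring[ci & (NENT - 1)];

            return ((e->owner & 1) ^ !!(ci & NENT)) == 0;
    }

    int main(void)
    {
            struct eqe ring[NENT] = { 0 };  /* lap 0: owner bits all 0 */

            printf("%d\n", eqe_is_sw_owned(ring, 0));     /* 1: fresh entry */
            printf("%d\n", eqe_is_sw_owned(ring, NENT));  /* 0: stale after one lap */
            return 0;
    }
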
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx5/core/lib/

D  eq.h
      34  int nent;  member
      57  struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));  in next_eqe_sw()
      59  return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;  in next_eqe_sw()

/Linux-v5.4/drivers/infiniband/hw/qib/

D  qib_pcie.c
     211  int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent)  in qib_pcie_params() argument
     229  maxvec = (nent && *nent) ? *nent : 1;  in qib_pcie_params()
     239  if (nent)  in qib_pcie_params()
     240  *nent = !dd->pcidev->msix_enabled ? 0 : nvec;  in qib_pcie_params()

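In this driver nent is an optional in/out parameter: on entry it carries the number of MSI-X vectors the caller would like, on exit the number actually granted (or 0 when MSI-X could not be enabled). A hypothetical caller, assuming the v5.4 signature shown above:

    u32 nvec = 8;                       /* in: vectors we would like */
    int ret = qib_pcie_params(dd, 1, &nvec);
    if (ret)
            return ret;
    /* out: nvec is now the granted count, or 0 without MSI-X */
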
/Linux-v5.4/drivers/infiniband/hw/mthca/

D  mthca_allocator.c
     160  int mthca_array_init(struct mthca_array *array, int nent)  in mthca_array_init() argument
     162  int npage = (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE;  in mthca_array_init()
     178  void mthca_array_cleanup(struct mthca_array *array, int nent)  in mthca_array_cleanup() argument
     182  for (i = 0; i < (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE; ++i)  in mthca_array_cleanup()

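The expression on lines 162 and 182 is just an open-coded DIV_ROUND_UP(nent * sizeof(void *), PAGE_SIZE): the number of pages needed to hold nent pointers. A worked example with assumed values:

    /* With 4 KiB pages and 8-byte pointers (assumed for illustration):
     * nent = 1000 entries -> 8000 bytes -> (8000 + 4095) / 4096 = 2 pages */
    int npage = (1000 * 8 + 4096 - 1) / 4096;   /* == 2 */
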
D  mthca_eq.c
     184  mthca_write64(MTHCA_EQ_DB_SET_CI | eq->eqn, ci & (eq->nent - 1),  in tavor_set_eq_ci()
     230  unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE;  in get_eqe()
     466  int nent,  in mthca_create_eq() argument
     479  eq->nent = roundup_pow_of_two(max(nent, 2));  in mthca_create_eq()
     480  npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;  in mthca_create_eq()
     511  for (i = 0; i < eq->nent; ++i)  in mthca_create_eq()
     535  eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);  in mthca_create_eq()
     560  eq->eqn, eq->nent);  in mthca_create_eq()
     593  int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /  in mthca_free_eq()

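Because eq->nent is forced to a power of two on line 479, the ffs(nent) - 1 on line 535 is simply log2 of the ring size, which the hardware context stores in the top byte of the word. A worked example, runnable in userspace (request size of 100 is assumed):

    #include <stdio.h>

    int main(void)
    {
            unsigned int nent = 128;    /* roundup_pow_of_two(max(100, 2)) */
            unsigned int logsize = __builtin_ffs(nent) - 1;  /* ffs(128)-1 == 7 */

            printf("logsize_usrpage = 0x%08x\n", logsize << 24);  /* 0x07000000 */
            return 0;
    }
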
D  mthca_cq.c
     348  int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent)  in mthca_alloc_cq_buf() argument
     353  ret = mthca_buf_alloc(dev, nent * MTHCA_CQ_ENTRY_SIZE,  in mthca_alloc_cq_buf()
     360  for (i = 0; i < nent; ++i)  in mthca_alloc_cq_buf()
     768  int mthca_init_cq(struct mthca_dev *dev, int nent,  in mthca_init_cq() argument
     776  cq->ibcq.cqe = nent - 1;  in mthca_init_cq()
     812  err = mthca_alloc_cq_buf(dev, &cq->buf, nent);  in mthca_init_cq()
     826  cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);  in mthca_init_cq()

D  mthca_dev.h
     422  int mthca_array_init(struct mthca_array *array, int nent);
     423  void mthca_array_cleanup(struct mthca_array *array, int nent);
     498  int mthca_init_cq(struct mthca_dev *dev, int nent,
     509  int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent);

D  mthca_cmd.c
     664  int nent = 0;  in mthca_map_cmd() local
     694  pages[nent * 2] = cpu_to_be64(virt);  in mthca_map_cmd()
     698  pages[nent * 2 + 1] =  in mthca_map_cmd()
     704  if (++nent == MTHCA_MAILBOX_SIZE / 16) {  in mthca_map_cmd()
     705  err = mthca_cmd(dev, mailbox->dma, nent, 0, op,  in mthca_map_cmd()
     709  nent = 0;  in mthca_map_cmd()
     714  if (nent)  in mthca_map_cmd()
     715  err = mthca_cmd(dev, mailbox->dma, nent, 0, op,  in mthca_map_cmd()

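mthca_map_cmd() is a classic fill-and-flush loop: accumulate (virt, phys) descriptor pairs in a mailbox, fire the firmware command whenever the mailbox fills, then flush the partial remainder. The same shape reappears in mlxsw_pci_fw_area_init() at the end of this listing. A generic, self-contained sketch of the pattern (CHUNK and submit() are stand-ins for MTHCA_MAILBOX_SIZE / 16 and mthca_cmd()):

    #include <stddef.h>

    #define CHUNK 256   /* stand-in for MTHCA_MAILBOX_SIZE / 16 */

    static int batch_submit(const unsigned long *items, size_t n,
                            int (*submit)(const unsigned long *, size_t))
    {
            unsigned long buf[CHUNK];
            size_t nent = 0;

            for (size_t i = 0; i < n; i++) {
                    buf[nent++] = items[i];
                    if (nent == CHUNK) {            /* mailbox full: flush */
                            int err = submit(buf, nent);
                            if (err)
                                    return err;
                            nent = 0;
                    }
            }
            return nent ? submit(buf, nent) : 0;    /* flush the tail */
    }
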
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx4/

D  eq.c
     110  unsigned long offset = (entry & (eq->nent - 1)) * eqe_size;  in get_eqe()
     124  return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;  in next_eqe_sw()
     782  eq->cons_index, eqe->owner, eq->nent,  in mlx4_eq_int()
     785  !!(eq->cons_index & eq->nent) ? "HW" : "SW");  in mlx4_eq_int()
     807  eq->cons_index, eqe->owner, eq->nent,  in mlx4_eq_int()
     809  !!(eq->cons_index & eq->nent) ? "HW" : "SW");  in mlx4_eq_int()
     819  eq->cons_index, eqe->owner, eq->nent,  in mlx4_eq_int()
     822  !!(eq->cons_index & eq->nent) ? "HW" : "SW");  in mlx4_eq_int()
     969  static int mlx4_create_eq(struct mlx4_dev *dev, int nent,  in mlx4_create_eq() argument
     983  eq->nent = roundup_pow_of_two(max(nent, 2));  in mlx4_create_eq()
     [all …]

D  cq.c
     341  int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,  in mlx4_cq_alloc() argument
     381  cpu_to_be32((ilog2(nent) << 24) |  in mlx4_cq_alloc()
     393  err = mlx4_init_user_cqes(buf_addr, nent,  in mlx4_cq_alloc()
     398  mlx4_init_kernel_cqes(buf_addr, nent,  in mlx4_cq_alloc()

/Linux-v5.4/drivers/infiniband/hw/hns/

D  hns_roce_cq.c
      84  static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,  in hns_roce_cq_alloc() argument
     144  nent, vector);  in hns_roce_cq_alloc()
     255  struct hns_roce_cq_buf *buf, u32 nent)  in hns_roce_ib_alloc_cq_buf() argument
     260  ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz,  in hns_roce_ib_alloc_cq_buf()
     286  hns_roce_buf_free(hr_dev, nent * hr_dev->caps.cq_entry_sz,  in hns_roce_ib_alloc_cq_buf()

/Linux-v5.4/lib/

D  scatterlist.c
     492  unsigned int nent, nalloc;  in sgl_alloc_order() local
     495  nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);  in sgl_alloc_order()
     497  if (length > (nent << (PAGE_SHIFT + order)))  in sgl_alloc_order()
     499  nalloc = nent;  in sgl_alloc_order()
     527  *nent_p = nent;  in sgl_alloc_order()

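Line 495 computes how many order-sized chunks cover length, and line 497 re-checks the result to catch overflow of the shifted value. A worked example in userspace (4 KiB pages and order 1 assumed):

    #include <stdio.h>

    int main(void)
    {
            unsigned long page_shift = 12, order = 1;           /* assumed */
            unsigned long chunk = 1UL << (page_shift + order);  /* 8192 */
            unsigned long length = 20000;

            /* round_up(length, chunk) >> (PAGE_SHIFT + order) */
            unsigned long nent = ((length + chunk - 1) & ~(chunk - 1))
                                 >> (page_shift + order);
            printf("nent = %lu\n", nent);   /* 3 chunks cover 20000 bytes */
            return 0;
    }
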
/Linux-v5.4/drivers/tty/serial/

D  pch_uart.c
     235  int nent;  member
     784  for (i = 0; i < priv->nent; i++, sg++) {  in pch_dma_tx_complete()
     790  dma_unmap_sg(port->dev, sg, priv->nent, DMA_TO_DEVICE);  in pch_dma_tx_complete()
     792  priv->nent = 0;  in pch_dma_tx_complete()
     932  int nent;  in dma_handle_tx() local
    1008  nent = dma_map_sg(port->dev, sg, num, DMA_TO_DEVICE);  in dma_handle_tx()
    1009  if (!nent) {  in dma_handle_tx()
    1013  priv->nent = nent;  in dma_handle_tx()
    1015  for (i = 0; i < nent; i++, sg++) {  in dma_handle_tx()
    1020  if (i == (nent - 1))  in dma_handle_tx()
     [all …]

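dma_map_sg() returns the number of DMA segments actually mapped (the IOMMU may coalesce entries) and 0 on failure, which is what lines 1008-1009 check. One caveat worth noting: per Documentation/DMA-API.txt, dma_unmap_sg() must be passed the same nents that was given to dma_map_sg(), not the mapped count it returned, so the unmap on line 790 relies on the two being equal. A sketch of the contract (dev, sg and num assumed set up elsewhere):

    int nent = dma_map_sg(dev, sg, num, DMA_TO_DEVICE);
    if (!nent)
            return -EIO;                            /* mapping failed */
    /* ... program the DMA engine with nent segments ... */
    dma_unmap_sg(dev, sg, num, DMA_TO_DEVICE);      /* num, not nent */
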
/Linux-v5.4/include/linux/mlx5/

D  eq.h
      17  int nent;  member

/Linux-v5.4/drivers/iommu/

D  omap-iommu.c
     716  int nent = 1;  in iopgtable_clear_entry_core() local
     730  nent *= 16;  in iopgtable_clear_entry_core()
     734  bytes *= nent;  in iopgtable_clear_entry_core()
     735  memset(iopte, 0, nent * sizeof(*iopte));  in iopgtable_clear_entry_core()
     737  flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);  in iopgtable_clear_entry_core()
     748  nent = 1; /* for the next L1 entry */  in iopgtable_clear_entry_core()
     752  nent *= 16;  in iopgtable_clear_entry_core()
     756  bytes *= nent;  in iopgtable_clear_entry_core()
     758  memset(iopgd, 0, nent * sizeof(*iopgd));  in iopgtable_clear_entry_core()
     759  flush_iopte_range(obj->dev, obj->pd_dma, pd_offset, nent);  in iopgtable_clear_entry_core()

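The nent *= 16 on lines 730 and 752 reflects the ARM short-descriptor layout the OMAP IOMMU mirrors: a 64 KiB large page occupies 16 consecutive identical L2 entries, and a 16 MiB supersection occupies 16 consecutive L1 entries, so clearing one mapping can mean wiping 16 slots at once. In outline (condensed from the hits above; the predicate is hypothetical):

    int nent = 1;
    if (entry_is_large_page)        /* hypothetical predicate */
            nent *= 16;             /* 64 KiB page = 16 identical PTEs */
    memset(iopte, 0, nent * sizeof(*iopte));
    flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);
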
/Linux-v5.4/drivers/infiniband/hw/mlx5/

D  cq.c
      73  static u8 sw_ownership_bit(int n, int nent)  in sw_ownership_bit() argument
      75  return (n & nent) ? 1 : 0;  in sw_ownership_bit()
     636  int nent,  in alloc_cq_frag_buf() argument
     645  nent * cqe_size,  in alloc_cq_frag_buf()
     654  buf->nent = nent;  in alloc_cq_frag_buf()
     815  for (i = 0; i < buf->nent; i++) {  in init_cq_frag_buf()
    1182  (i + 1) & cq->resize_buf->nent);  in copy_resize_cqes()
    1184  sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);  in copy_resize_cqes()

/Linux-v5.4/arch/x86/include/uapi/asm/

D  kvm.h
     207  __u32 nent;  member
     229  __u32 nent;  member

/Linux-v5.4/tools/arch/x86/include/uapi/asm/

D  kvm.h
     207  __u32 nent;  member
     229  __u32 nent;  member

/Linux-v5.4/tools/testing/selftests/kvm/lib/x86_64/

D  processor.c
     690  int nent = 100;  in allocate_kvm_cpuid2() local
     694  size += nent * sizeof(struct kvm_cpuid_entry2);  in allocate_kvm_cpuid2()
     701  cpuid->nent = nent;  in allocate_kvm_cpuid2()
     756  for (i = 0; i < cpuid->nent; i++) {  in kvm_get_supported_cpuid_index()

/Linux-v5.4/drivers/net/ethernet/mellanox/mlxsw/

D  pci.c
    1213  int nent = 0;  in mlxsw_pci_fw_area_init() local
    1235  mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);  in mlxsw_pci_fw_area_init()
    1236  mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */  in mlxsw_pci_fw_area_init()
    1237  if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {  in mlxsw_pci_fw_area_init()
    1238  err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);  in mlxsw_pci_fw_area_init()
    1241  nent = 0;  in mlxsw_pci_fw_area_init()
    1246  if (nent) {  in mlxsw_pci_fw_area_init()
    1247  err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);  in mlxsw_pci_fw_area_init()