/Linux-v5.15/drivers/infiniband/hw/hns/ |
D | hns_roce_alloc.c |
      68  u32 page_shift, u32 flags)  in hns_roce_buf_alloc() argument
      77  if (WARN_ON(page_shift < HNS_HW_PAGE_SHIFT))  in hns_roce_buf_alloc()
      85  buf->page_shift = page_shift;  in hns_roce_buf_alloc()
      86  page_size = 1 << buf->page_shift;  in hns_roce_buf_alloc()
     135  unsigned int page_shift)  in hns_roce_get_kmem_bufs() argument
     141  if (page_shift > buf->trunk_shift) {  in hns_roce_get_kmem_bufs()
     143  page_shift, buf->trunk_shift);  in hns_roce_get_kmem_bufs()
     151  offset += (1 << page_shift);  in hns_roce_get_kmem_bufs()
     159  unsigned int page_shift)  in hns_roce_get_umem_bufs() argument
     165  rdma_umem_for_each_dma_block(umem, &biter, 1 << page_shift) {  in hns_roce_get_umem_bufs()
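The snippets above (lines 86 and 151 in particular) lean on one identity: a page shift encodes a power-of-two page size, so a buffer is walked in 1 << page_shift byte steps. A minimal standalone sketch of that arithmetic, with an assumed 4 KiB shift rather than the driver's actual configuration:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        unsigned int page_shift = 12;               /* assumed 4 KiB pages */
        size_t page_size = (size_t)1 << page_shift;
        size_t buf_len = 3 * page_size + 100;       /* deliberately not page aligned */

        /* Walk the buffer one page at a time, as the kmem/umem helpers do. */
        for (size_t offset = 0; offset < buf_len; offset += page_size)
            printf("page starts at offset %zu\n", offset);

        return 0;
    }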
|
D | hns_roce_mr.c |
     116  buf_attr.page_shift = is_fast ? PAGE_SHIFT :  in alloc_mr_pbl()
     663  unsigned int page_shift)  in mtr_check_direct_pages() argument
     665  size_t page_size = 1 << page_shift;  in mtr_check_direct_pages()
     711  buf_attr->page_shift,  in mtr_alloc_bufs()
     725  int page_count, unsigned int page_shift)  in mtr_map_bufs() argument
     739  mtr->umem, page_shift);  in mtr_map_bufs()
     742  mtr->kmem, page_shift);  in mtr_map_bufs()
     752  ret = mtr_check_direct_pages(pages, npage, page_shift);  in mtr_map_bufs()
     892  unsigned int page_shift;  in mtr_init_buf_cfg() local
     905  page_shift = HNS_HW_PAGE_SHIFT;  in mtr_init_buf_cfg()
     [all …]
|
D | hns_roce_device.h |
     302  unsigned int page_shift; /* buffer page shift */  member
     402  unsigned int page_shift;  member
    1079  return hns_roce_buf_dma_addr(buf, idx << buf->page_shift);  in hns_roce_buf_page()
    1089  static inline u32 to_hr_hw_page_shift(u32 page_shift)  in to_hr_hw_page_shift() argument
    1091  return page_shift - HNS_HW_PAGE_SHIFT;  in to_hr_hw_page_shift()
    1144  unsigned int page_shift, struct ib_udata *udata,
    1198  u32 page_shift, u32 flags);
    1202  unsigned int page_shift);
    1205  unsigned int page_shift);
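to_hr_hw_page_shift() (line 1091) re-expresses a buffer page shift relative to the hardware page size, and line 1079 shows the companion offset calculation idx << page_shift. A rough userspace sketch of both; the 4 KiB hardware page constant below is an assumption standing in for HNS_HW_PAGE_SHIFT, not a value taken from the header:

    #include <stdio.h>
    #include <stdint.h>

    #define HW_PAGE_SHIFT 12    /* assumption: a 4 KiB device page */

    /* Buffer pages are at least as big as device pages, so the device-relative
     * shift is just the difference of the two shifts. */
    static uint32_t to_hw_page_shift(uint32_t page_shift)
    {
        return page_shift - HW_PAGE_SHIFT;
    }

    /* Byte offset of buffer page 'idx', mirroring idx << buf->page_shift. */
    static uint64_t buf_page_offset(uint32_t page_shift, uint64_t idx)
    {
        return idx << page_shift;
    }

    int main(void)
    {
        uint32_t page_shift = 16;    /* 64 KiB buffer pages */

        printf("device page shift: %u\n", to_hw_page_shift(page_shift));
        printf("offset of page 3: 0x%llx\n",
               (unsigned long long)buf_page_offset(page_shift, 3));
        return 0;
    }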
|
/Linux-v5.15/drivers/infiniband/core/ |
D | umem_odp.c |
      61  size_t page_size = 1UL << umem_odp->page_shift;  in ib_init_umem_odp()
      75  ndmas = (end - start) >> umem_odp->page_shift;  in ib_init_umem_odp()
     136  umem_odp->page_shift = PAGE_SHIFT;  in ib_umem_odp_alloc_implicit()
     184  odp_data->page_shift = PAGE_SHIFT;  in ib_umem_odp_alloc_child()
     247  umem_odp->page_shift = PAGE_SHIFT;  in ib_umem_odp_get()
     250  umem_odp->page_shift = HPAGE_SHIFT;  in ib_umem_odp_get()
     318  *dma_addr = ib_dma_map_page(dev, page, 0, 1 << umem_odp->page_shift,  in ib_umem_odp_map_dma_single_page()
     356  unsigned int page_shift, hmm_order, pfn_start_idx;  in ib_umem_odp_map_dma_and_lock() local
     368  page_shift = umem_odp->page_shift;  in ib_umem_odp_map_dma_and_lock()
     382  range.start = ALIGN_DOWN(user_virt, 1UL << page_shift);  in ib_umem_odp_map_dma_and_lock()
     [all …]
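Lines 75 and 382 show the two recurring ODP computations: counting the pages a range spans and rounding a faulting address down to its page boundary. A hedged standalone version (the addresses and the 4 KiB shift are made up for illustration):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned int page_shift = 12;               /* assumed 4 KiB pages */
        uint64_t page_size = 1ULL << page_shift;

        /* Round a faulting address down to its page, as ALIGN_DOWN() does above. */
        uint64_t user_virt = 0x7f0000001234ULL;
        uint64_t range_start = user_virt & ~(page_size - 1);

        /* With page-aligned bounds, the page count is a plain right shift. */
        uint64_t range_end = range_start + 4 * page_size;
        uint64_t ndmas = (range_end - range_start) >> page_shift;

        printf("start 0x%llx spans %llu pages\n",
               (unsigned long long)range_start, (unsigned long long)ndmas);
        return 0;
    }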
|
/Linux-v5.15/drivers/pci/endpoint/ |
D | pci-epc-mem.c |
      26  unsigned int page_shift = ilog2(mem->window.page_size);  in pci_epc_mem_get_order() local
      29  size >>= page_shift;  in pci_epc_mem_get_order()
      53  unsigned int page_shift;  in pci_epc_multi_mem_init() local
      73  page_shift = ilog2(page_size);  in pci_epc_multi_mem_init()
      74  pages = windows[i].size >> page_shift;  in pci_epc_multi_mem_init()
     173  unsigned int page_shift;  in pci_epc_mem_alloc_addr() local
     188  page_shift = ilog2(mem->window.page_size);  in pci_epc_mem_alloc_addr()
     190  ((phys_addr_t)pageno << page_shift);  in pci_epc_mem_alloc_addr()
     238  unsigned int page_shift;  in pci_epc_mem_free_addr() local
     250  page_shift = ilog2(page_size);  in pci_epc_mem_free_addr()
     [all …]
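Here page_shift is derived at runtime with ilog2() of the window page size, then reused to turn a size into a page count and a page number back into a physical address. A small sketch of the same idea, with a hand-rolled log2 standing in for the kernel's ilog2() and an invented window base address:

    #include <stdio.h>
    #include <stdint.h>

    /* Integer log2 of a power-of-two page size; stands in for the kernel's ilog2(). */
    static unsigned int ilog2_pow2(uint64_t v)
    {
        unsigned int shift = 0;

        while (v >>= 1)
            shift++;
        return shift;
    }

    int main(void)
    {
        uint64_t page_size = 4096;                  /* assumed window page size */
        unsigned int page_shift = ilog2_pow2(page_size);

        uint64_t window_size = 1ULL << 20;          /* 1 MiB outbound window */
        uint64_t pages = window_size >> page_shift; /* pages available in the window */

        /* Physical address of allocation page 'pageno' inside the window. */
        uint64_t window_base = 0x90000000ULL;       /* invented base address */
        uint64_t pageno = 5;
        uint64_t phys = window_base + (pageno << page_shift);

        printf("shift %u, %llu pages, page %llu at 0x%llx\n", page_shift,
               (unsigned long long)pages, (unsigned long long)pageno,
               (unsigned long long)phys);
        return 0;
    }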
|
/Linux-v5.15/tools/testing/selftests/powerpc/mm/ |
D | bad_accesses.c |
      69  unsigned long i, j, addr, region_shift, page_shift, page_size;  in test() local
      84  page_shift = 16;  in test()
      86  page_shift = 12;  in test()
     103  (1 << page_shift) >> 10,  in test()
     121  for (j = page_shift - 1; j < 60; j++) {  in test()
     130  addr = (base | delta) & ~((1 << page_shift) - 1);  in test()
|
/Linux-v5.15/arch/powerpc/include/asm/ |
D | ultravisor.h |
      50  u64 page_shift)  in uv_page_in() argument
      53  page_shift);  in uv_page_in()
      57  u64 page_shift)  in uv_page_out() argument
      60  page_shift);  in uv_page_out()
      75  static inline int uv_page_inval(u64 lpid, u64 gpa, u64 page_shift)  in uv_page_inval() argument
      77  return ucall_norets(UV_PAGE_INVAL, lpid, gpa, page_shift);  in uv_page_inval()
|
D | kvm_book3s_uvmem.h |
      15  unsigned long page_shift);
      19  unsigned long page_shift);
      54  unsigned long flags, unsigned long page_shift)  in kvmppc_h_svm_page_in() argument
      61  unsigned long flags, unsigned long page_shift)  in kvmppc_h_svm_page_out() argument
|
D | iommu.h |
     165  __u32 page_shift,
     170  __u32 page_shift,
     297  extern int iommu_tce_check_ioba(unsigned long page_shift,
     300  extern int iommu_tce_check_gpa(unsigned long page_shift,
|
/Linux-v5.15/tools/testing/selftests/kvm/lib/aarch64/ |
D | processor.c |
      26  unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;  in pgd_index()
      34  unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;  in pud_index()
      35  uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;  in pud_index()
      45  unsigned int shift = (vm->page_shift - 3) + vm->page_shift;  in pmd_index()
      46  uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;  in pmd_index()
      56  uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;  in pte_index()
      57  return (gva >> vm->page_shift) & mask;  in pte_index()
      62  uint64_t mask = ((1UL << (vm->va_bits - vm->page_shift)) - 1) << vm->page_shift;  in pte_addr()
      68  unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;  in ptrs_per_pgd()
      74  return 1 << (vm->page_shift - 3);  in ptrs_per_pte()
      [all …]
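Every index helper above follows one pattern: with 8-byte descriptors, each translation level resolves page_shift - 3 bits of the virtual address, so level N's index is the VA shifted right by page_shift + N * (page_shift - 3) and masked to that width. A standalone illustration for 4 KiB pages and a 4-level walk (the constants are illustrative, not the selftest's mode parameters):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned int page_shift = 12;                    /* assumed 4 KiB pages */
        unsigned int bits_per_level = page_shift - 3;    /* 512 descriptors per table page */
        uint64_t mask = (1ULL << bits_per_level) - 1;
        uint64_t gva = 0x0000007fdeadb000ULL;            /* arbitrary guest VA */

        /* Level 0 is the PTE level; level 3 is the PGD for a 4-level walk. */
        for (unsigned int level = 0; level < 4; level++) {
            unsigned int shift = page_shift + level * bits_per_level;
            uint64_t idx = (gva >> shift) & mask;
            printf("level %u index %llu\n", level, (unsigned long long)idx);
        }
        return 0;
    }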
|
/Linux-v5.15/arch/powerpc/kvm/ |
D | book3s_hv_uvmem.c |
     507  unsigned long end, unsigned long page_shift,  in __kvmppc_svm_page_out() argument
     527  if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))  in __kvmppc_svm_page_out()
     559  ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,  in __kvmppc_svm_page_out()
     560  gpa, 0, page_shift);  in __kvmppc_svm_page_out()
     579  unsigned long page_shift,  in kvmppc_svm_page_out() argument
     585  ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa);  in kvmppc_svm_page_out()
     733  unsigned long page_shift,  in kvmppc_svm_page_in() argument
     770  ret = uv_page_in(kvm->arch.lpid, pfn << page_shift,  in kvmppc_svm_page_in()
     771  gpa, 0, page_shift);  in kvmppc_svm_page_in()
     868  unsigned long page_shift)  in kvmppc_share_page() argument
     [all …]
|
D | book3s_64_vio.c |
     135  if ((tbltmp->it_page_shift <= stt->page_shift) &&  in kvm_spapr_tce_attach_iommu_group()
     137  stt->offset << stt->page_shift) &&  in kvm_spapr_tce_attach_iommu_group()
     139  stt->size << stt->page_shift)) {  in kvm_spapr_tce_attach_iommu_group()
     288  if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||  in kvm_vm_ioctl_create_spapr_tce()
     289  (args->offset + args->size > (ULLONG_MAX >> args->page_shift)))  in kvm_vm_ioctl_create_spapr_tce()
     304  stt->page_shift = args->page_shift;  in kvm_vm_ioctl_create_spapr_tce()
     371  if (iommu_tce_check_gpa(stt->page_shift, gpa))  in kvmppc_tce_validate()
     480  unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);  in kvmppc_tce_iommu_unmap()
     536  unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);  in kvmppc_tce_iommu_map()
     584  entry = ioba >> stt->page_shift;  in kvmppc_h_put_tce()
     [all …]
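Two conversions recur in the TCE code above and in the real-mode variant below: an I/O bus address (ioba) selects a table entry by shifting out stt->page_shift, and when the guest TCE page is larger than the host IOMMU page, one guest entry covers 1 << (stt->page_shift - tbl->it_page_shift) host entries. A brief standalone illustration with assumed 16 MiB guest and 64 KiB host pages:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned int guest_page_shift = 24;    /* assumed 16 MiB guest TCE pages */
        unsigned int host_page_shift = 16;     /* assumed 64 KiB host IOMMU pages */

        /* Which TCE table entry an I/O bus address falls into. */
        uint64_t ioba = 0x3000000ULL;
        uint64_t entry = ioba >> guest_page_shift;

        /* How many host-sized entries one guest-sized entry spans. */
        uint64_t subpages = 1ULL << (guest_page_shift - host_page_shift);

        printf("ioba 0x%llx -> entry %llu, %llu subpages per entry\n",
               (unsigned long long)ioba, (unsigned long long)entry,
               (unsigned long long)subpages);
        return 0;
    }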
|
D | book3s_64_vio_hv.c |
     113  if (iommu_tce_check_gpa(stt->page_shift, gpa))  in kvmppc_rm_tce_validate()
     209  idx = (ioba >> stt->page_shift) - stt->offset;  in kvmppc_rm_ioba_validate()
     310  unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);  in kvmppc_rm_tce_iommu_unmap()
     370  unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);  in kvmppc_rm_tce_iommu_map()
     413  entry = ioba >> stt->page_shift;  in kvmppc_rm_h_put_tce()
     501  entry = ioba >> stt->page_shift;  in kvmppc_rm_h_put_tce_indirect()
     613  unsigned long entry = ioba >> stt->page_shift;  in kvmppc_rm_h_stuff_tce()
     630  for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))  in kvmppc_rm_h_stuff_tce()
     631  kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);  in kvmppc_rm_h_stuff_tce()
     635  iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);  in kvmppc_rm_h_stuff_tce()
     [all …]
|
/Linux-v5.15/tools/testing/selftests/vm/ |
D | hmm-tests.c |
      60  unsigned int page_shift;  in FIXTURE() local
      68  unsigned int page_shift;  in FIXTURE() local
      87  self->page_shift = ffs(self->page_size) - 1;  in FIXTURE_SETUP()
      96  self->page_shift = ffs(self->page_size) - 1;  in FIXTURE_SETUP()
     234  npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;  in TEST_F()
     236  size = npages << self->page_shift;  in TEST_F()
     297  npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;  in TEST_F()
     299  size = npages << self->page_shift;  in TEST_F()
     356  npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;  in TEST_F()
     358  size = npages << self->page_shift;  in TEST_F()
     [all …]
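The fixture derives page_shift from the runtime page size with ffs(page_size) - 1, then rounds the test buffer up to whole pages and back to a byte size. The same computation in plain userspace C (the buffer size below is arbitrary):

    #include <stdio.h>
    #include <strings.h>    /* ffs() */
    #include <unistd.h>     /* sysconf() */

    int main(void)
    {
        unsigned long page_size = (unsigned long)sysconf(_SC_PAGESIZE);
        unsigned int page_shift = (unsigned int)ffs((int)page_size) - 1;

        unsigned long buffer_size = 1UL << 20;    /* arbitrary test buffer */

        /* Round up to whole pages, then recover the byte size from the count. */
        unsigned long npages = (buffer_size + page_size - 1) >> page_shift;
        unsigned long size = npages << page_shift;

        printf("page_shift %u, npages %lu, size %lu\n", page_shift, npages, size);
        return 0;
    }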
|
/Linux-v5.15/drivers/net/ethernet/mellanox/mlx5/core/ |
D | alloc.c |
      81  buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;  in mlx5_buf_alloc_node()
      94  while (t & ((1 << buf->page_shift) - 1)) {  in mlx5_buf_alloc_node()
      95  --buf->page_shift;  in mlx5_buf_alloc_node()
     128  buf->page_shift = PAGE_SHIFT;  in mlx5_frag_buf_alloc_node()
     142  if (frag->map & ((1 << buf->page_shift) - 1)) {  in mlx5_frag_buf_alloc_node()
     146  &frag->map, buf->page_shift);  in mlx5_frag_buf_alloc_node()
     295  addr = buf->frags->map + (i << buf->page_shift);  in mlx5_fill_page_array()
|
/Linux-v5.15/tools/testing/selftests/kvm/lib/ |
D | kvm_util.c |
     243  vm->page_shift = vm_guest_mode_params[mode].page_shift;  in vm_create()
     304  0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);  in vm_create()
     306  (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,  in vm_create()
     307  (1ULL << (vm->va_bits - 1)) >> vm->page_shift);  in vm_create()
     310  vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;  in vm_create()
     711  if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift))  in kvm_memcmp_hva_gva()
     713  if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift))  in kvm_memcmp_hva_gva()
     716  assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));  in kvm_memcmp_hva_gva()
     717  assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));  in kvm_memcmp_hva_gva()
     827  TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1)  in vm_userspace_mem_region_add()
     [all …]
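Two idioms appear in vm_create() and kvm_memcmp_hva_gva(): the largest guest frame number is ((1 << pa_bits) >> page_shift) - 1, and a span stays within one page when its first and last byte shift down to the same frame number. A short illustrative version with an assumed 40-bit guest physical address width:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    /* True if [addr, addr + len) stays within a single page. */
    static bool within_one_page(uint64_t addr, uint64_t len, unsigned int page_shift)
    {
        return (addr >> page_shift) == ((addr + len - 1) >> page_shift);
    }

    int main(void)
    {
        unsigned int page_shift = 12;    /* assumed 4 KiB pages */
        unsigned int pa_bits = 40;       /* assumed guest physical address width */

        uint64_t max_gfn = ((1ULL << pa_bits) >> page_shift) - 1;

        printf("max_gfn 0x%llx\n", (unsigned long long)max_gfn);
        printf("0x1ff8 + 16 bytes crosses a page? %s\n",
               within_one_page(0x1ff8, 16, page_shift) ? "no" : "yes");
        return 0;
    }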
|
/Linux-v5.15/tools/testing/selftests/kvm/lib/x86_64/ |
D | vmx.c |
     408  TEST_ASSERT((nested_paddr >> vm->page_shift) <= vm->max_gfn,  in nested_pg_map()
     416  TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,  in nested_pg_map()
     429  pml4e[index[3]].address = vm_alloc_page_table(vm) >> vm->page_shift;  in nested_pg_map()
     439  pdpe[index[2]].address = vm_alloc_page_table(vm) >> vm->page_shift;  in nested_pg_map()
     449  pde[index[1]].address = vm_alloc_page_table(vm) >> vm->page_shift;  in nested_pg_map()
     458  pte[index[0]].address = paddr >> vm->page_shift;  in nested_pg_map()
     514  i = (region->region.guest_phys_addr >> vm->page_shift) - 1;  in nested_map_memslot()
     515  last = i + (region->region.memory_size >> vm->page_shift);  in nested_map_memslot()
     522  (uint64_t)i << vm->page_shift,  in nested_map_memslot()
     523  (uint64_t)i << vm->page_shift,  in nested_map_memslot()
     [all …]
|
/Linux-v5.15/drivers/net/ethernet/mellanox/mlx4/ |
D | mr.c |
     194  int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,  in mlx4_mtt_init() argument
     201  mtt->page_shift = MLX4_ICM_PAGE_SHIFT;  in mlx4_mtt_init()
     204  mtt->page_shift = page_shift;  in mlx4_mtt_init()
     419  int page_shift, struct mlx4_mr *mr)  in mlx4_mr_alloc_reserved() argument
     428  return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);  in mlx4_mr_alloc_reserved()
     528  int npages, int page_shift, struct mlx4_mr *mr)  in mlx4_mr_alloc() argument
     538  access, npages, page_shift, mr);  in mlx4_mr_alloc()
     591  int page_shift, struct mlx4_mpt_entry *mpt_entry)  in mlx4_mr_rereg_mem_write() argument
     595  err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);  in mlx4_mr_rereg_mem_write()
     601  mpt_entry->entity_size = cpu_to_be32(page_shift);  in mlx4_mr_rereg_mem_write()
     [all …]
|
/Linux-v5.15/arch/powerpc/platforms/pseries/ |
D | iommu.c |
     507  unsigned long window_size, unsigned long page_shift,  in iommu_table_setparms_common() argument
     512  tbl->it_offset = win_addr >> page_shift;  in iommu_table_setparms_common()
     513  tbl->it_size = window_size >> page_shift;  in iommu_table_setparms_common()
     514  tbl->it_page_shift = page_shift;  in iommu_table_setparms_common()
    1047  struct ddw_create_response *create, int page_shift,  in create_ddw() argument
    1071  BUID_LO(buid), page_shift, window_shift);  in create_ddw()
    1077  BUID_LO(buid), page_shift, window_shift, ret, create->liobn,  in create_ddw()
    1187  u32 page_shift, u32 window_shift)  in ddw_property_create() argument
    1209  ddwprop->tce_shift = cpu_to_be32(page_shift);  in ddw_property_create()
    1232  int page_shift;  in enable_ddw() local
     [all …]
|
/Linux-v5.15/arch/powerpc/platforms/powernv/ |
D | pci-ioda-tce.c |
      50  u64 dma_offset, unsigned int page_shift)  in pnv_pci_setup_iommu_table() argument
      54  tbl->it_page_shift = page_shift;  in pnv_pci_setup_iommu_table()
     292  __u32 page_shift, __u64 window_size, __u32 levels,  in pnv_pci_ioda2_table_alloc_pages() argument
     299  unsigned int entries_shift = window_shift - page_shift;  in pnv_pci_ioda2_table_alloc_pages()
     315  if ((level_shift - 3) * levels + page_shift >= 55)  in pnv_pci_ioda2_table_alloc_pages()
     349  page_shift);  in pnv_pci_ioda2_table_alloc_pages()
|
D | pci.h |
     291  extern unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
     321  __u32 page_shift, __u64 window_size, __u32 levels,
     332  u64 dma_offset, unsigned int page_shift);
|
/Linux-v5.15/drivers/mtd/nand/raw/ |
D | nand_bbt.c |
     180  from = ((loff_t)page) << this->page_shift;  in read_bbt()
     396  scan_read(this, buf, (loff_t)td->pages[0] << this->page_shift,  in read_abs_bbts()
     405  scan_read(this, buf, (loff_t)md->pages[0] << this->page_shift,  in read_abs_bbts()
     561  int blocktopage = this->bbt_erase_shift - this->page_shift;  in search_bbt()
     667  (this->bbt_erase_shift - this->page_shift);  in get_bbt_block()
     695  page = block << (this->bbt_erase_shift - this->page_shift);  in get_bbt_block()
     798  page = block << (this->bbt_erase_shift - this->page_shift);  in write_bbt()
     819  to = ((loff_t)page) << this->page_shift;  in write_bbt()
     835  ops.ooblen = (len >> this->page_shift) * mtd->oobsize;  in write_bbt()
     842  pageoffs = page - (int)(to >> this->page_shift);  in write_bbt()
     [all …]
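The bad-block-table code moves between erase blocks, pages, and byte offsets purely with shifts: block << (bbt_erase_shift - page_shift) gives the block's first page, and page << page_shift gives its byte offset. A minimal sketch with invented geometry (2 KiB pages, 128 KiB erase blocks):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned int page_shift = 11;     /* assumed 2 KiB NAND pages */
        unsigned int erase_shift = 17;    /* assumed 128 KiB erase blocks */

        uint64_t block = 42;

        /* First page of the block, then that page's byte offset on the device. */
        uint64_t page = block << (erase_shift - page_shift);
        uint64_t offset = page << page_shift;

        printf("block %llu -> page %llu -> offset 0x%llx\n",
               (unsigned long long)block, (unsigned long long)page,
               (unsigned long long)offset);
        return 0;
    }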
|
/Linux-v5.15/drivers/infiniband/sw/rdmavt/ |
D | mr.c |
     368  mr->mr.page_shift = PAGE_SHIFT;  in rvt_reg_user_mr()
     560  u32 ps = 1 << mr->mr.page_shift;  in rvt_set_page()
     561  u32 mapped_segs = mr->mr.length >> mr->mr.page_shift;  in rvt_set_page()
     595  mr->mr.page_shift = PAGE_SHIFT;  in rvt_map_mr_sg()
     772  if (mr->page_shift) {  in rvt_lkey_ok()
     780  entries_spanned_by_off = off >> mr->page_shift;  in rvt_lkey_ok()
     781  off -= (entries_spanned_by_off << mr->page_shift);  in rvt_lkey_ok()
     879  if (mr->page_shift) {  in rvt_rkey_ok()
     887  entries_spanned_by_off = off >> mr->page_shift;  in rvt_rkey_ok()
     888  off -= (entries_spanned_by_off << mr->page_shift);  in rvt_rkey_ok()
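rvt_lkey_ok() and rvt_rkey_ok() split a byte offset into whole map entries plus a remainder: entries = off >> page_shift, then off -= entries << page_shift. A tiny standalone equivalent:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned int page_shift = 12;    /* assumed 4 KiB map entries */
        uint64_t off = 0x5123;           /* byte offset into the region */

        uint64_t entries = off >> page_shift;              /* whole entries skipped */
        uint64_t residual = off - (entries << page_shift); /* remainder within one */

        printf("offset 0x%llx -> %llu entries + 0x%llx\n",
               (unsigned long long)off, (unsigned long long)entries,
               (unsigned long long)residual);
        return 0;
    }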
|
/Linux-v5.15/include/rdma/ |
D | ib_umem_odp.h |
      44  unsigned int page_shift;  member
      67  umem_odp->page_shift;  in ib_umem_odp_num_pages()
|
/Linux-v5.15/drivers/infiniband/hw/mlx5/ |
D | mr.c |
     839  static int get_octo_len(u64 addr, u64 len, int page_shift)  in get_octo_len() argument
     841  u64 page_size = 1ULL << page_shift;  in get_octo_len()
     846  npages = ALIGN(len + offset, page_size) >> page_shift;  in get_octo_len()
     980  mr->page_shift = order_base_2(page_size);  in alloc_cacheable_mr()
    1093  wr->page_shift = mr->page_shift;  in mlx5_ib_create_xlt_wr()
    1123  int page_shift, int flags)  in mlx5_ib_update_xlt() argument
    1171  wr.page_shift = page_shift;  in mlx5_ib_update_xlt()
    1222  1 << mr->page_shift),  in mlx5_ib_update_mr_pas()
    1231  BIT(mr->page_shift)) {  in mlx5_ib_update_mr_pas()
    1294  mr->page_shift = order_base_2(page_size);  in reg_create()
     [all …]
|