
Searched refs:pgt (Results 1 – 24 of 24) sorted by relevance

/Linux-v6.6/drivers/net/ethernet/mellanox/mlxsw/
spectrum_pgt.c:32 mutex_lock(&mlxsw_sp->pgt->lock); in mlxsw_sp_pgt_mid_alloc()
33 index = idr_alloc(&mlxsw_sp->pgt->pgt_idr, NULL, 0, in mlxsw_sp_pgt_mid_alloc()
34 mlxsw_sp->pgt->end_index, GFP_KERNEL); in mlxsw_sp_pgt_mid_alloc()
42 mutex_unlock(&mlxsw_sp->pgt->lock); in mlxsw_sp_pgt_mid_alloc()
46 mutex_unlock(&mlxsw_sp->pgt->lock); in mlxsw_sp_pgt_mid_alloc()
52 mutex_lock(&mlxsw_sp->pgt->lock); in mlxsw_sp_pgt_mid_free()
53 WARN_ON(idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base)); in mlxsw_sp_pgt_mid_free()
54 mutex_unlock(&mlxsw_sp->pgt->lock); in mlxsw_sp_pgt_mid_free()
63 mutex_lock(&mlxsw_sp->pgt->lock); in mlxsw_sp_pgt_mid_alloc_range()
70 idr_cursor = idr_get_cursor(&mlxsw_sp->pgt->pgt_idr); in mlxsw_sp_pgt_mid_alloc_range()
[all …]
spectrum.h:213 struct mlxsw_sp_pgt *pgt; member
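
The spectrum_pgt.c matches above show mlxsw's PGT (Port Group Table) MID allocator: an IDR hands out indexes below pgt->end_index, serialized by a mutex, and the free path WARNs if the slot did not hold the NULL stored at allocation time. A simplified sketch of that pattern (struct and function names here are illustrative, not the driver's):

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct example_pgt {
        struct idr pgt_idr;     /* idr_init() at setup time */
        u16 end_index;          /* first index past the table */
        struct mutex lock;      /* mutex_init(); protects pgt_idr */
};

static int example_pgt_mid_alloc(struct example_pgt *pgt, u16 *p_mid)
{
        int index;

        mutex_lock(&pgt->lock);
        /* Only the index is being reserved, so NULL is stored. */
        index = idr_alloc(&pgt->pgt_idr, NULL, 0, pgt->end_index,
                          GFP_KERNEL);
        mutex_unlock(&pgt->lock);
        if (index < 0)
                return index;

        *p_mid = index;
        return 0;
}

static void example_pgt_mid_free(struct example_pgt *pgt, u16 mid)
{
        mutex_lock(&pgt->lock);
        /* NULL was stored, so a non-NULL return indicates misuse. */
        WARN_ON(idr_remove(&pgt->pgt_idr, mid));
        mutex_unlock(&pgt->lock);
}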
/Linux-v6.6/arch/arm64/kvm/hyp/
pgtable.c:109 static u32 kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr) in kvm_pgd_page_idx() argument
111 u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */ in kvm_pgd_page_idx()
112 u64 mask = BIT(pgt->ia_bits) - 1; in kvm_pgd_page_idx()
119 struct kvm_pgtable pgt = { in kvm_pgd_pages() local
124 return kvm_pgd_page_idx(&pgt, -1ULL) + 1; in kvm_pgd_pages()
297 static int _kvm_pgtable_walk(struct kvm_pgtable *pgt, struct kvm_pgtable_walk_data *data) in _kvm_pgtable_walk() argument
301 u64 limit = BIT(pgt->ia_bits); in _kvm_pgtable_walk()
306 if (!pgt->pgd) in _kvm_pgtable_walk()
309 for (idx = kvm_pgd_page_idx(pgt, data->addr); data->addr < data->end; ++idx) { in _kvm_pgtable_walk()
310 kvm_pteref_t pteref = &pgt->pgd[idx * PTRS_PER_PTE]; in _kvm_pgtable_walk()
[all …]
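
The kvm_pgd_page_idx() lines above compute which concatenated PGD page covers an address: mask the address to the input-address width (ia_bits), then shift by the granule shift of the level above start_level. A standalone restatement under a 4K-granule assumption (this is not the kernel's code; granule_shift() mirrors kvm_granule_shift() and its level numbering):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                   /* 4K granule assumed */
#define PTE_SHIFT  (PAGE_SHIFT - 3)     /* 512 entries per table page */

/* Address span shifted out by one entry at 'level' (0 top .. 3 leaf). */
static uint64_t granule_shift(int level)
{
        return PAGE_SHIFT + (3 - level) * PTE_SHIFT;
}

static uint32_t pgd_page_idx(uint32_t ia_bits, int start_level, uint64_t addr)
{
        uint64_t shift = granule_shift(start_level - 1); /* may exceed ia_bits */
        uint64_t mask = ((uint64_t)1 << ia_bits) - 1;

        return (addr & mask) >> shift;
}

int main(void)
{
        /* 40-bit IPA space starting at level 1: the index of the last
         * address is 1, so the kvm_pgd_pages()-style "idx(-1ULL) + 1"
         * yields two concatenated PGD pages. */
        printf("%u\n", pgd_page_idx(40, 1, ~(uint64_t)0) + 1);
        return 0;
}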
/Linux-v6.6/arch/arm64/include/asm/
kvm_pgtable.h:362 int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
372 void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);
393 int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
415 u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
452 int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
457 #define kvm_pgtable_stage2_init(pgt, mmu, mm_ops) \ argument
458 __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL)
467 void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
501 kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
535 int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
[all …]
kvm_host.h:159 struct kvm_pgtable *pgt; member
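
The kvm_pgtable.h declarations above form a small lifecycle API. A hedged usage sketch for the hyp half, assuming the truncated third parameter of kvm_pgtable_hyp_init() is a struct kvm_pgtable_mm_ops pointer and that KVM_PGTABLE_PROT_R is a valid protection value; the 48-bit VA width is illustrative:

#include <asm/kvm_pgtable.h>

static int example_hyp_pgtable(struct kvm_pgtable_mm_ops *mm_ops,
                               u64 va, u64 pa, u64 size)
{
        struct kvm_pgtable pgt;
        int ret;

        ret = kvm_pgtable_hyp_init(&pgt, 48, mm_ops);   /* 48-bit hyp VA */
        if (ret)
                return ret;

        ret = kvm_pgtable_hyp_map(&pgt, va, size, pa, KVM_PGTABLE_PROT_R);
        if (!ret)
                kvm_pgtable_hyp_unmap(&pgt, va, size); /* returns bytes unmapped */

        kvm_pgtable_hyp_destroy(&pgt);
        return ret;
}

The stage-2 entry points follow the same shape, with __kvm_pgtable_stage2_init() wrapped by the kvm_pgtable_stage2_init() macro shown above.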
/Linux-v6.6/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmm.c:30 struct nvkm_vmm_pt *pgt = *ppgt; in nvkm_vmm_pt_del() local
31 if (pgt) { in nvkm_vmm_pt_del()
32 kvfree(pgt->pde); in nvkm_vmm_pt_del()
33 kfree(pgt); in nvkm_vmm_pt_del()
44 struct nvkm_vmm_pt *pgt; in nvkm_vmm_pt_new() local
56 if (!(pgt = kzalloc(sizeof(*pgt) + lpte, GFP_KERNEL))) in nvkm_vmm_pt_new()
58 pgt->page = page ? page->shift : 0; in nvkm_vmm_pt_new()
59 pgt->sparse = sparse; in nvkm_vmm_pt_new()
62 pgt->pde = kvcalloc(pten, sizeof(*pgt->pde), GFP_KERNEL); in nvkm_vmm_pt_new()
63 if (!pgt->pde) { in nvkm_vmm_pt_new()
[all …]
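
nvkm_vmm_pt_new()/_del() above show a two-part allocation: one kzalloc() covers the struct plus a trailing per-LPTE array, while the PDE table is sized separately with kvcalloc() and released with kvfree(). A minimal sketch of the same pattern (field names follow the snippet; struct_size() is used in place of the open-coded sizeof(*pgt) + lpte):

#include <linux/slab.h>
#include <linux/overflow.h>
#include <linux/mm.h>

struct example_pt {
        u8 page;        /* page shift, 0 if none */
        bool sparse;
        void **pde;     /* kvcalloc'd, one slot per child table */
        u8 lpte[];      /* trailing flexible array */
};

static struct example_pt *example_pt_new(u32 pten, u32 lpte_bytes, bool sparse)
{
        struct example_pt *pgt;

        pgt = kzalloc(struct_size(pgt, lpte, lpte_bytes), GFP_KERNEL);
        if (!pgt)
                return NULL;

        pgt->sparse = sparse;
        pgt->pde = kvcalloc(pten, sizeof(*pgt->pde), GFP_KERNEL);
        if (!pgt->pde) {
                kfree(pgt);
                return NULL;
        }
        return pgt;
}

static void example_pt_del(struct example_pt *pgt)
{
        if (pgt) {
                kvfree(pgt->pde);       /* pairs with kvcalloc() */
                kfree(pgt);             /* pairs with kzalloc() */
        }
}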
vmmgp100.c:238 struct nvkm_vmm_pt *pgt = pgd->pde[pdei]; in gp100_vmm_pd0_pde() local
242 if (pgt->pt[0] && !gp100_vmm_pde(pgt->pt[0], &data[0])) in gp100_vmm_pd0_pde()
244 if (pgt->pt[1] && !gp100_vmm_pde(pgt->pt[1], &data[1])) in gp100_vmm_pd0_pde()
365 struct nvkm_vmm_pt *pgt = pgd->pde[pdei]; in gp100_vmm_pd1_pde() local
369 if (!gp100_vmm_pde(pgt->pt[0], &data)) in gp100_vmm_pd1_pde()
vmmnv50.c:106 nv50_vmm_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgt, u64 *pdata) in nv50_vmm_pde() argument
110 if (pgt && (pt = pgt->pt[0])) { in nv50_vmm_pde()
111 switch (pgt->page) { in nv50_vmm_pde()
vmmgf100.c:108 struct nvkm_vmm_pt *pgt = pgd->pde[pdei]; in gf100_vmm_pgd_pde() local
113 if ((pt = pgt->pt[0])) { in gf100_vmm_pgd_pde()
127 if ((pt = pgt->pt[1])) { in gf100_vmm_pgd_pde()
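
The gp100/gf100 PDE writers above share one guard pattern: a PDE can reference two page tables at once (pt[0] for large pages, pt[1] for small pages), and each half of the entry is encoded only when that table exists. A hedged restatement, with encode() standing in for gp100_vmm_pde() and the struct reduced to the relevant field:

#include <linux/types.h>

struct example_dual_pt {
        void *pt[2];    /* [0] large-page table, [1] small-page table */
};

static bool example_write_pde(struct example_dual_pt *pgt, u64 data[2],
                              bool (*encode)(void *pt, u64 *data))
{
        if (pgt->pt[0] && !encode(pgt->pt[0], &data[0]))
                return false;   /* present but not encodable yet */
        if (pgt->pt[1] && !encode(pgt->pt[1], &data[1]))
                return false;
        return true;
}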
/Linux-v6.6/arch/arm64/kvm/
mmu.c:66 struct kvm_pgtable *pgt = mmu->pgt; in stage2_apply_range() local
67 if (!pgt) in stage2_apply_range()
71 ret = fn(pgt, addr, next - addr); in stage2_apply_range()
118 struct kvm_pgtable *pgt; in kvm_mmu_split_huge_pages() local
145 pgt = kvm->arch.mmu.pgt; in kvm_mmu_split_huge_pages()
146 if (!pgt) in kvm_mmu_split_huge_pages()
150 ret = kvm_pgtable_stage2_split(pgt, addr, next - addr, cache); in kvm_mmu_split_huge_pages()
804 struct kvm_pgtable pgt = { in get_user_mapping_size() local
822 ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level); in get_user_mapping_size()
870 struct kvm_pgtable *pgt; in kvm_init_stage2_mmu() local
[all …]
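
stage2_apply_range() above captures a recurring shape in arm64 KVM MMU code: bail out if the MMU has no page table installed, then walk [addr, end) in chunks and hand each chunk to a callback whose (pgt, addr, size) signature matches e.g. kvm_pgtable_stage2_unmap(). A sketch with next_boundary() standing in for stage2_range_addr_end() (all names illustrative):

#include <linux/types.h>
#include <linux/errno.h>

struct kvm_pgtable;

static int example_apply_range(struct kvm_pgtable *pgt, u64 addr, u64 end,
                               u64 (*next_boundary)(u64 addr, u64 end),
                               int (*fn)(struct kvm_pgtable *pgt,
                                         u64 addr, u64 size))
{
        int ret = 0;

        if (!pgt)
                return -EINVAL; /* no stage-2 table installed yet */

        while (addr < end) {
                u64 next = next_boundary(addr, end);

                ret = fn(pgt, addr, next - addr);
                if (ret)
                        break;
                addr = next;
        }
        return ret;
}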
/Linux-v6.6/drivers/gpu/drm/nouveau/nvkm/engine/dma/
usernv04.c:52 struct nvkm_memory *pgt = in nv04_dmaobj_bind() local
55 return nvkm_gpuobj_wrap(pgt, pgpuobj); in nv04_dmaobj_bind()
56 nvkm_kmap(pgt); in nv04_dmaobj_bind()
57 offset = nvkm_ro32(pgt, 8 + (offset >> 10)); in nv04_dmaobj_bind()
59 nvkm_done(pgt); in nv04_dmaobj_bind()
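
The usernv04.c lines show nouveau's CPU-access bracket for GPU memory: nvkm_kmap() opens the mapping, nvkm_ro32() reads a 32-bit word at a byte offset, and nvkm_done() closes it. The byte offset 8 + (offset >> 10) folds together the 4 KiB page index (offset >> 12) and the 4-byte PTE size, past a fixed 8-byte base; a hedged restatement:

#include <core/memory.h>        /* nouveau-internal include path */

static u32 example_nv04_read_pte(struct nvkm_memory *pgt, u64 offset)
{
        u32 pte;

        nvkm_kmap(pgt);
        pte = nvkm_ro32(pgt, 8 + (offset >> 10)); /* (offset >> 12) * 4 + 8 */
        nvkm_done(pgt);
        return pte;
}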
/Linux-v6.6/drivers/firmware/efi/libstub/
x86-5lvl.c:68 u64 *pgt = (void *)la57_toggle + PAGE_SIZE; in efi_5level_switch() local
81 new_cr3 = memset(pgt, 0, PAGE_SIZE); in efi_5level_switch()
89 new_cr3 = memcpy(pgt, new_cr3, PAGE_SIZE); in efi_5level_switch()
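A hedged reading of the x86-5lvl.c lines: the page following the la57 trampoline is scratch space for the new top-level table, and both branches lean on memset()/memcpy() returning their destination, so the same pointer becomes the new CR3 value. A minimal standalone illustration of that idiom (the entry fix-ups that efi_5level_switch() performs afterwards are omitted):

#include <string.h>
#include <stdint.h>

static uint64_t *example_new_root(uint64_t *pgt, const uint64_t *old_root,
                                  size_t page_size, int expanding)
{
        /* Expanding 4->5 levels: start from a zeroed page (its single
         * top entry is filled in afterwards). Collapsing 5->4: reuse
         * the next-level table's contents wholesale. */
        return expanding ? memset(pgt, 0, page_size)
                         : memcpy(pgt, old_root, page_size);
}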
/Linux-v6.6/arch/arm64/kvm/hyp/nvhe/
mem_protect.c:151 ret = __kvm_pgtable_stage2_init(&host_mmu.pgt, mmu, in kvm_host_prepare_stage2()
157 mmu->pgd_phys = __hyp_pa(host_mmu.pgt.pgd); in kvm_host_prepare_stage2()
158 mmu->pgt = &host_mmu.pgt; in kvm_host_prepare_stage2()
258 ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0, in kvm_guest_prepare_stage2()
264 vm->kvm.arch.mmu.pgd_phys = __hyp_pa(vm->pgt.pgd); in kvm_guest_prepare_stage2()
275 kvm_pgtable_stage2_destroy(&vm->pgt); in reclaim_guest_pages()
328 struct kvm_pgtable *pgt = &host_mmu.pgt; in host_stage2_unmap_dev_all() local
336 ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr); in host_stage2_unmap_dev_all()
340 return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr); in host_stage2_unmap_dev_all()
413 return kvm_pgtable_stage2_map(&host_mmu.pgt, start, end - start, start, in __host_stage2_idmap()
[all …]
pkvm.c:382 mmu->pgt = &hyp_vm->pgt; in insert_vm_table_entry()
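
host_stage2_unmap_dev_all() above walks the known memory regions and unmaps everything in between: the gap before each region, then the tail up to the top of the IPA space, BIT(pgt->ia_bits). A sketch of that shape, with an illustrative region type and unmap() standing in for kvm_pgtable_stage2_unmap():

#include <linux/types.h>
#include <linux/bits.h>

struct kvm_pgtable;

struct example_region {
        u64 base;
        u64 size;
};

static int example_unmap_non_memory(struct kvm_pgtable *pgt, u32 ia_bits,
                                    const struct example_region *regs,
                                    int nr_regs,
                                    int (*unmap)(struct kvm_pgtable *pgt,
                                                 u64 addr, u64 size))
{
        u64 addr = 0;
        int i, ret;

        /* Regions are assumed sorted by base address. */
        for (i = 0; i < nr_regs; i++) {
                ret = unmap(pgt, addr, regs[i].base - addr);
                if (ret)
                        return ret;
                addr = regs[i].base + regs[i].size;
        }
        return unmap(pgt, addr, BIT_ULL(ia_bits) - addr);
}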
/Linux-v6.6/arch/s390/kvm/
gaccess.c:1382 unsigned long *pgt, int *dat_protection, in kvm_s390_shadow_tables() argument
1436 *pgt = ptr + vaddr.rfx * 8; in kvm_s390_shadow_tables()
1463 *pgt = ptr + vaddr.rsx * 8; in kvm_s390_shadow_tables()
1491 *pgt = ptr + vaddr.rtx * 8; in kvm_s390_shadow_tables()
1528 *pgt = ptr + vaddr.sx * 8; in kvm_s390_shadow_tables()
1554 *pgt = ptr; in kvm_s390_shadow_tables()
1577 unsigned long pgt = 0; in kvm_s390_shadow_fault() local
1589 rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake); in kvm_s390_shadow_fault()
1591 rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection, in kvm_s390_shadow_fault()
1596 pte.val = pgt + vaddr.px * PAGE_SIZE; in kvm_s390_shadow_fault()
[all …]
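
The kvm_s390_shadow_tables() matches reduce to one piece of arithmetic: s390 DAT entries are 8 bytes, so each level's entry address is the table origin plus index * 8, with rfx/rsx/rtx/sx being the successive index fields of the guest address; at the page-table level the origin itself is returned (*pgt = ptr), and the caller synthesizes pte.val as pgt + vaddr.px * PAGE_SIZE in the fake-shadow case. A one-line restatement of the indexing step (names illustrative):

static unsigned long example_dat_entry(unsigned long table_origin,
                                       unsigned int index)
{
        return table_origin + index * 8;        /* 8-byte DAT entries */
}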
/Linux-v6.6/arch/s390/include/asm/
gmap.h:135 int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
138 unsigned long *pgt, int *dat_protection, int *fake);
/Linux-v6.6/arch/s390/mm/
gmap.c:1319 unsigned long *pgt) in __gmap_unshadow_pgt() argument
1325 pgt[i] = _PAGE_INVALID; in __gmap_unshadow_pgt()
1338 phys_addr_t sto, pgt; in gmap_unshadow_pgt() local
1348 pgt = *ste & _SEGMENT_ENTRY_ORIGIN; in gmap_unshadow_pgt()
1350 __gmap_unshadow_pgt(sg, raddr, __va(pgt)); in gmap_unshadow_pgt()
1352 page = phys_to_page(pgt); in gmap_unshadow_pgt()
1369 phys_addr_t pgt; in __gmap_unshadow_sgt() local
1376 pgt = sgt[i] & _REGION_ENTRY_ORIGIN; in __gmap_unshadow_sgt()
1378 __gmap_unshadow_pgt(sg, raddr, __va(pgt)); in __gmap_unshadow_sgt()
1380 page = phys_to_page(pgt); in __gmap_unshadow_sgt()
[all …]
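
__gmap_unshadow_pgt() above retires a shadow page table by storing _PAGE_INVALID into every entry; s390 page tables hold 256 eight-byte PTEs (_PAGE_ENTRIES), which the constant below mirrors. The callers then recover the table's backing page from the parent entry's origin bits, as in phys_to_page(pgt):

#include <asm/pgtable.h>        /* s390: _PAGE_INVALID */

static void example_unshadow_pgt(unsigned long *pgt)
{
        int i;

        for (i = 0; i < 256; i++)       /* _PAGE_ENTRIES on s390 */
                pgt[i] = _PAGE_INVALID;
}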
/Linux-v6.6/arch/arm64/kvm/hyp/include/nvhe/
pkvm.h:35 struct kvm_pgtable pgt; member
mem_protect.h:50 struct kvm_pgtable pgt; member
/Linux-v6.6/arch/powerpc/kvm/
book3s_64_mmu_radix.c:1303 pgd_t *pgt; in debugfs_radix_read() local
1341 pgt = NULL; in debugfs_radix_read()
1345 pgt = NULL; in debugfs_radix_read()
1355 if (!pgt) { in debugfs_radix_read()
1357 pgt = kvm->arch.pgtable; in debugfs_radix_read()
1364 pgt = nested->shadow_pgtable; in debugfs_radix_read()
1373 "pgdir: %lx\n", (unsigned long)pgt); in debugfs_radix_read()
1378 pgdp = pgt + pgd_index(gpa); in debugfs_radix_read()
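
The debugfs reader above first picks which radix root to dump (the guest's own kvm->arch.pgtable, or a nested guest's shadow_pgtable) and only then indexes it with pgdp = pgt + pgd_index(gpa). Assuming pgd_index() has its usual definition, the indexing step restates as:

#include <linux/pgtable.h>

static pgd_t *example_radix_pgd_entry(pgd_t *pgt, unsigned long gpa)
{
        /* pgd_index(gpa): which top-level slot covers this address. */
        return pgt + ((gpa >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1));
}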
/Linux-v6.6/drivers/accel/habanalabs/common/mmu/
mmu.c:945 u64 hl_mmu_hr_pte_phys_to_virt(struct hl_ctx *ctx, struct pgt_info *pgt, in hl_mmu_hr_pte_phys_to_virt() argument
951 return pgt->virt_addr + pte_offset; in hl_mmu_hr_pte_phys_to_virt()
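
hl_mmu_hr_pte_phys_to_virt() above converts a PTE's physical address into a CPU pointer by adding a precomputed pte_offset to the host-resident page's virt_addr. A hedged sketch, assuming the offset is the PTE's displacement within that page-table page (the pgt_info layout here is a guess from the call's name):

#include <linux/types.h>

static u64 example_pte_phys_to_virt(u64 pgt_virt_addr, u64 pgt_phys_addr,
                                    u64 phys_pte_addr)
{
        /* Same offset inside the page, different address space. */
        return pgt_virt_addr + (phys_pte_addr - pgt_phys_addr);
}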
/Linux-v6.6/arch/x86/events/intel/
uncore_nhmex.c:877 DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
/Linux-v6.6/drivers/net/ethernet/broadcom/bnx2x/
bnx2x_self_test.c:78 static int pgt(struct st_pred_args *args) in pgt() function
395 NA, 1, 0, pgt,
/Linux-v6.6/drivers/accel/habanalabs/common/
habanalabs.h:3781 u64 hl_mmu_hr_pte_phys_to_virt(struct hl_ctx *ctx, struct pgt_info *pgt, u64 phys_pte_addr,