Searched refs:spt (Results 1 – 20 of 20) sorted by relevance

/Linux-v5.15/drivers/gpu/drm/i915/gvt/
gtt.c
652 struct intel_vgpu_ppgtt_spt *spt, in ppgtt_spt_get_entry() argument
657 struct intel_gvt *gvt = spt->vgpu->gvt; in ppgtt_spt_get_entry()
667 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT, in ppgtt_spt_get_entry()
668 spt->vgpu); in ppgtt_spt_get_entry()
673 spt->guest_page.pde_ips : false); in ppgtt_spt_get_entry()
681 struct intel_vgpu_ppgtt_spt *spt, in ppgtt_spt_set_entry() argument
686 struct intel_gvt *gvt = spt->vgpu->gvt; in ppgtt_spt_set_entry()
696 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT, in ppgtt_spt_set_entry()
697 spt->vgpu); in ppgtt_spt_set_entry()
700 #define ppgtt_get_guest_entry(spt, e, index) \ argument
[all …]
trace.h
43 TP_PROTO(int id, void *spt, int type, unsigned long mfn,
46 TP_ARGS(id, spt, type, mfn, gpt_gfn),
50 __field(void *, spt)
58 __entry->spt = spt;
66 __entry->spt,
73 TP_PROTO(int id, void *spt, int type),
75 TP_ARGS(id, spt, type),
79 __field(void *, spt)
85 __entry->spt = spt;
91 __entry->spt,
[all …]
gtt.h
235 struct intel_vgpu_ppgtt_spt *spt; member
/Linux-v5.15/arch/powerpc/mm/book3s64/
subpage_prot.c
24 struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context); in subpage_prot_free() local
28 if (!spt) in subpage_prot_free()
32 if (spt->low_prot[i]) { in subpage_prot_free()
33 free_page((unsigned long)spt->low_prot[i]); in subpage_prot_free()
34 spt->low_prot[i] = NULL; in subpage_prot_free()
39 p = spt->protptrs[i]; in subpage_prot_free()
42 spt->protptrs[i] = NULL; in subpage_prot_free()
43 for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr; in subpage_prot_free()
49 spt->maxaddr = 0; in subpage_prot_free()
50 kfree(spt); in subpage_prot_free()
[all …]
mmu_context.c
123 if (current->mm->context.hash_context->spt) { in hash__init_new_context()
124 mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table), in hash__init_new_context()
126 if (!mm->context.hash_context->spt) { in hash__init_new_context()
137 kfree(mm->context.hash_context->spt); in hash__init_new_context()
hash_utils.c
1221 struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context); in subpage_protection() local
1225 if (!spt) in subpage_protection()
1228 if (ea >= spt->maxaddr) in subpage_protection()
1232 sbpm = spt->low_prot; in subpage_protection()
1234 sbpm = spt->protptrs[ea >> SBP_L3_SHIFT]; in subpage_protection()
/Linux-v5.15/arch/x86/kvm/mmu/
mmu_audit.c
40 u64 *ent = sp->spt; in __mmu_spte_walk()
113 gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); in audit_mappings()
136 gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt); in inspect_spte_has_rmap()
145 (long int)(sptep - rev_sp->spt), rev_sp->gfn); in inspect_spte_has_rmap()
183 if (!is_shadow_present_pte(sp->spt[i])) in check_mappings_rmap()
186 inspect_spte_has_rmap(kvm, sp->spt + i); in check_mappings_rmap()
paging_tmpl.h
646 i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1); in FNAME()
647 spte = sp->spt + i; in FNAME()
996 pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t); in FNAME()
1111 if (!sp->spt[i]) in FNAME()
1120 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) { in FNAME()
1130 if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access, in FNAME()
1135 drop_spte(vcpu->kvm, &sp->spt[i]); in FNAME()
1142 host_writable = sp->spt[i] & shadow_host_writable_mask; in FNAME()
1144 set_spte_ret |= set_spte(vcpu, &sp->spt[i], in FNAME()
1146 gfn, spte_to_pfn(sp->spt[i]), in FNAME()
tdp_mmu.c
62 free_page((unsigned long)sp->spt); in tdp_mmu_free_sp()
180 sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache); in alloc_tdp_mmu_page()
181 set_page_private(virt_to_page(sp->spt), (unsigned long)sp); in alloc_tdp_mmu_page()
217 return __pa(root->spt); in kvm_tdp_mmu_get_vcpu_root_hpa()
661 for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)
762 for_each_tdp_pte_min_level(iter, root->spt, root->role.level, in zap_gfn_range()
1050 child_pt = sp->spt; in kvm_tdp_mmu_map()
1231 for_each_tdp_pte_min_level(iter, root->spt, root->role.level, in wrprot_gfn_range()
1497 for_each_tdp_pte_min_level(iter, root->spt, root->role.level, in write_protect_gfn()
mmu_internal.h
53 u64 *spt; member
mmu.c
1081 kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); in rmap_add()
1097 gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt); in rmap_remove()
1684 static int is_empty_shadow_page(u64 *spt) in is_empty_shadow_page() argument
1689 for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++) in is_empty_shadow_page()
1713 MMU_WARN_ON(!is_empty_shadow_page(sp->spt)); in kvm_mmu_free_page()
1716 free_page((unsigned long)sp->spt); in kvm_mmu_free_page()
1754 sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache); in kvm_mmu_alloc_page()
1757 set_page_private(virt_to_page(sp->spt), (unsigned long)sp); in kvm_mmu_alloc_page()
1787 index = spte - sp->spt; in mark_unsync()
1841 u64 ent = sp->spt[i]; in __mmu_unsync_walk()
[all …]
/Linux-v5.15/drivers/block/
ataflop.c
170 unsigned spt; /* sectors per track */ member
753 if (!UDT || desc->track >= UDT->blocks/UDT->spt/2 || desc->head >= 2) { in do_format()
758 nsect = UDT->spt; in do_format()
1021 dma_wd.fdc_acces_seccount = read_track ? SUDT->spt : 1; in fd_rwsec()
1086 if (addr >= PhysTrackBuffer + SUDT->spt*512) { in fd_readtrack_check()
1154 !(read_track && FDC_READ(FDCREG_SECTOR) > SUDT->spt)) { in fd_rwsec_done1()
1463 ReqTrack = block / UDT->spt; in setup_req_params()
1464 ReqSector = block - ReqTrack * UDT->spt + 1; in setup_req_params()
1597 getprm.sect = dtp->spt; in fd_locked_ioctl()
1599 getprm.track = dtp->blocks/dtp->spt/2; in fd_locked_ioctl()
[all …]
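
The ataflop.c hits above centre on spt, the sectors-per-track count that setup_req_params() uses to split a linear block number into ReqTrack and ReqSector (track = block / spt, sector = block - track * spt + 1). A small standalone sketch of that arithmetic follows; the values (spt = 9, block = 100) are assumed examples, not the driver's real geometry.

/*
 * Illustrative sketch of the block -> track/sector arithmetic seen in
 * setup_req_params() above, using made-up example values; this is not
 * the driver's code.
 */
#include <stdio.h>

int main(void)
{
    unsigned int spt = 9;        /* assumed sectors per track */
    unsigned int block = 100;    /* assumed linear block number from a request */

    unsigned int track  = block / spt;              /* whole tracks before the block */
    unsigned int sector = block - track * spt + 1;  /* 1-based sector within the track */

    printf("block %u -> track %u, sector %u\n", block, track, sector);
    return 0;
}
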
/Linux-v5.15/drivers/scsi/esas2r/
esas2r_ioctl.c
691 struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru; in scsi_passthru_comp_cb() local
694 spt->scsi_status = rq->func_rsp.scsi_rsp.scsi_stat; in scsi_passthru_comp_cb()
695 spt->sense_length = rq->sense_len; in scsi_passthru_comp_cb()
696 spt->residual_length = in scsi_passthru_comp_cb()
734 spt->req_status = sts; in scsi_passthru_comp_cb()
737 spt->target_id = in scsi_passthru_comp_cb()
738 esas2r_targ_db_find_next_present(a, (u16)spt->target_id); in scsi_passthru_comp_cb()
982 struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru; in hba_ioctl_callback() local
985 memcpy(&lun, spt->lun, sizeof(struct scsi_lun)); in hba_ioctl_callback()
1000 if (spt->target_id >= ESAS2R_MAX_TARGETS || !check_lun(lun)) { in hba_ioctl_callback()
[all …]
/Linux-v5.15/kernel/
torture.c
726 int spt; in stutter_wait() local
729 spt = READ_ONCE(stutter_pause_test); in stutter_wait()
730 for (; spt; spt = READ_ONCE(stutter_pause_test)) { in stutter_wait()
735 if (spt == 1) { in stutter_wait()
737 } else if (spt == 2) { in stutter_wait()
/Linux-v5.15/Documentation/virt/kvm/
mmu.rst
208 spt:
211 The page pointed to by spt will have its page->private pointing back
213 sptes in spt point either at guest pages, or at lower-level shadow pages.
214 Specifically, if sp1 and sp2 are shadow pages, then sp1->spt[n] may point
215 at __pa(sp2->spt). sp2 will point back at sp1 through parent_pte.
216 The spt array forms a DAG structure with the shadow page as a node, and
228 The reverse mapping for the pte/ptes pointing at this page's spt. If
243 A bitmap indicating which sptes in spt point (directly or indirectly) at
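
The mmu.rst excerpt above explains the shadow-page layout: each shadow page owns a page-sized spt array of 64-bit sptes, a non-leaf entry in sp1->spt may hold the physical address of a child page's spt (__pa(sp2->spt)), and the child points back at its parent through parent_pte. The following minimal userspace sketch models that parent/child link; struct shadow_page, shadow_page_alloc, link_shadow_page and SPTE_PRESENT are illustrative stand-ins for the kernel's definitions, and a plain pointer stands in for __pa().

/*
 * Simplified userspace model of the sp->spt relationship described in
 * mmu.rst above: 512 sptes per 4 KiB page, parent entries pointing at a
 * child page's spt. Illustrative sketch only, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SPTES_PER_PAGE 512          /* 4 KiB page / sizeof(u64) */
#define SPTE_PRESENT   (1ULL << 0)  /* assumed "present" bit for this sketch */

struct shadow_page {
    uint64_t *spt;                  /* page-sized array of sptes */
    struct shadow_page *parent;     /* stand-in for the parent_pte back-reference */
};

/* Allocate a shadow page whose spt array starts out empty. */
static struct shadow_page *shadow_page_alloc(void)
{
    struct shadow_page *sp = calloc(1, sizeof(*sp));
    sp->spt = calloc(SPTES_PER_PAGE, sizeof(uint64_t));
    return sp;
}

/*
 * Link sp2 under sp1 at index n: sp1->spt[n] stores the address of
 * sp2->spt (the kernel stores __pa(sp2->spt); a pointer stands in here),
 * and sp2 records sp1 as its parent.
 */
static void link_shadow_page(struct shadow_page *sp1, int n, struct shadow_page *sp2)
{
    sp1->spt[n] = (uint64_t)(uintptr_t)sp2->spt | SPTE_PRESENT;
    sp2->parent = sp1;
}

int main(void)
{
    struct shadow_page *sp1 = shadow_page_alloc();
    struct shadow_page *sp2 = shadow_page_alloc();

    link_shadow_page(sp1, 3, sp2);
    printf("sp1->spt[3] = %#llx (points at sp2->spt)\n",
           (unsigned long long)sp1->spt[3]);
    return 0;
}
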
/Linux-v5.15/arch/powerpc/include/asm/book3s/64/
mmu.h
186 return ctx->hash_context->spt; in mm_ctx_subpage_prot()
mmu-hash.h
723 struct subpage_prot_table *spt; member
/Linux-v5.15/arch/s390/boot/
head.S
322 spt 6f-.LPG0(%r13)
/Linux-v5.15/drivers/scsi/qla2xxx/
qla_isr.c
2908 struct t10_pi_tuple *spt; in qla2x00_handle_dif_error() local
2930 spt = page_address(sg_page(sg)) + sg->offset; in qla2x00_handle_dif_error()
2931 spt += j; in qla2x00_handle_dif_error()
2933 spt->app_tag = T10_PI_APP_ESCAPE; in qla2x00_handle_dif_error()
2935 spt->ref_tag = T10_PI_REF_ESCAPE; in qla2x00_handle_dif_error()
/Linux-v5.15/arch/s390/tools/
opcodes.txt
199 b208 spt S_RD