Lines matching refs:range — references to the range symbol in the HMM range-walk, snapshot and fault code (the leading number on each line is the line number in the source file):
130 struct hmm_range *range; in hmm_invalidate_range() local
133 list_for_each_entry(range, &hmm->ranges, list) { in hmm_invalidate_range()
136 if (end < range->start || start >= range->end) in hmm_invalidate_range()
139 range->valid = false; in hmm_invalidate_range()
140 addr = max(start, range->start); in hmm_invalidate_range()
141 idx = (addr - range->start) >> PAGE_SHIFT; in hmm_invalidate_range()
142 npages = (min(range->end, end) - addr) >> PAGE_SHIFT; in hmm_invalidate_range()
143 memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages); in hmm_invalidate_range()
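The hmm_invalidate_range() lines above clamp the invalidated span to each registered range, mark the range's snapshot stale and zero the overlapping part of the pfns array. Below is a minimal stand-alone C model of that clamp-and-clear arithmetic; the PAGE_SHIFT value, the toy_* names and the struct layout are illustrative stand-ins, not the kernel definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SHIFT 12                 /* stand-in for the kernel's PAGE_SHIFT */

struct toy_range {                    /* loosely modelled on struct hmm_range */
	unsigned long start, end;     /* virtual address span, page aligned   */
	uint64_t *pfns;               /* one entry per page in [start, end)   */
	int valid;
};

/* Mirror of the clamp-and-clear logic in the matched lines above. */
static void toy_invalidate(struct toy_range *range,
			   unsigned long start, unsigned long end)
{
	unsigned long addr, idx, npages;

	if (end < range->start || start >= range->end)
		return;                       /* no overlap with this range */

	range->valid = 0;                     /* the snapshot is now stale  */
	addr = start > range->start ? start : range->start;
	idx = (addr - range->start) >> PAGE_SHIFT;
	npages = ((end < range->end ? end : range->end) - addr) >> PAGE_SHIFT;
	memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages);
}

int main(void)
{
	uint64_t pfns[8];
	struct toy_range r = { 0x10000, 0x18000, pfns, 1 };

	memset(pfns, 0xff, sizeof(pfns));
	toy_invalidate(&r, 0x12000, 0x15000);   /* overlaps pages 2..4 */
	for (int i = 0; i < 8; i++)
		printf("pfns[%d] = %#llx\n", i, (unsigned long long)pfns[i]);
	printf("valid = %d\n", r.valid);
	return 0;
}

Run as written, only entries 2..4 of the eight-page range are zeroed and valid drops to 0, which is exactly the effect the matched lines have on a registered range.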
292 struct hmm_range *range; member
303 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_do_fault() local
313 *pfn = range->values[HMM_PFN_ERROR]; in hmm_vma_do_fault()
325 struct hmm_range *range = hmm_vma_walk->range; in hmm_pfns_bad() local
326 uint64_t *pfns = range->pfns; in hmm_pfns_bad()
329 i = (addr - range->start) >> PAGE_SHIFT; in hmm_pfns_bad()
331 pfns[i] = range->values[HMM_PFN_ERROR]; in hmm_pfns_bad()
353 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_hole_() local
354 uint64_t *pfns = range->pfns; in hmm_vma_walk_hole_()
358 i = (addr - range->start) >> PAGE_SHIFT; in hmm_vma_walk_hole_()
360 pfns[i] = range->values[HMM_PFN_NONE]; in hmm_vma_walk_hole_()
378 struct hmm_range *range = hmm_vma_walk->range; in hmm_pte_need_fault() local
385 if (!(pfns & range->flags[HMM_PFN_VALID])) in hmm_pte_need_fault()
388 if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) { in hmm_pte_need_fault()
390 if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) { in hmm_pte_need_fault()
391 *write_fault = pfns & range->flags[HMM_PFN_WRITE]; in hmm_pte_need_fault()
398 *fault = !(cpu_flags & range->flags[HMM_PFN_VALID]); in hmm_pte_need_fault()
400 if ((pfns & range->flags[HMM_PFN_WRITE]) && in hmm_pte_need_fault()
401 !(cpu_flags & range->flags[HMM_PFN_WRITE])) { in hmm_pte_need_fault()
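Taken together, the hmm_pte_need_fault() lines derive two booleans, fault and write_fault, by comparing what the caller asked for in the pre-filled pfns entry against what the CPU page table currently grants (cpu_flags). The sketch below models only that comparison; the flag bits are made up in place of range->flags[], and the surrounding walk state is omitted.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in flag bits; the kernel keeps the real ones in range->flags[]. */
#define TOY_PFN_VALID          (1ULL << 0)
#define TOY_PFN_WRITE          (1ULL << 1)
#define TOY_PFN_DEVICE_PRIVATE (1ULL << 2)

/*
 * "pfns" is the caller's request for this page, "cpu_flags" is what the
 * CPU page table currently provides.
 */
static void toy_pte_need_fault(uint64_t pfns, uint64_t cpu_flags,
			       bool *fault, bool *write_fault)
{
	*fault = *write_fault = false;

	if (!(pfns & TOY_PFN_VALID))
		return;                 /* caller did not ask for this page */

	if (cpu_flags & TOY_PFN_DEVICE_PRIVATE) {
		/* Fault on device-private memory only if the caller wants it. */
		if (pfns & TOY_PFN_DEVICE_PRIVATE) {
			*write_fault = pfns & TOY_PFN_WRITE;
			*fault = true;
		}
		return;
	}

	/* Fault if the CPU entry is not valid ... */
	*fault = !(cpu_flags & TOY_PFN_VALID);
	/* ... or if write access is wanted but not currently granted. */
	if ((pfns & TOY_PFN_WRITE) && !(cpu_flags & TOY_PFN_WRITE)) {
		*write_fault = true;
		*fault = true;
	}
}

int main(void)
{
	bool fault, write_fault;

	toy_pte_need_fault(TOY_PFN_VALID | TOY_PFN_WRITE, TOY_PFN_VALID,
			   &fault, &write_fault);
	printf("read-only mapping, write requested: fault=%d write_fault=%d\n",
	       fault, write_fault);
	return 0;
}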
431 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_hole() local
436 i = (addr - range->start) >> PAGE_SHIFT; in hmm_vma_walk_hole()
438 pfns = &range->pfns[i]; in hmm_vma_walk_hole()
444 static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd) in pmd_to_hmm_pfn_flags() argument
448 return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] | in pmd_to_hmm_pfn_flags()
449 range->flags[HMM_PFN_WRITE] : in pmd_to_hmm_pfn_flags()
450 range->flags[HMM_PFN_VALID]; in pmd_to_hmm_pfn_flags()
460 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_handle_pmd() local
466 cpu_flags = pmd_to_hmm_pfn_flags(range, pmd); in hmm_vma_handle_pmd()
475 pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags; in hmm_vma_handle_pmd()
480 static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte) in pte_to_hmm_pfn_flags() argument
484 return pte_write(pte) ? range->flags[HMM_PFN_VALID] | in pte_to_hmm_pfn_flags()
485 range->flags[HMM_PFN_WRITE] : in pte_to_hmm_pfn_flags()
486 range->flags[HMM_PFN_VALID]; in pte_to_hmm_pfn_flags()
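Both translation helpers above follow the same pattern: a present entry always yields the VALID flag, plus the WRITE flag when the entry is writable. A toy version, with invented flag bits and a plain boolean standing in for pmd_write()/pte_write():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_PFN_VALID (1ULL << 0)
#define TOY_PFN_WRITE (1ULL << 1)

/* Same shape as pmd_to_hmm_pfn_flags()/pte_to_hmm_pfn_flags() above. */
static uint64_t toy_entry_to_flags(bool writable)
{
	return writable ? TOY_PFN_VALID | TOY_PFN_WRITE : TOY_PFN_VALID;
}

int main(void)
{
	printf("read-only: %#llx, writable: %#llx\n",
	       (unsigned long long)toy_entry_to_flags(false),
	       (unsigned long long)toy_entry_to_flags(true));
	return 0;
}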
494 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_handle_pte() local
501 *pfn = range->values[HMM_PFN_NONE]; in hmm_vma_handle_pte()
502 cpu_flags = pte_to_hmm_pfn_flags(range, pte); in hmm_vma_handle_pte()
526 cpu_flags = range->flags[HMM_PFN_VALID] | in hmm_vma_handle_pte()
527 range->flags[HMM_PFN_DEVICE_PRIVATE]; in hmm_vma_handle_pte()
529 range->flags[HMM_PFN_WRITE] : 0; in hmm_vma_handle_pte()
534 *pfn = hmm_pfn_from_pfn(range, swp_offset(entry)); in hmm_vma_handle_pte()
551 *pfn = range->values[HMM_PFN_ERROR]; in hmm_vma_handle_pte()
558 *pfn = hmm_pfn_from_pfn(range, pte_pfn(pte)) | cpu_flags; in hmm_vma_handle_pte()
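The hmm_vma_handle_pte() lines end by packing a page frame number and the computed cpu_flags into a single 64-bit entry via hmm_pfn_from_pfn(). That helper's encoding is not part of this listing, so the layout below (flags in the low bits, the frame number shifted above them) is only an assumption for illustration; in HMM the driver dictates the real layout through the flags/values tables it supplies in struct hmm_range.

#include <stdint.h>
#include <stdio.h>

/* Assumed toy encoding: low bits are flags, the frame number sits above. */
#define TOY_PFN_VALID (1ULL << 0)
#define TOY_PFN_WRITE (1ULL << 1)
#define TOY_PFN_SHIFT 2

static uint64_t toy_pfn_from_pfn(uint64_t pfn)
{
	return (pfn << TOY_PFN_SHIFT) | TOY_PFN_VALID;
}

int main(void)
{
	uint64_t cpu_flags = TOY_PFN_VALID | TOY_PFN_WRITE;  /* writable pte */
	uint64_t entry = toy_pfn_from_pfn(0x1234) | cpu_flags;

	printf("entry = %#llx, frame = %#llx, writable = %llu\n",
	       (unsigned long long)entry,
	       (unsigned long long)(entry >> TOY_PFN_SHIFT),
	       (unsigned long long)!!(entry & TOY_PFN_WRITE));
	return 0;
}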
573 struct hmm_range *range = hmm_vma_walk->range; in hmm_vma_walk_pmd() local
574 uint64_t *pfns = range->pfns; in hmm_vma_walk_pmd()
578 i = (addr - range->start) >> PAGE_SHIFT; in hmm_vma_walk_pmd()
584 if (pmd_huge(*pmdp) && (range->vma->vm_flags & VM_HUGETLB)) in hmm_vma_walk_pmd()
627 static void hmm_pfns_clear(struct hmm_range *range, in hmm_pfns_clear() argument
633 *pfns = range->values[HMM_PFN_NONE]; in hmm_pfns_clear()
636 static void hmm_pfns_special(struct hmm_range *range) in hmm_pfns_special() argument
638 unsigned long addr = range->start, i = 0; in hmm_pfns_special()
640 for (; addr < range->end; addr += PAGE_SIZE, i++) in hmm_pfns_special()
641 range->pfns[i] = range->values[HMM_PFN_SPECIAL]; in hmm_pfns_special()
661 int hmm_vma_get_pfns(struct hmm_range *range) in hmm_vma_get_pfns() argument
663 struct vm_area_struct *vma = range->vma; in hmm_vma_get_pfns()
669 if (range->start < vma->vm_start || range->start >= vma->vm_end) in hmm_vma_get_pfns()
671 if (range->end < vma->vm_start || range->end > vma->vm_end) in hmm_vma_get_pfns()
684 hmm_pfns_special(range); in hmm_vma_get_pfns()
695 hmm_pfns_clear(range, range->pfns, range->start, range->end); in hmm_vma_get_pfns()
701 range->valid = true; in hmm_vma_get_pfns()
702 list_add_rcu(&range->list, &hmm->ranges); in hmm_vma_get_pfns()
706 hmm_vma_walk.range = range; in hmm_vma_get_pfns()
717 walk_page_range(range->start, range->end, &mm_walk); in hmm_vma_get_pfns()
760 bool hmm_vma_range_done(struct hmm_range *range) in hmm_vma_range_done() argument
762 unsigned long npages = (range->end - range->start) >> PAGE_SHIFT; in hmm_vma_range_done()
765 if (range->end <= range->start) { in hmm_vma_range_done()
770 hmm = hmm_register(range->vma->vm_mm); in hmm_vma_range_done()
772 memset(range->pfns, 0, sizeof(*range->pfns) * npages); in hmm_vma_range_done()
777 list_del_rcu(&range->list); in hmm_vma_range_done()
780 return range->valid; in hmm_vma_range_done()
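The snapshot entry points and hmm_vma_range_done() cooperate through the valid flag: hmm_vma_get_pfns()/hmm_vma_fault() set it and publish the range on hmm->ranges, a concurrent hmm_invalidate_range() clears it (and zeroes the affected pfns), and hmm_vma_range_done() unlinks the range and reports whether the snapshot survived. The toy below models only that handshake; the toy_* names are invented, and the driver-side locking around the device page-table update, the range list and the actual page-table walk are all omitted.

#include <stdbool.h>
#include <stdio.h>

struct toy_range { bool valid; };

static void toy_snapshot(struct toy_range *r)   { r->valid = true;  }
static void toy_invalidate(struct toy_range *r) { r->valid = false; }
static bool toy_range_done(struct toy_range *r) { return r->valid;  }

int main(void)
{
	struct toy_range r;
	int attempt = 0;

	for (;;) {
		toy_snapshot(&r);               /* hmm_vma_get_pfns()         */
		if (attempt++ == 0)
			toy_invalidate(&r);     /* concurrent invalidation    */
		/* A driver would take its page-table lock here.             */
		if (toy_range_done(&r))         /* hmm_vma_range_done()       */
			break;                  /* snapshot is trustworthy    */
		/* Stale snapshot: drop the lock and redo the walk.          */
	}
	printf("snapshot accepted after %d attempt(s)\n", attempt);
	return 0;
}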
831 int hmm_vma_fault(struct hmm_range *range, bool block) in hmm_vma_fault() argument
833 struct vm_area_struct *vma = range->vma; in hmm_vma_fault()
834 unsigned long start = range->start; in hmm_vma_fault()
841 if (range->start < vma->vm_start || range->start >= vma->vm_end) in hmm_vma_fault()
843 if (range->end < vma->vm_start || range->end > vma->vm_end) in hmm_vma_fault()
848 hmm_pfns_clear(range, range->pfns, range->start, range->end); in hmm_vma_fault()
858 hmm_pfns_special(range); in hmm_vma_fault()
869 hmm_pfns_clear(range, range->pfns, range->start, range->end); in hmm_vma_fault()
875 range->valid = true; in hmm_vma_fault()
876 list_add_rcu(&range->list, &hmm->ranges); in hmm_vma_fault()
881 hmm_vma_walk.range = range; in hmm_vma_fault()
883 hmm_vma_walk.last = range->start; in hmm_vma_fault()
894 ret = walk_page_range(start, range->end, &mm_walk); in hmm_vma_fault()
901 i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT; in hmm_vma_fault()
902 hmm_pfns_clear(range, &range->pfns[i], hmm_vma_walk.last, in hmm_vma_fault()
903 range->end); in hmm_vma_fault()
904 hmm_vma_range_done(range); in hmm_vma_fault()
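The tail of hmm_vma_fault() is its error path: when the walk stops early, hmm_vma_walk.last records how far it got, the unfinished entries from that page onwards are reset to the NONE value, and the range is torn down with hmm_vma_range_done(). A small stand-alone model of that partial-progress cleanup, with stand-in PAGE_SHIFT and NONE values:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12          /* stand-in */
#define TOY_PFN_NONE 0ULL        /* stand-in for range->values[HMM_PFN_NONE] */

/*
 * The walk stopped at "last": reset every entry from that page to the
 * end of the range before the range is unregistered.
 */
static void toy_clear_tail(uint64_t *pfns, unsigned long start,
			   unsigned long end, unsigned long last)
{
	unsigned long i = (last - start) >> PAGE_SHIFT;
	unsigned long npages = (end - start) >> PAGE_SHIFT;

	for (; i < npages; i++)
		pfns[i] = TOY_PFN_NONE;
}

int main(void)
{
	uint64_t pfns[4] = { 0x11, 0x22, 0x33, 0x44 };

	toy_clear_tail(pfns, 0x40000, 0x44000, 0x42000); /* 2 of 4 pages done */
	for (int i = 0; i < 4; i++)
		printf("pfns[%d] = %#llx\n", i, (unsigned long long)pfns[i]);
	return 0;
}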