Lines matching the full-word search terms "i", "tlb" and "size" in the SPARC64 huge TLB page support file

Top of file:
     1  // SPDX-License-Identifier: GPL-2.0
     3   * SPARC64 Huge TLB page support.
    17  #include <asm/tlb.h>
    22  /* Slightly simplified from the non-hugepage variant because by
In hugetlb_get_unmapped_area_bottomup():
    48  VM_BUG_ON(addr != -ENOMEM);
In hugetlb_get_unmapped_area_topdown():
    64  struct mm_struct *mm = current->mm;
    68  /* This should only ever run for 32-bit processes. */
    74  info.high_limit = mm->mmap_base;
    81   * so fall back to the bottom-up function here. This scenario
    86  VM_BUG_ON(addr != -ENOMEM);
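
The top-down matches above show the usual pattern of searching below mm->mmap_base first and falling back to a bottom-up search when that fails with -ENOMEM (the only error the VM_BUG_ON tolerates). A minimal user-space model of that control flow, with a toy slot array standing in for the address space and invented limits, might look like:

/* Toy model of "try top-down below the mmap base, fall back to a
 * bottom-up search over the whole range".  The address space is a
 * 16-slot array; sizes and limits are invented for illustration. */
#include <stdio.h>

#define ENOMEM  12
#define NSLOTS  16

static int used[NSLOTS];                        /* 1 = slot already mapped */

static long find_free(int lo, int hi, int len, int topdown)
{
        int start, i, ok;
        int first = topdown ? hi - len : lo;
        int last  = topdown ? lo : hi - len;
        int step  = topdown ? -1 : 1;

        for (start = first; topdown ? start >= last : start <= last; start += step) {
                ok = 1;
                for (i = 0; i < len; i++)
                        if (used[start + i])
                                ok = 0;
                if (ok)
                        return start;
        }
        return -ENOMEM;
}

int main(void)
{
        int i;
        long addr;

        for (i = 0; i < 12; i++)                /* everything below the pretend mmap base is taken */
                used[i] = 1;

        addr = find_free(0, 12, 3, 1);          /* first pass: top-down below the base */
        if (addr == -ENOMEM)                    /* only -ENOMEM may fall through to the retry */
                addr = find_free(0, NSLOTS, 3, 0);
        printf("got slot %ld\n", addr);
        return 0;
}
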
In hugetlb_get_unmapped_area():
   101  struct mm_struct *mm = current->mm;
   109  return -EINVAL;
   111  return -ENOMEM;
   115  return -EINVAL;
   122  if (task_size - len >= addr &&
   126  if (mm->get_unmapped_area == arch_get_unmapped_area)
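
Line 122 checks a caller-supplied hint with task_size - len >= addr rather than addr + len <= task_size; written that way the comparison cannot wrap once len is known to fit. A small stand-alone sketch (illustrative constants only, not the real sparc64 TASK_SIZE) shows the difference:

/* Stand-alone sketch of the range check at line 122.  TASK_SIZE and
 * HPAGE_SIZE here are illustrative values, not the sparc64 ones. */
#include <stdio.h>

#define TASK_SIZE  (1UL << 44)
#define HPAGE_SIZE (1UL << 23)

/* Naive form: addr + len can wrap past zero and appear to fit. */
static int fits_naive(unsigned long addr, unsigned long len)
{
        return addr + len <= TASK_SIZE;
}

/* Kernel-style form: once len <= task_size, task_size - len cannot
 * wrap, so a hint near the top of the address space is rejected. */
static int fits_safe(unsigned long addr, unsigned long len)
{
        return len <= TASK_SIZE && TASK_SIZE - len >= addr;
}

int main(void)
{
        unsigned long addr = ~0UL - HPAGE_SIZE + 1;     /* hint just below 2^64 */
        unsigned long len  = 2 * HPAGE_SIZE;

        printf("naive: %d  safe: %d\n", fits_naive(addr, len), fits_safe(addr, len));
        return 0;
}
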
In huge_tte_to_size():
   269  unsigned long size = 1UL << huge_tte_to_shift(pte);
   271  if (size == REAL_HPAGE_SIZE)
   272  size = HPAGE_SIZE;
   273  return size;
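
Lines 269-273 turn the page-size shift encoded in a TTE into the size Linux accounts for, promoting a REAL_HPAGE_SIZE mapping to HPAGE_SIZE. The sketch below models that with 4 MB/8 MB values hard-coded for illustration; the real constants come from the sparc64 headers:

/* Stand-alone model of huge_tte_to_size() as matched at lines 269-273.
 * The 4 MB/8 MB constants mirror sparc64's REAL_HPAGE_SIZE/HPAGE_SIZE
 * but are hard-coded here purely for illustration. */
#include <stdio.h>

#define REAL_HPAGE_SHIFT 22                     /* assumed 4 MB hardware TTE */
#define HPAGE_SHIFT      23                     /* assumed 8 MB Linux huge page */
#define REAL_HPAGE_SIZE  (1UL << REAL_HPAGE_SHIFT)
#define HPAGE_SIZE       (1UL << HPAGE_SHIFT)

/* A 4 MB hardware mapping is reported as the 8 MB huge page it is half
 * of; any other encoded size maps through unchanged. */
static unsigned long tte_shift_to_size(unsigned int shift)
{
        unsigned long size = 1UL << shift;

        if (size == REAL_HPAGE_SIZE)
                size = HPAGE_SIZE;
        return size;
}

int main(void)
{
        printf("shift 22 -> %lu MB\n", tte_shift_to_size(22) >> 20);
        printf("shift 29 -> %lu MB\n", tte_shift_to_size(29) >> 20);
        return 0;
}
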
In set_huge_pte_at():
   334  unsigned long i, size;
   337  size = huge_tte_to_size(entry);
   340  if (size >= PUD_SIZE)
   342  else if (size >= PMD_SIZE)
   347  nptes = size >> shift;
   350  mm->context.hugetlb_pte_count += nptes;
   352  addr &= ~(size - 1);
   356  for (i = 0; i < nptes; i++)
   357  ptep[i] = __pte(pte_val(entry) + (i << shift));
   361  if (size == HPAGE_SIZE)
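
The set_huge_pte_at() matches show one huge mapping being fanned out into nptes = size >> shift consecutive entries, each advanced by i << shift, after addr is masked down to the huge-page boundary. A stand-alone model of that arithmetic (plain integers instead of real TTEs, made-up shift and size) is:

/* Stand-alone model of the fan-out at lines 347-357: a 'size'-byte huge
 * mapping is stored as size >> shift consecutive entries, each pointing
 * 1 << shift bytes further in.  Entries are plain integers, and the
 * 4 MB shift / 8 MB size are assumptions, not the real TTE layout. */
#include <stdio.h>

#define SHIFT 22
#define SIZE  (2UL << SHIFT)

int main(void)
{
        unsigned long entry = 0x80000000UL;     /* made-up "TTE" for the base of the page */
        unsigned long addr  = 0x12345678UL;     /* unaligned address inside the huge page */
        unsigned long nptes = SIZE >> SHIFT;
        unsigned long ptep[SIZE >> SHIFT];
        unsigned long i;

        addr &= ~(SIZE - 1);                    /* snap down to the huge-page boundary */
        for (i = 0; i < nptes; i++)
                ptep[i] = entry + (i << SHIFT);

        for (i = 0; i < nptes; i++)
                printf("addr %#lx -> entry %#lx\n", addr + (i << SHIFT), ptep[i]);
        return 0;
}
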
In huge_ptep_get_and_clear():
   369  unsigned int i, nptes, orig_shift, shift;
   370  unsigned long size;
   374  size = huge_tte_to_size(entry);
   377  if (size >= PUD_SIZE)
   379  else if (size >= PMD_SIZE)
   384  nptes = size >> shift;
   388  mm->context.hugetlb_pte_count -= nptes;
   390  addr &= ~(size - 1);
   391  for (i = 0; i < nptes; i++)
   392  ptep[i] = __pte(0UL);
   396  if (size == HPAGE_SIZE)
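
huge_ptep_get_and_clear() mirrors the setter: the counter bumped by += nptes at line 350 is decremented by the same amount at line 388, and every slot is zeroed after the original entry has been read back for the caller. A rough model of that pairing, with simplified stand-in types rather than kernel structures, could be:

/* Stand-alone model of the pairing between set_huge_pte_at() and
 * huge_ptep_get_and_clear(): the per-mm huge-PTE count goes up by nptes
 * on set (line 350) and down by the same amount on clear (line 388),
 * and the original entry is read back before the slots are zeroed.
 * All types are simplified stand-ins, not kernel structures. */
#include <stdio.h>

struct mm_model {
        unsigned long hugetlb_pte_count;
};

static void set_huge(struct mm_model *mm, unsigned long *ptep,
                     unsigned long entry, unsigned long nptes, unsigned int shift)
{
        unsigned long i;

        mm->hugetlb_pte_count += nptes;
        for (i = 0; i < nptes; i++)
                ptep[i] = entry + (i << shift);
}

static unsigned long clear_huge(struct mm_model *mm, unsigned long *ptep,
                                unsigned long nptes)
{
        unsigned long entry = ptep[0];          /* keep the original for the caller */
        unsigned long i;

        mm->hugetlb_pte_count -= nptes;
        for (i = 0; i < nptes; i++)
                ptep[i] = 0UL;
        return entry;
}

int main(void)
{
        struct mm_model mm = { 0 };
        unsigned long ptes[2] = { 0 };
        unsigned long old;

        set_huge(&mm, ptes, 0x80000000UL, 2, 22);
        printf("after set:   count = %lu\n", mm.hugetlb_pte_count);
        old = clear_huge(&mm, ptes, 2);
        printf("after clear: count = %lu, old entry = %#lx\n",
               mm.hugetlb_pte_count, old);
        return 0;
}
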
In hugetlb_free_pte_range():
   415  static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
   421  pte_free_tlb(tlb, token, addr);
   422  mm_dec_nr_ptes(tlb->mm);
In hugetlb_free_pmd_range():
   425  static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
   442  hugetlb_free_pte_range(tlb, pmd, addr);
   453  if (end - 1 > ceiling - 1)
   458  pmd_free_tlb(tlb, pmd, start);
   459  mm_dec_nr_pmds(tlb->mm);
In hugetlb_free_pud_range():
   462  static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
   479  hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
   491  if (end - 1 > ceiling - 1)
   496  pud_free_tlb(tlb, pud, start);
   497  mm_dec_nr_puds(tlb->mm);
In hugetlb_free_pgd_range():
   500  void hugetlb_free_pgd_range(struct mmu_gather *tlb,
   519  if (end - 1 > ceiling - 1)
   520  end -= PMD_SIZE;
   521  if (addr > end - 1)
   524  pgd = pgd_offset(tlb->mm, addr);
   530  hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
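
The end - 1 > ceiling - 1 comparisons at lines 453, 491 and 519 rely on unsigned wraparound: a ceiling of 0 wraps to the largest unsigned value, which makes the clamp a no-op ("no ceiling"), while a real, already-aligned ceiling below end backs end off by one table span so the shared table is not freed. That is my reading of the pattern also used by free_pgd_range() in mm/memory.c; a stand-alone demonstration of just the arithmetic, with a made-up PMD_SIZE, is:

/* Stand-alone demonstration of the "end - 1 > ceiling - 1" clamp.  With
 * unsigned arithmetic a ceiling of 0 wraps to the largest value, so the
 * clamp is skipped; a real, already-aligned ceiling below end pulls end
 * back by one table span.  PMD_SIZE is invented for the demo. */
#include <stdio.h>

#define PMD_SIZE (1UL << 23)

static unsigned long clamp_end(unsigned long end, unsigned long ceiling)
{
        if (end - 1 > ceiling - 1)
                end -= PMD_SIZE;
        return end;
}

int main(void)
{
        unsigned long end = 10 * PMD_SIZE;

        /* ceiling == 0: treated as "no ceiling", end is left alone */
        printf("no ceiling:   end = %#lx\n", clamp_end(end, 0));
        /* ceiling below end: back off one span so the shared table survives */
        printf("real ceiling: end = %#lx\n", clamp_end(end, 8 * PMD_SIZE));
        return 0;
}
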