Lines matching "i-tlb-size"
 * PPC Huge TLB Page Support for Kernel.
 * Based on the IA-32 version:
#include <asm/tlb.h>
#include <asm/pte-walk.h>

#define PTE_T_ORDER	(__builtin_ffs(sizeof(pte_basic_t)) - \
/* in huge_pte_offset() */
	return __find_linux_pte(mm->pgd, addr, NULL, NULL);
/* in __hugepte_alloc() */
	int i;

	num_hugepd = 1 << (pshift - pdshift);
	cachep = PGT_CACHE(pdshift - pshift);
		return -ENOMEM;
		return -ENOMEM;

	 * We have multiple higher-level entries that point to the same
	 * right higher-level entry without knowing if it's a hugepage or not.
	for (i = 0; i < num_hugepd; i++, hpdp++) {

	if (i < num_hugepd) {
		for (i = i - 1 ; i >= 0; i--, hpdp--)
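The fill-and-backtrack loop above exists because, when the huge page shift exceeds the directory shift at this level, 1 << (pshift - pdshift) consecutive directory entries must all carry the same hugepd pointer. Below is a minimal userspace sketch of that pattern, not kernel code; the helper name fill_hugepd, the slot array, and the example shift values 22/21 are illustrative assumptions only.

#include <stdio.h>
#include <stddef.h>

#define NSLOTS 8

static void *slots[NSLOTS];	/* stand-in for consecutive page-directory entries */

/* Point every directory slot spanned by one huge page at the same hugepd,
 * undoing the partial run if a slot is already populated. */
static int fill_hugepd(unsigned int idx, unsigned int pshift,
		       unsigned int pdshift, void *hugepd)
{
	unsigned int num_hugepd = 1U << (pshift - pdshift);
	unsigned int i;

	for (i = 0; i < num_hugepd; i++) {
		if (slots[idx + i])		/* slot already taken */
			break;
		slots[idx + i] = hugepd;
	}
	if (i < num_hugepd) {			/* backtrack on conflict */
		while (i-- > 0)
			slots[idx + i] = NULL;
		return -1;
	}
	return 0;
}

int main(void)
{
	int dummy;

	/* e.g. a 4 MB huge page (pshift = 22) over 2 MB directory slots (pdshift = 21) */
	if (fill_hugepd(0, 22, 21, &dummy) == 0)
		printf("filled %u slots with the same hugepd\n", 1U << (22 - 21));
	return 0;
}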
/* in huge_pte_alloc() */
	addr &= ~(sz-1);
	ptl = &mm->page_table_lock;
	ptl = &mm->page_table_lock;
/* in pseries_add_gpage() */
	number_of_pages--;
/* in pseries_alloc_bootmem_huge_page() */
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))
/* in hugepd_free_rcu_callback() */
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kmem_cache_free(PGT_CACHE(PTE_T_ORDER), batch->ptes[i]);
static void hugepd_free(struct mmu_gather *tlb, void *hugepte)

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    mm_is_thread_local(tlb->mm)) {

	(*batchp)->index = 0;

	(*batchp)->ptes[(*batchp)->index++] = hugepte;
	if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
		call_rcu(&(*batchp)->rcu, hugepd_free_rcu_callback);
static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
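For context, hugepd_free() above batches the hugepds being torn down into a page-sized array and hands a full batch to call_rcu(), so lockless page-table walkers can drain before the memory is reused. The following is a plain userspace sketch of that batching shape only; it frees immediately where the kernel defers through call_rcu(), and BATCH_SIZE, deferred_free() and batch_free_all() are made-up names for illustration.

#include <stdio.h>
#include <stdlib.h>

#define BATCH_SIZE 4	/* the kernel sizes its batch so it fills one page */

struct free_batch {
	unsigned int index;
	void *ptes[BATCH_SIZE];
};

/* Stand-in for the RCU callback: free everything collected in the batch. */
static void batch_free_all(struct free_batch *batch)
{
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		free(batch->ptes[i]);
	free(batch);
}

/* Queue one object for deferred freeing; flush the batch once it is full. */
static void deferred_free(struct free_batch **batchp, void *obj)
{
	if (*batchp == NULL) {
		*batchp = malloc(sizeof(**batchp));
		if (*batchp == NULL)
			abort();
		(*batchp)->index = 0;
	}
	(*batchp)->ptes[(*batchp)->index++] = obj;
	if ((*batchp)->index == BATCH_SIZE) {
		batch_free_all(*batchp);	/* real code: call_rcu(...) */
		*batchp = NULL;
	}
}

int main(void)
{
	struct free_batch *batch = NULL;
	int i;

	for (i = 0; i < 10; i++)
		deferred_free(&batch, malloc(16));
	if (batch)				/* flush the partial batch */
		batch_free_all(batch);
	return 0;
}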
/* in range_is_outside_limits() */
	return end - 1 > ceiling - 1;
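The single-expression comparison above relies on unsigned wrap-around: a ceiling of 0 means "no upper limit", and ceiling - 1 then wraps to ULONG_MAX so the test can never report the range as outside. A tiny userspace illustration of that arithmetic (the helper name past_ceiling is hypothetical):

#include <stdio.h>

/* Same comparison as above: with ceiling == 0 meaning "no limit",
 * the unsigned wrap of ceiling - 1 to ULONG_MAX makes the check never fire. */
static int past_ceiling(unsigned long end, unsigned long ceiling)
{
	return end - 1 > ceiling - 1;
}

int main(void)
{
	printf("%d\n", past_ceiling(0x5000, 0x4000));	/* 1: end beyond ceiling */
	printf("%d\n", past_ceiling(0x4000, 0x4000));	/* 0: end at ceiling */
	printf("%d\n", past_ceiling(0x5000, 0));	/* 0: ceiling 0 == unlimited */
	return 0;
}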
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
	int i;

	unsigned long pdmask = ~((1UL << pdshift) - 1);
	num_hugepd = 1 << (shift - pdshift);

	for (i = 0; i < num_hugepd; i++, hpdp++)

	hugepd_free(tlb, hugepte);
	pgtable_free_tlb(tlb, hugepte,
			 get_hugepd_cache_index(pdshift - shift));
static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
	hugetlb_free_pte_range(tlb, pmd, addr, end, floor, ceiling);

	 * Increment next by the size of the huge mapping since
	free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,

	pmd_free_tlb(tlb, pmd, start & PUD_MASK);
	mm_dec_nr_pmds(tlb->mm);
static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
	hugetlb_free_pmd_range(tlb, pud, addr, next, floor,

	 * Increment next by the size of the huge mapping since
	free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,

	pud_free_tlb(tlb, pud, start & PGDIR_MASK);
	mm_dec_nr_puds(tlb->mm);
 * This function frees user-level page tables of a process.
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
	pgd = pgd_offset(tlb->mm, addr);
	hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);

	 * Increment next by the size of the huge mapping since
	free_hugepd_range(tlb, (hugepd_t *)p4d, PGDIR_SHIFT,
/* in follow_huge_pd() */
	struct mm_struct *mm = vma->vm_mm;

	 * hugepage directory entries are protected by mm->page_table_lock
	ptl = &mm->page_table_lock;
	mask = (1UL << shift) - 1;
bool __init arch_hugetlb_valid_size(unsigned long size)
	int shift = __ffs(size);

	/* Check that it is a page size supported by the hardware and
	if (size <= PAGE_SIZE || !is_power_of_2(size))
static int __init add_huge_page_size(unsigned long long size)
	int shift = __ffs(size);

	if (!arch_hugetlb_valid_size((unsigned long)size))
		return -EINVAL;

	hugetlb_add_hstate(shift - PAGE_SHIFT);
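Both helpers above rely on the huge page size being a power of two larger than PAGE_SIZE, so __ffs(size) yields the page shift and shift - PAGE_SHIFT the hstate order. A small userspace sketch of that arithmetic, assuming 4 KB base pages and using __builtin_ctzl() as a stand-in for the kernel's __ffs():

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KB base pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	unsigned long size = 16UL << 20;	/* 16 MB huge page */
	int shift;

	if (size <= PAGE_SIZE || !is_power_of_2(size)) {
		fprintf(stderr, "unsupported size\n");
		return 1;
	}
	shift = __builtin_ctzl(size);	/* userspace stand-in for __ffs() */
	printf("size %lu -> shift %d -> hstate order %d\n",
	       size, shift, shift - PAGE_SHIFT);	/* 16 MB -> 24 -> 12 */
	return 0;
}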
/* in hugetlbpage_init() */
		return -ENODEV;
	pgtable_cache_add(pdshift - shift);
/* in gigantic_hugetlb_cma_reserve() */
	order = PUD_SHIFT - PAGE_SHIFT;
	order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT;
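The two assignments above pick the gigantic-page order used for the CMA reservation: a PUD-sized page on one MMU layout and a 16 GB page on the other, with the surrounding code (not shown here) choosing between them. A quick arithmetic check, assuming 4 KB base pages, a 30-bit PUD span and a 34-bit 16 GB shift (all assumptions, not taken from this file):

#include <stdio.h>

#define PAGE_SHIFT	12	/* assumption: 4 KB base pages */
#define PUD_SHIFT	30	/* assumption: 1 GB PUD span */
#define SHIFT_16G	34	/* assumption: 16 GB gigantic page */

int main(void)
{
	/* order = gigantic-page shift - PAGE_SHIFT, i.e. log2 of pages per gigantic page */
	printf("1 GB PUD-sized page: order %d\n", PUD_SHIFT - PAGE_SHIFT);	/* 18 */
	printf("16 GB page:          order %d\n", SHIFT_16G - PAGE_SHIFT);	/* 22 */
	return 0;
}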