Lines Matching +full:non +full:- +full:linear

1 // SPDX-License-Identifier: GPL-2.0-or-later
6 * this does -not- include 603 however which shares the implementation with
9 * -- BenH
15 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
40 #include <asm/code-patching.h>
48 * This struct lists the sw-supported page sizes. The hardware MMU may support
113 /* The variables below are currently only used on 64-bit Book3E
122 unsigned long linear_map_top; /* Top of linear mapping */
136 /* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
144 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
145 * - flush_tlb_page(vma, vmaddr) flushes one page
146 * - flush_tlb_range(vma, start, end) flushes a range of pages
147 * - flush_tlb_kernel_range(start, end) flushes kernel pages
149 * - local_* variants of page and mm only apply to the current
155 * These are the base non-SMP variants of page and mm flushing
162 pid = mm->context.id; in local_flush_tlb_mm()
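
The matched line sits inside the UP mm flush. A minimal sketch of how that function typically reads, assuming the MMU_NO_CONTEXT guard used elsewhere in this file:

void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);	/* drop every entry tagged with this PID */
	preempt_enable();
}
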
175 pid = mm ? mm->context.id : 0; in __local_flush_tlb_page()
183 __local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr, in local_flush_tlb_page()
190 * And here are the SMP non-local implementations
207 _tlbil_pid(p ? p->pid : 0); in do_flush_tlb_mm_ipi()
214 _tlbil_va(p->addr, p->pid, p->tsize, p->ind); in do_flush_tlb_page_ipi()
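
The two IPI handlers above dereference a small parameter block. A plausible shape, with the field set inferred from the p->addr / p->pid / p->tsize / p->ind accesses in the matched lines (the struct name is illustrative):

struct tlb_flush_param {
	unsigned long addr;	/* virtual address (or virtual PTE address) */
	unsigned int pid;	/* MMU context / PID to invalidate */
	unsigned int tsize;	/* page size encoding */
	unsigned int ind;	/* non-zero for an indirect (page-table) entry */
};
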
222 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
223 * - we are invalidating some target that isn't currently running here in flush_tlb_mm()
225 * - some other CPU is re-acquiring a lost PID for this mm
239 pid = mm->context.id; in flush_tlb_mm()
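
The broadcast variant follows the policy described in the comment above: always flush locally, and only IPI other CPUs when the mm is not core-local. A sketch under those assumptions (the exact cpumask and smp_call_function variant are inferred, not shown in this listing):

void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };

		/* Ask every other CPU that may cache this PID to drop it. */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
no_context:
	preempt_enable();
}
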
268 pid = mm->context.id; in __flush_tlb_page()
306 __flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr, in flush_tlb_page()
318 if (of_get_flat_dt_prop(root, "cooperative-partition", NULL)) in early_init_mmu_47x()
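
A sketch of the surrounding 47x check, assuming the "cooperative-partition" property is used to turn off broadcast tlbivax so the other partition is not disturbed (the cleared feature bit is an assumption):

	unsigned long root = of_get_flat_dt_root();

	/* In a cooperative partition, broadcast TLB invalidations would hit
	 * the other partition too, so fall back to IPI-based flushes. */
	if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
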
352 if (end - start == PAGE_SIZE && !(start & ~PAGE_MASK)) in flush_tlb_range()
355 flush_tlb_mm(vma->vm_mm); in flush_tlb_range()
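
The two matched lines are essentially the whole range flush: a single, page-aligned page takes the cheaper per-page path, anything larger flushes the whole context. A sketch, assuming nothing sits between them:

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (end - start == PAGE_SIZE && !(start & ~PAGE_MASK))
		flush_tlb_page(vma, start);
	else
		flush_tlb_mm(vma->vm_mm);
}
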
361 flush_tlb_mm(tlb->mm); in tlb_flush()
365 * Below are functions specific to the 64-bit variant of Book3E though that
372 * Handling of virtual linear page tables or indirect TLB entries
389 __flush_tlb_page(tlb->mm, start, tsize, 1); in tlb_flush_pgtable()
397 vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful; in tlb_flush_pgtable()
399 __flush_tlb_page(tlb->mm, vpte, tsize, 0); in tlb_flush_pgtable()
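
The shift-and-mask above maps an address to the page of PTEs that describes it in the virtual linear page table: with 8-byte PTEs, dividing by the page size and multiplying by 8 collapses into a shift by PAGE_SHIFT - 3, and masking with ~0xfff rounds down to a whole 4K page of PTEs so one invalidate covers 512 of them. A standalone demo of that arithmetic (4K pages and the example address are assumptions):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4K pages for the demo */

int main(void)
{
	unsigned long addr = 0x12345000ul;	/* hypothetical page-aligned address */
	unsigned long vpte = addr;

	/* offset of this page's PTE, rounded down to a 4K page of PTEs */
	vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;

	printf("PTE-page offset in the virtual linear table: 0x%lx\n", vpte);
	return 0;
}
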
426 shift = def->shift; in setup_page_sizes()
432 shift = (shift - 10) >> 1; in setup_page_sizes()
435 def->flags |= MMU_PAGE_SIZE_DIRECT; in setup_page_sizes()
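
The "(shift - 10) >> 1" adjustment re-expresses log2(bytes) as a power of 4KB, the unit the FSL TLBnCFG MIN/MAXSIZE fields use, before deciding whether a size can be a direct one. A standalone demo of the conversion (the shift list is a hypothetical subset of mmu_psize_defs[]):

#include <stdio.h>

int main(void)
{
	unsigned int shifts[] = { 12, 14, 16, 20, 24 };	/* 4K .. 16M */

	for (unsigned int i = 0; i < sizeof(shifts) / sizeof(shifts[0]); i++) {
		unsigned int shift = shifts[i];

		/* 4K -> 1, 16K -> 2, 64K -> 3, 1M -> 5, 16M -> 7 */
		printf("%6lu KB -> size code %u\n",
		       1ul << (shift - 10), (shift - 10) >> 1);
	}
	return 0;
}
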
463 if (!def->shift) in setup_page_sizes()
466 if (tlb1ps & (1U << (def->shift - 10))) { in setup_page_sizes()
467 def->flags |= MMU_PAGE_SIZE_DIRECT; in setup_page_sizes()
470 def->flags |= MMU_PAGE_SIZE_INDIRECT; in setup_page_sizes()
486 if (tlb0ps & (1U << (def->shift - 10))) in setup_page_sizes()
487 def->flags |= MMU_PAGE_SIZE_DIRECT; in setup_page_sizes()
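
On MAV 2.0 parts the TLBnPS registers are bitmaps of supported sizes in kilobytes, so bit (shift - 10) corresponds to a 2^shift-byte page; TLB1PS additionally gates the indirect flag in the lines above. A tiny demo of the check (the example bitmap value is made up):

#include <stdio.h>

int main(void)
{
	/* Hypothetical TLB0PS value: bits 2 and 6 set, i.e. 4KB and 64KB. */
	unsigned int tlb0ps = (1u << 2) | (1u << 6);
	unsigned int shifts[] = { 12, 14, 16 };	/* 4K, 16K, 64K */

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int shift = shifts[i];

		/* shift - 10 turns log2(bytes) into the log2(KB) bit index */
		printf("%3lu KB %s\n", 1ul << (shift - 10),
		       (tlb0ps & (1u << (shift - 10))) ? "supported" : "not supported");
	}
	return 0;
}
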
514 if (ps == (def->shift - 10)) in setup_page_sizes()
515 def->flags |= MMU_PAGE_SIZE_INDIRECT; in setup_page_sizes()
516 if (sps == (def->shift - 10)) in setup_page_sizes()
517 def->ind = ps + 10; in setup_page_sizes()
532 if (def->flags == 0) { in setup_page_sizes()
533 def->shift = 0; in setup_page_sizes()
536 pr_info(" %8ld KB as %s\n", 1ul << (def->shift - 10), in setup_page_sizes()
537 __page_type_names[def->flags & 0x3]); in setup_page_sizes()
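
The "& 0x3" index picks a name from a four-entry table keyed on the DIRECT and INDIRECT flag bits. A sketch of what that table typically contains (the exact strings are illustrative):

	/* index = (DIRECT ? 1 : 0) | (INDIRECT ? 2 : 0) */
	static const char *__page_type_names[] = {
		"unsupported",
		"direct",
		"indirect",
		"direct & indirect"
	};
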
601 /* use a quarter of the TLBCAM for bolted linear map */ in early_init_this_mmu()
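
A sketch of how that quarter is typically computed, assuming the TLB1CFG NENTRY field reports the number of TLBCAM entries:

	/* Reserve a quarter of the CAM entries for the bolted linear map,
	 * leaving the rest free for other uses. */
	num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
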
660 /* Set the global containing the top of the linear mapping in early_init_mmu_global()
673 * Limit memory so we don't have linear faults. in early_mmu_set_memory_limit()
677 * do this because highmem is not supported on 64-bit. in early_mmu_set_memory_limit()
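
What the limiting amounts to, sketched with the memblock API; the specific call is an assumption based on the comment's "permanently reduces" wording:

	/* Permanently drop RAM above the bolted linear mapping; with no
	 * highmem on 64-bit there is no other way to reach it. */
	memblock_enforce_memory_limit(linear_map_top);
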
702 /* On non-FSL Embedded 64-bit, we adjust the RMA size to match in setup_initial_memory_limit()
707 * on FSL Embedded 64-bit, usually all RAM is bolted, but with in setup_initial_memory_limit()
710 * highmem on 64-bit). We limit ppc64_rma_size to what would be in setup_initial_memory_limit()
724 /* use a quarter of the TLBCAM for bolted linear map */ in setup_initial_memory_limit()
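
A sketch of the two cases described in the comment above: on FSL parts, size the bolted linear map with a dry run over a quarter of the TLBCAM entries; otherwise fall back to the bolted-entry size. The 1GB cap and the helper signatures are assumptions, not shown in this listing:

	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned int num_cams;
		u64 linear_sz;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

		/* Dry run: how much could the CAMs map if this memblock
		 * were the only one? (call shape assumed) */
		linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
					    true, true);
		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
	} else {
		/* Non-FSL: match the bolted TLB entry, capped at 1GB. */
		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
	}
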