
// SPDX-License-Identifier: GPL-2.0-only
 * TLB Management (flush/create/diagnostics) for MMUv3 and MMUv4
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 * Utility Routine to erase a J-TLB entry
/* Locate the TLB entry for this vaddr + ASID */ in tlb_entry_erase()
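A minimal sketch of the erase flow that fragment implies: probe the joint TLB for (vaddr | ASID) and, on a hit, overwrite the indexed slot with zeroed descriptors. The auxiliary-register names (ARC_REG_TLBPD0/PD1/TLBINDEX/TLBCOMMAND), the TLBProbe/TLBWrite commands and the TLB_LKUP_ERR "probe missed" flag are assumptions taken from the ARC MMU programming model, not shown in the excerpt.

static void tlb_entry_erase_sketch(unsigned int vaddr_n_asid)
{
	unsigned int idx;

	/* Locate the TLB entry for this vaddr + ASID */
	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	/* Probe hit: index reg now points at the entry, so zero it out */
	if (likely(!(idx & TLB_LKUP_ERR))) {
		write_aux_reg(ARC_REG_TLBPD1, 0);
		write_aux_reg(ARC_REG_TLBPD0, 0);
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
	}
}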
 * This also sets up PD0 (vaddr, ASID..) for final commit in tlb_entry_insert()
 * with existing location. This will cause Write CMD to over-write in tlb_entry_insert()
/* setup the other half of TLB entry (pfn, rwx..) */ in tlb_entry_insert()
 * which doesn't flush uTLBs. I'd rather be safe than sorry. in tlb_entry_insert()
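A sketch of how those fragments could fit together in tlb_entry_insert(): look the entry up first (which also leaves PD0 programmed for the commit), ask the MMU for a slot only if the lookup missed, then write PD1 and commit. tlb_entry_lkup(), TLBGetIndex and TLBWriteNI are assumed helpers/commands of the ARC MMU interface; only the comments around them appear in the excerpt.

static void tlb_entry_insert_sketch(unsigned int pd0, phys_addr_t pd1)
{
	unsigned int idx = tlb_entry_lkup(pd0);	/* also sets up PD0 for commit */

	/* lookup missed: ask MMU for a slot; on a hit, Write overwrites it */
	if (likely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

	/* setup the other half of TLB entry (pfn, rwx..) */
	write_aux_reg(ARC_REG_TLBPD1, pd1);

	/* commit with TLBWrite, not TLBWriteNI, so the uTLBs get flushed too */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}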
 * Un-conditionally (without lookup) erase the entire MMU contents
int num_tlb = mmu->sets * mmu->ways; in local_flush_tlb_all()
/* write this entry to the TLB */ in local_flush_tlb_all()
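A sketch of the unconditional flush built from the two lines above: preload zeroed descriptors once, then walk the index register through every sets*ways slot and write the invalid entry into each. The struct name cpuinfo_arc_mmu and the TLBWriteNI command (write without uTLB invalidate, assuming the uTLBs are flushed separately) are assumptions.

static void flush_all_sketch(struct cpuinfo_arc_mmu *mmu)
{
	unsigned long flags;
	int entry, num_tlb = mmu->sets * mmu->ways;

	local_irq_save(flags);

	/* load PD0/PD1 once with the template for an invalid entry */
	write_aux_reg(ARC_REG_TLBPD0, 0);
	write_aux_reg(ARC_REG_TLBPD1, 0);

	for (entry = 0; entry < num_tlb; entry++) {
		/* write this entry to the TLB */
		write_aux_reg(ARC_REG_TLBINDEX, entry);
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
	}

	local_irq_restore(flags);
}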
if (atomic_read(&mm->mm_users) == 0) in local_flush_tlb_mm()
 * - Move to a new ASID, but only if the mm is still wired in in local_flush_tlb_mm()
 *   (Android Binder ended up calling this for vma->mm != tsk->mm, in local_flush_tlb_mm()
 *   causing h/w - s/w ASID to get out of sync) in local_flush_tlb_mm()
 * - Also, the new get_new_mmu_context() implementation allocates an in local_flush_tlb_mm()
 *   ASID only if one is not already allocated - so deallocate first in local_flush_tlb_mm()
if (current->mm == mm) in local_flush_tlb_mm()
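One way to read those notes, as a sketch: retire the mm's current ASID unconditionally, then hand it a fresh one only if it is the mm actually wired into this CPU. destroy_context() is an assumed mmu_context helper (only get_new_mmu_context() appears in the excerpt), so the exact ordering here is a guess at the intent rather than the verified function body.

static void flush_tlb_mm_sketch(struct mm_struct *mm)
{
	/* no users left: nothing will ever run with this mm again */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	destroy_context(mm);		/* deallocate the current ASID ... */
	if (current->mm == mm)		/* ... re-allocate only if still wired in */
		get_new_mmu_context(mm);
}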
 * Flush a Range of TLB entries for userland.
 * -Here the fastest way (if range is too large) is to move to next ASID
 * -In case of kernel Flush, entry has to be shot down explicitly
/* If range @start to @end is more than 32 TLB entries deep, in local_flush_tlb_range()
if (unlikely((end - start) >= PAGE_SIZE * 32)) { in local_flush_tlb_range()
local_flush_tlb_mm(vma->vm_mm); in local_flush_tlb_range()
if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) { in local_flush_tlb_range()
tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu)); in local_flush_tlb_range()
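Assembling the fragments above into one sketch of the userland range flush: a range deeper than 32 pages is handled by retiring the whole ASID (local_flush_tlb_mm), otherwise each page is shot down individually, tagged with the mm's hardware PID. asid_mm(), hw_pid() and MM_CTXT_NO_ASID are taken from the excerpt; the irq bracketing is assumed.

static void flush_range_sketch(struct vm_area_struct *vma,
			       unsigned long start, unsigned long end)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* more than 32 pages: moving to a new ASID beats 32 individual probes */
	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_mm(vma->vm_mm);
		return;
	}

	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		while (start < end) {
			tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
			start += PAGE_SIZE;
		}
	}

	local_irq_restore(flags);
}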
/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
 * Interestingly, shared TLB entries can also be flushed using just
/* exactly the same as above, except the TLB entry does not take an ASID */ in local_flush_tlb_kernel_range()
if (unlikely((end - start) >= PAGE_SIZE * 32)) { in local_flush_tlb_kernel_range()
 * Delete TLB entry in MMU for a given page (??? address)
 * NOTE: One TLB entry contains translation for a single PAGE
 * checking the ASID and using it to flush the TLB entry in local_flush_tlb_page()
if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) { in local_flush_tlb_page()
tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu)); in local_flush_tlb_page()
local_flush_tlb_page(ta->ta_vma, ta->ta_start); in ipi_flush_tlb_page()
local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end); in ipi_flush_tlb_range()
local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end); in ipi_flush_pmd_tlb_range()
local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end); in ipi_flush_tlb_kernel_range()
on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1); in flush_tlb_page()
on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1); in flush_tlb_range()
on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1); in flush_pmd_tlb_range()
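A sketch of the SMP plumbing those wrappers use: the arguments are bundled into a small struct (the ta_vma/ta_start/ta_end fields appear in the excerpt; the struct name tlb_args is assumed) and the per-CPU flush runs on every CPU in the mm's cpumask via on_each_cpu_mask(), shown here for the range variant.

struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	/* wait = 1: don't return until every targeted CPU has flushed */
	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
}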
 * Routine to create a TLB entry
 * create_tlb() assumes that current->mm == vma->mm, since in create_tlb()
 * -the ASID for the TLB entry is fetched from MMU ASID reg (valid for curr) in create_tlb()
 * -completes the lazy write to SASID reg (again valid for curr tsk) in create_tlb()
 * -Using vma->mm->context{ASID,SASID}, as opposed to MMU reg. in create_tlb()
 * -More importantly it makes this handler inconsistent with fast-path in create_tlb()
 *  TLB Refill handler which always deals with "current" in create_tlb()
 * Let's see the use cases when current->mm != vma->mm and we land here in create_tlb()
 *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault in create_tlb()
 *     Here VM wants to pre-install a TLB entry for user stack while in create_tlb()
 *     current->mm still points to pre-execve mm (hence the condition). in create_tlb()
 *     move_page_tables() tries to undo that TLB entry. in create_tlb()
 *     Thus not creating TLB entry is not any worse. in create_tlb()
 *  2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a in create_tlb()
 *     breakpoint in debugged task. Not creating a TLB now is not in create_tlb()
if (current->active_mm != vma->vm_mm) in create_tlb()
/* Create HW TLB(PD0,PD1) from PTE */ in create_tlb()
 * however Linux only saves 1 set to save PTE real-estate in create_tlb()
 * -Kernel only entries have Kr Kw Kx 0 0 0 in create_tlb()
 * -User entries have mirrored K and U bits in create_tlb()
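A sketch of the 3-bit to 6-bit permission expansion those two bullet points describe. The mask names PTE_BITS_RWX and _PAGE_GLOBAL follow the ARC pgtable headers, and the shift by 3 assumes the MMU packs Kr Kw Kx above Ur Uw Ux in PD1; both are assumptions layered on the excerpted comment.

static unsigned int pte_rwx_to_pd1_sketch(pte_t pte)
{
	unsigned int rwx = pte_val(pte) & PTE_BITS_RWX;	/* r w x */

	if (pte_val(pte) & _PAGE_GLOBAL)
		rwx <<= 3;		/* kernel-only: Kr Kw Kx 0 0 0 */
	else
		rwx |= (rwx << 3);	/* user: Kr Kw Kx Ur Uw Ux (mirrored) */

	return rwx;
}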
 * -pre-install the corresponding TLB entry into MMU
 * -Finalize the delayed D-cache flush of kernel mapping of page due to
 * Note that flush (when done) involves both WBACK - so physical page is
 * in sync as well as INV - so any non-congruent aliases don't remain
 * Exec page : Independent of aliasing/page-color considerations, in update_mmu_cache()
 *             K-mapping of a code page needs to be wback+inv so that in update_mmu_cache()
 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it in update_mmu_cache()
 * (Avoids the flush for Non-exec + congruent mapping case) in update_mmu_cache()
if ((vma->vm_flags & VM_EXEC) || in update_mmu_cache()
int dirty = !test_and_set_bit(PG_dc_clean, &page->flags); in update_mmu_cache()
/* wback + inv dcache lines (K-mapping) */ in update_mmu_cache()
/* invalidate any existing icache lines (U-mapping) */ in update_mmu_cache()
if (vma->vm_flags & VM_EXEC) in update_mmu_cache()
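A sketch of how those update_mmu_cache() fragments combine: the deferred D-cache flush is finalized only for exec pages or when the kernel and user mappings are not cache-congruent, and only if the page is still marked dirty (PG_dc_clean not yet set). addr_not_cache_congruent(), __flush_dcache_page() and __inv_icache_page() are assumed ARC cache helpers; only PG_dc_clean and the two vm_flags checks appear in the excerpt.

static void finalize_dcache_flush_sketch(struct vm_area_struct *vma,
					 struct page *page,
					 phys_addr_t paddr, unsigned long vaddr)
{
	if ((vma->vm_flags & VM_EXEC) ||
	    addr_not_cache_congruent(paddr, vaddr)) {

		int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);

		if (dirty) {
			/* wback + inv dcache lines (K-mapping) */
			__flush_dcache_page(paddr, paddr);

			/* invalidate any existing icache lines (U-mapping) */
			if (vma->vm_flags & VM_EXEC)
				__inv_icache_page(paddr, vaddr);
		}
	}
}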
 * Normal and Super pages can co-exist (of course not overlap) in TLB with a
 * new bit "SZ" in TLB page descriptor to distinguish between them.
 * - MMU page size (typical 8K, RTL fixed)
 * - software page walker address split between PGD:PTE:PFN (typical
if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) { in local_flush_pmd_tlb_range()
unsigned int asid = hw_pid(vma->vm_mm, cpu); in local_flush_pmd_tlb_range()
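A sketch of the huge-page shootdown implied by the two lines above: the whole PMD-sized range is covered by a single super-page TLB entry, so one erase with the size bit set suffices. _PAGE_HW_SZ is assumed to be the "SZ" bit in PD0 mentioned in the comment; the irq bracketing is likewise assumed.

static void flush_pmd_range_sketch(struct vm_area_struct *vma,
				   unsigned long start)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	local_irq_save(flags);

	if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
		unsigned int asid = hw_pid(vma->vm_mm, cpu);

		/* no loop needed: one super-page entry maps the whole range */
		tlb_entry_erase(start | _PAGE_HW_SZ | asid);
	}

	local_irq_restore(flags);
}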
unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4, in read_decode_mmu_bcr()
unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4, in read_decode_mmu_bcr()
mmu->ver = (tmp >> 24); in read_decode_mmu_bcr()
if (is_isa_arcompact() && mmu->ver == 3) { in read_decode_mmu_bcr()
mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1); in read_decode_mmu_bcr()
mmu->sets = 1 << mmu3->sets; in read_decode_mmu_bcr()
mmu->ways = 1 << mmu3->ways; in read_decode_mmu_bcr()
mmu->u_dtlb = mmu3->u_dtlb; in read_decode_mmu_bcr()
mmu->u_itlb = mmu3->u_itlb; in read_decode_mmu_bcr()
mmu->sasid = mmu3->sasid; in read_decode_mmu_bcr()
mmu->pg_sz_k = 1 << (mmu4->sz0 - 1); in read_decode_mmu_bcr()
mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11); in read_decode_mmu_bcr()
mmu->sets = 64 << mmu4->n_entry; in read_decode_mmu_bcr()
mmu->ways = mmu4->n_ways * 2; in read_decode_mmu_bcr()
mmu->u_dtlb = mmu4->u_dtlb * 4; in read_decode_mmu_bcr()
mmu->u_itlb = mmu4->u_itlb * 4; in read_decode_mmu_bcr()
mmu->sasid = mmu4->sasid; in read_decode_mmu_bcr()
pae_exists = mmu->pae = mmu4->pae; in read_decode_mmu_bcr()
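A worked, standalone example of the MMUv4 BCR scaling shown above, using the same shift/multiply factors from the excerpt. The raw field values below are hypothetical, chosen only to illustrate the decode; they do not describe any particular silicon build.

#include <stdio.h>

int main(void)
{
	/* hypothetical raw BCR fields for an MMUv4 build */
	unsigned int sz0 = 4, sz1 = 12, n_entry = 2, n_ways = 2;
	unsigned int u_dtlb = 2, u_itlb = 1;

	printf("normal pg   : %u KB\n", 1u << (sz0 - 1));	/* 8 KB */
	printf("super pg    : %u MB\n", 1u << (sz1 - 11));	/* 2 MB */
	printf("JTLB sets   : %u\n",    64u << n_entry);	/* 256  */
	printf("JTLB ways   : %u\n",    n_ways * 2);		/* 4    */
	printf("uDTLB/uITLB : %u/%u\n", u_dtlb * 4, u_itlb * 4);/* 8/4  */

	return 0;
}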
if (p_mmu->s_pg_sz_m) in arc_mmu_mumbojumbo()
p_mmu->s_pg_sz_m, in arc_mmu_mumbojumbo()
n += scnprintf(buf + n, len - n, in arc_mmu_mumbojumbo()
p_mmu->ver, p_mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS, in arc_mmu_mumbojumbo()
p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways, in arc_mmu_mumbojumbo()
p_mmu->u_dtlb, p_mmu->u_itlb, in arc_mmu_mumbojumbo()
IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40)); in arc_mmu_mumbojumbo()
 * - For older ARC700 cpus, only v3 supported in arc_mmu_init()
 * - For HS cpus, v4 was baseline and v5 is backwards compatible in arc_mmu_init()
if (is_isa_arcompact() && mmu->ver == 3) in arc_mmu_init()
else if (is_isa_arcv2() && mmu->ver >= 4) in arc_mmu_init()
panic("MMU ver %d doesn't match kernel built for\n", mmu->ver); in arc_mmu_init()
if (mmu->pg_sz_k != TO_KB(PAGE_SIZE)) in arc_mmu_init()
mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE)) in arc_mmu_init()
if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae) in arc_mmu_init()
 * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
 * The mapping is Column-first.
 *		---------------------	-----------
 *		|way0|way1|way2|way3|	|way0|way1|
 *		---------------------	-----------
 * [set0]	|  0 |  1 |  2 |  3 |	|  0 |  1 |
 * [set1]	|  4 |  5 |  6 |  7 |	|  2 |  3 |
 * ~		  ~			  ~
 * [set127]	| 508| 509| 510| 511|	| 254| 255|
 *		---------------------	-----------
#define SET_WAY_TO_IDX(mmu, set, way) ((set) * mmu->ways + (way))
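A tiny standalone check of the linear-index mapping defined by SET_WAY_TO_IDX above: it regenerates the first and last rows of the table for the 4-way geometry (128 sets x 4 ways = indexes 0..511). N_SETS/N_WAYS are local illustration constants, not kernel symbols.

#include <stdio.h>

#define N_SETS	128
#define N_WAYS	4
#define SET_WAY_TO_IDX(set, way)	((set) * N_WAYS + (way))

int main(void)
{
	int set, way;

	for (set = 0; set < N_SETS; set++) {
		if (set > 1 && set < N_SETS - 1)
			continue;	/* print only set0, set1 and set127 */

		printf("[set%d]", set);
		for (way = 0; way < N_WAYS; way++)
			printf(" %3d", SET_WAY_TO_IDX(set, way));
		printf("\n");
	}
	return 0;
}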
/* Handling of Duplicate PD (TLB entry) in MMU.
 * -Could be due to buggy customer tapeouts or obscure kernel bugs
 * -MMU complains not at the time of duplicate PD installation, but at the
 * -Ideally these should never happen - but if they do - workaround by deleting
 * -Knob to be verbose about it. (TODO: hook them up to debugfs)
int set, n_ways = mmu->ways; in do_tlb_overlap_fault()
BUG_ON(mmu->ways > 4); in do_tlb_overlap_fault()
/* loop through all sets of TLB */ in do_tlb_overlap_fault()
for (set = 0; set < mmu->sets; set++) { in do_tlb_overlap_fault()
for (way = 0; way < n_ways - 1; way++) { in do_tlb_overlap_fault()
pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n", in do_tlb_overlap_fault()