Lines Matching +full:local +full:- +full:pid
1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
15 #include <asm/ppc-opcode.h>
29 unsigned int pid, in tlbiel_radix_set_isa300() argument
36 rs = ((unsigned long)pid << PPC_BITLSHIFT(31)); in tlbiel_radix_set_isa300()
95 WARN(1, "%s called on pre-POWER9 CPU\n", __func__); in radix__tlbiel_all()
100 static __always_inline void __tlbiel_pid(unsigned long pid, int set, in __tlbiel_pid() argument
107 rs = ((unsigned long)pid) << PPC_BITLSHIFT(31); in __tlbiel_pid()
116 static __always_inline void __tlbie_pid(unsigned long pid, unsigned long ric) in __tlbie_pid() argument
121 rs = pid << PPC_BITLSHIFT(31); in __tlbie_pid()
130 static __always_inline void __tlbie_pid_lpid(unsigned long pid, in __tlbie_pid_lpid() argument
137 rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31))); in __tlbie_pid_lpid()
173 static __always_inline void __tlbiel_va(unsigned long va, unsigned long pid, in __tlbiel_va() argument
180 rs = pid << PPC_BITLSHIFT(31); in __tlbiel_va()
189 static __always_inline void __tlbie_va(unsigned long va, unsigned long pid, in __tlbie_va() argument
196 rs = pid << PPC_BITLSHIFT(31); in __tlbie_va()
205 static __always_inline void __tlbie_va_lpid(unsigned long va, unsigned long pid, in __tlbie_va_lpid() argument
213 rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31))); in __tlbie_va_lpid()
239 static inline void fixup_tlbie_va(unsigned long va, unsigned long pid, in fixup_tlbie_va() argument
249 __tlbie_va(va, pid, ap, RIC_FLUSH_TLB); in fixup_tlbie_va()
253 static inline void fixup_tlbie_va_range(unsigned long va, unsigned long pid, in fixup_tlbie_va_range() argument
263 __tlbie_va(va, pid, ap, RIC_FLUSH_TLB); in fixup_tlbie_va_range()
268 unsigned long pid, in fixup_tlbie_va_range_lpid() argument
279 __tlbie_va_lpid(va, pid, lpid, ap, RIC_FLUSH_TLB); in fixup_tlbie_va_range_lpid()
283 static inline void fixup_tlbie_pid(unsigned long pid) in fixup_tlbie_pid() argument
289 unsigned long va = ((1UL << 52) - 1); in fixup_tlbie_pid()
298 __tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB); in fixup_tlbie_pid()
302 static inline void fixup_tlbie_pid_lpid(unsigned long pid, unsigned long lpid) in fixup_tlbie_pid_lpid() argument
308 unsigned long va = ((1UL << 52) - 1); in fixup_tlbie_pid_lpid()
317 __tlbie_va_lpid(va, pid, lpid, mmu_get_ap(MMU_PAGE_64K), in fixup_tlbie_pid_lpid()
342 unsigned long va = ((1UL << 52) - 1); in fixup_tlbie_lpid()
358 static inline void _tlbiel_pid(unsigned long pid, unsigned long ric) in _tlbiel_pid() argument
368 __tlbiel_pid(pid, 0, RIC_FLUSH_PWC); in _tlbiel_pid()
372 __tlbiel_pid(pid, 0, RIC_FLUSH_TLB); in _tlbiel_pid()
381 __tlbiel_pid(pid, 0, RIC_FLUSH_ALL); in _tlbiel_pid()
387 __tlbiel_pid(pid, set, RIC_FLUSH_TLB); in _tlbiel_pid()
394 static inline void _tlbie_pid(unsigned long pid, unsigned long ric) in _tlbie_pid() argument
400 * must be a compile-time constraint to match the "i" constraint in _tlbie_pid()
405 __tlbie_pid(pid, RIC_FLUSH_TLB); in _tlbie_pid()
406 fixup_tlbie_pid(pid); in _tlbie_pid()
409 __tlbie_pid(pid, RIC_FLUSH_PWC); in _tlbie_pid()
413 __tlbie_pid(pid, RIC_FLUSH_ALL); in _tlbie_pid()
414 fixup_tlbie_pid(pid); in _tlbie_pid()
419 static inline void _tlbie_pid_lpid(unsigned long pid, unsigned long lpid, in _tlbie_pid_lpid() argument
426 * must be a compile-time constraint to match the "i" constraint in _tlbie_pid_lpid()
431 __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB); in _tlbie_pid_lpid()
432 fixup_tlbie_pid_lpid(pid, lpid); in _tlbie_pid_lpid()
435 __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC); in _tlbie_pid_lpid()
439 __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_ALL); in _tlbie_pid_lpid()
440 fixup_tlbie_pid_lpid(pid, lpid); in _tlbie_pid_lpid()
445 unsigned long pid; member
453 if (t->ric == RIC_FLUSH_TLB) in do_tlbiel_pid()
454 _tlbiel_pid(t->pid, RIC_FLUSH_TLB); in do_tlbiel_pid()
455 else if (t->ric == RIC_FLUSH_PWC) in do_tlbiel_pid()
456 _tlbiel_pid(t->pid, RIC_FLUSH_PWC); in do_tlbiel_pid()
458 _tlbiel_pid(t->pid, RIC_FLUSH_ALL); in do_tlbiel_pid()
462 unsigned long pid, unsigned long ric) in _tlbiel_pid_multicast() argument
465 struct tlbiel_pid t = { .pid = pid, .ric = ric }; in _tlbiel_pid_multicast()
473 if (atomic_read(&mm->context.copros) > 0) in _tlbiel_pid_multicast()
474 _tlbie_pid(pid, RIC_FLUSH_ALL); in _tlbiel_pid_multicast()
483 * must be a compile-time constraint to match the "i" constraint in _tlbie_lpid()
506 * must be a compile-time constraint to match the "i" constraint in _tlbie_lpid_guest()
525 unsigned long pid, unsigned long page_size, in __tlbiel_va_range() argument
532 __tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB); in __tlbiel_va_range()
535 static __always_inline void _tlbiel_va(unsigned long va, unsigned long pid, in _tlbiel_va() argument
541 __tlbiel_va(va, pid, ap, ric); in _tlbiel_va()
546 unsigned long pid, unsigned long page_size, in _tlbiel_va_range() argument
551 __tlbiel_pid(pid, 0, RIC_FLUSH_PWC); in _tlbiel_va_range()
552 __tlbiel_va_range(start, end, pid, page_size, psize); in _tlbiel_va_range()
557 unsigned long pid, unsigned long page_size, in __tlbie_va_range() argument
564 __tlbie_va(addr, pid, ap, RIC_FLUSH_TLB); in __tlbie_va_range()
566 fixup_tlbie_va_range(addr - page_size, pid, ap); in __tlbie_va_range()
570 unsigned long pid, unsigned long lpid, in __tlbie_va_range_lpid() argument
578 __tlbie_va_lpid(addr, pid, lpid, ap, RIC_FLUSH_TLB); in __tlbie_va_range_lpid()
580 fixup_tlbie_va_range_lpid(addr - page_size, pid, lpid, ap); in __tlbie_va_range_lpid()
583 static __always_inline void _tlbie_va(unsigned long va, unsigned long pid, in _tlbie_va() argument
589 __tlbie_va(va, pid, ap, ric); in _tlbie_va()
590 fixup_tlbie_va(va, pid, ap); in _tlbie_va()
595 unsigned long pid; member
605 if (t->ric == RIC_FLUSH_TLB) in do_tlbiel_va()
606 _tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_TLB); in do_tlbiel_va()
607 else if (t->ric == RIC_FLUSH_PWC) in do_tlbiel_va()
608 _tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_PWC); in do_tlbiel_va()
610 _tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_ALL); in do_tlbiel_va()
614 unsigned long va, unsigned long pid, in _tlbiel_va_multicast() argument
618 struct tlbiel_va t = { .va = va, .pid = pid, .psize = psize, .ric = ric }; in _tlbiel_va_multicast()
620 if (atomic_read(&mm->context.copros) > 0) in _tlbiel_va_multicast()
621 _tlbie_va(va, pid, psize, RIC_FLUSH_TLB); in _tlbiel_va_multicast()
625 unsigned long pid; member
637 _tlbiel_va_range(t->start, t->end, t->pid, t->page_size, in do_tlbiel_va_range()
638 t->psize, t->also_pwc); in do_tlbiel_va_range()
653 unsigned long pid, unsigned long page_size, in _tlbie_va_range() argument
658 __tlbie_pid(pid, RIC_FLUSH_PWC); in _tlbie_va_range()
659 __tlbie_va_range(start, end, pid, page_size, psize); in _tlbie_va_range()
664 unsigned long pid, unsigned long lpid, in _tlbie_va_range_lpid() argument
670 __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC); in _tlbie_va_range_lpid()
671 __tlbie_va_range_lpid(start, end, pid, lpid, page_size, psize); in _tlbie_va_range_lpid()
677 unsigned long pid, unsigned long page_size, in _tlbiel_va_range_multicast() argument
682 .pid = pid, .page_size = page_size, in _tlbiel_va_range_multicast()
686 if (atomic_read(&mm->context.copros) > 0) in _tlbiel_va_range_multicast()
687 _tlbie_va_range(start, end, pid, page_size, psize, also_pwc); in _tlbiel_va_range_multicast()
693 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
694 * - flush_tlb_page(vma, vmaddr) flushes one page
695 * - flush_tlb_range(vma, start, end) flushes a range of pages
696 * - flush_tlb_kernel_range(start, end) flushes kernel pages
698 * - local_* variants of page and mm only apply to the current
703 unsigned long pid; in radix__local_flush_tlb_mm() local
706 pid = mm->context.id; in radix__local_flush_tlb_mm()
707 if (pid != MMU_NO_CONTEXT) in radix__local_flush_tlb_mm()
708 _tlbiel_pid(pid, RIC_FLUSH_TLB); in radix__local_flush_tlb_mm()
716 unsigned long pid; in radix__local_flush_all_mm() local
719 pid = mm->context.id; in radix__local_flush_all_mm()
720 if (pid != MMU_NO_CONTEXT) in radix__local_flush_all_mm()
721 _tlbiel_pid(pid, RIC_FLUSH_ALL); in radix__local_flush_all_mm()
735 unsigned long pid; in radix__local_flush_tlb_page_psize() local
738 pid = mm->context.id; in radix__local_flush_tlb_page_psize()
739 if (pid != MMU_NO_CONTEXT) in radix__local_flush_tlb_page_psize()
740 _tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB); in radix__local_flush_tlb_page_psize()
751 radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize); in radix__local_flush_tlb_page()
759 * and not flushing them when RIC = 0 for a PID/LPID invalidate. in mm_needs_flush_escalation()
763 * this workaround is required - escalate all RIC=0 IS=1/2/3 flushes in mm_needs_flush_escalation()
770 if (atomic_read(&mm->context.copros) > 0) in mm_needs_flush_escalation()
781 unsigned long pid = mm->context.id; in exit_lazy_flush_tlb() local
787 * interrupted here. In that case, current->mm will be set to mm, in exit_lazy_flush_tlb()
788 * because kthread_use_mm() setting ->mm and switching to the mm is in exit_lazy_flush_tlb()
791 if (current->mm == mm) in exit_lazy_flush_tlb()
794 if (current->active_mm == mm) { in exit_lazy_flush_tlb()
795 WARN_ON_ONCE(current->mm != NULL); in exit_lazy_flush_tlb()
798 current->active_mm = &init_mm; in exit_lazy_flush_tlb()
812 atomic_dec(&mm->context.active_cpus); in exit_lazy_flush_tlb()
819 _tlbiel_pid(pid, RIC_FLUSH_ALL); in exit_lazy_flush_tlb()
833 * parallel with our local flush, but generic code does not in exit_flush_lazy_tlbs()
850 * mm_cpumask can be trimmed for the case where it's not a single-threaded
875 int active_cpus = atomic_read(&mm->context.active_cpus); in flush_type_needed()
881 if (current->mm != mm) { in flush_type_needed()
896 if (atomic_read(&mm->context.copros) > 0) in flush_type_needed()
908 * If we are running the only thread of a single-threaded process, in flush_type_needed()
913 if (atomic_read(&mm->mm_users) <= 1 && current->mm == mm) { in flush_type_needed()
920 * will only require a local flush. in flush_type_needed()
931 if (current->mm == mm) in flush_type_needed()
944 unsigned long pid; in radix__flush_tlb_mm() local
947 pid = mm->context.id; in radix__flush_tlb_mm()
948 if (unlikely(pid == MMU_NO_CONTEXT)) in radix__flush_tlb_mm()
960 _tlbiel_pid(pid, RIC_FLUSH_TLB); in radix__flush_tlb_mm()
965 if (atomic_read(&mm->context.copros) > 0) in radix__flush_tlb_mm()
967 pseries_rpt_invalidate(pid, tgt, H_RPTI_TYPE_TLB, in radix__flush_tlb_mm()
968 H_RPTI_PAGE_ALL, 0, -1UL); in radix__flush_tlb_mm()
971 _tlbie_pid(pid, RIC_FLUSH_ALL); in radix__flush_tlb_mm()
973 _tlbie_pid(pid, RIC_FLUSH_TLB); in radix__flush_tlb_mm()
975 _tlbiel_pid_multicast(mm, pid, RIC_FLUSH_TLB); in radix__flush_tlb_mm()
984 unsigned long pid; in __flush_all_mm() local
987 pid = mm->context.id; in __flush_all_mm()
988 if (unlikely(pid == MMU_NO_CONTEXT)) in __flush_all_mm()
995 _tlbiel_pid(pid, RIC_FLUSH_ALL); in __flush_all_mm()
1002 if (atomic_read(&mm->context.copros) > 0) in __flush_all_mm()
1004 pseries_rpt_invalidate(pid, tgt, type, in __flush_all_mm()
1005 H_RPTI_PAGE_ALL, 0, -1UL); in __flush_all_mm()
1007 _tlbie_pid(pid, RIC_FLUSH_ALL); in __flush_all_mm()
1009 _tlbiel_pid_multicast(mm, pid, RIC_FLUSH_ALL); in __flush_all_mm()
1023 unsigned long pid; in radix__flush_tlb_page_psize() local
1026 pid = mm->context.id; in radix__flush_tlb_page_psize()
1027 if (unlikely(pid == MMU_NO_CONTEXT)) in radix__flush_tlb_page_psize()
1034 _tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB); in radix__flush_tlb_page_psize()
1043 if (atomic_read(&mm->context.copros) > 0) in radix__flush_tlb_page_psize()
1045 pseries_rpt_invalidate(pid, tgt, H_RPTI_TYPE_TLB, in radix__flush_tlb_page_psize()
1049 _tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB); in radix__flush_tlb_page_psize()
1051 _tlbiel_va_multicast(mm, vmaddr, pid, psize, RIC_FLUSH_TLB); in radix__flush_tlb_page_psize()
1062 radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize); in radix__flush_tlb_page()
1087 * If kernel TLBIs ever become local rather than global, then
1107 #define TLB_FLUSH_ALL -1UL
1110 * Number of pages above which we invalidate the entire PID rather than
1111 * flush individual pages, for local and global flushes respectively.
1114 * It also does not iterate over sets like the local tlbiel variant when
1115 * invalidating a full PID, so it has a far lower threshold to change from
1116 * individual page flushes to full-pid flushes.
1124 unsigned long pid; in __radix__flush_tlb_range() local
1127 unsigned long nr_pages = (end - start) >> page_shift; in __radix__flush_tlb_range()
1132 pid = mm->context.id; in __radix__flush_tlb_range()
1133 if (unlikely(pid == MMU_NO_CONTEXT)) in __radix__flush_tlb_range()
1149 * full pid flush already does the PWC flush. if it is not full pid in __radix__flush_tlb_range()
1153 if (!flush_pid && (end - start) >= PMD_SIZE) in __radix__flush_tlb_range()
1163 if (atomic_read(&mm->context.copros) > 0) in __radix__flush_tlb_range()
1167 pseries_rpt_invalidate(pid, tgt, type, pg_sizes, start, end); in __radix__flush_tlb_range()
1173 _tlbiel_pid(pid, RIC_FLUSH_ALL); in __radix__flush_tlb_range()
1176 _tlbie_pid(pid, RIC_FLUSH_ALL); in __radix__flush_tlb_range()
1178 _tlbiel_pid_multicast(mm, pid, RIC_FLUSH_ALL); in __radix__flush_tlb_range()
1186 hstart = (start + PMD_SIZE - 1) & PMD_MASK; in __radix__flush_tlb_range()
1196 __tlbiel_pid(pid, 0, RIC_FLUSH_PWC); in __radix__flush_tlb_range()
1197 __tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize); in __radix__flush_tlb_range()
1199 __tlbiel_va_range(hstart, hend, pid, in __radix__flush_tlb_range()
1205 __tlbie_pid(pid, RIC_FLUSH_PWC); in __radix__flush_tlb_range()
1206 __tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize); in __radix__flush_tlb_range()
1208 __tlbie_va_range(hstart, hend, pid, in __radix__flush_tlb_range()
1213 start, end, pid, page_size, mmu_virtual_psize, flush_pwc); in __radix__flush_tlb_range()
1216 hstart, hend, pid, PMD_SIZE, MMU_PAGE_2M, flush_pwc); in __radix__flush_tlb_range()
1232 __radix__flush_tlb_range(vma->vm_mm, start, end); in radix__flush_tlb_range()
1247 return -1; in radix_get_mmu_psize()
1293 struct mm_struct *mm = tlb->mm; in radix__tlb_flush()
1294 int page_size = tlb->page_size; in radix__tlb_flush()
1295 unsigned long start = tlb->start; in radix__tlb_flush()
1296 unsigned long end = tlb->end; in radix__tlb_flush()
1305 if (tlb->fullmm || tlb->need_flush_all) { in radix__tlb_flush()
1307 } else if ( (psize = radix_get_mmu_psize(page_size)) == -1) { in radix__tlb_flush()
1308 if (!tlb->freed_tables) in radix__tlb_flush()
1313 if (!tlb->freed_tables) in radix__tlb_flush()
1324 unsigned long pid; in __radix__flush_tlb_range_psize() local
1327 unsigned long nr_pages = (end - start) >> page_shift; in __radix__flush_tlb_range_psize()
1332 pid = mm->context.id; in __radix__flush_tlb_range_psize()
1333 if (unlikely(pid == MMU_NO_CONTEXT)) in __radix__flush_tlb_range_psize()
1358 if (atomic_read(&mm->context.copros) > 0) in __radix__flush_tlb_range_psize()
1360 pseries_rpt_invalidate(pid, tgt, type, pg_sizes, start, end); in __radix__flush_tlb_range_psize()
1363 _tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB); in __radix__flush_tlb_range_psize()
1369 _tlbie_pid(pid, in __radix__flush_tlb_range_psize()
1372 _tlbiel_pid_multicast(mm, pid, in __radix__flush_tlb_range_psize()
1379 _tlbiel_va_range(start, end, pid, page_size, psize, also_pwc); in __radix__flush_tlb_range_psize()
1381 _tlbie_va_range(start, end, pid, page_size, psize, also_pwc); in __radix__flush_tlb_range_psize()
1384 start, end, pid, page_size, psize, also_pwc); in __radix__flush_tlb_range_psize()
1405 unsigned long pid, end; in radix__flush_tlb_collapsed_pmd() local
1408 pid = mm->context.id; in radix__flush_tlb_collapsed_pmd()
1409 if (unlikely(pid == MMU_NO_CONTEXT)) in radix__flush_tlb_collapsed_pmd()
1425 _tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true); in radix__flush_tlb_collapsed_pmd()
1435 if (atomic_read(&mm->context.copros) > 0) in radix__flush_tlb_collapsed_pmd()
1437 pseries_rpt_invalidate(pid, tgt, type, pg_sizes, in radix__flush_tlb_collapsed_pmd()
1440 _tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true); in radix__flush_tlb_collapsed_pmd()
1443 addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true); in radix__flush_tlb_collapsed_pmd()
1453 radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M); in radix__flush_pmd_tlb_range()
1465 rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */ in radix__flush_tlb_all()
1483 * Performs process-scoped invalidations for a given LPID
1486 void do_h_rpt_invalidate_prt(unsigned long pid, unsigned long lpid, in do_h_rpt_invalidate_prt() argument
1499 _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_ALL); in do_h_rpt_invalidate_prt()
1504 _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC); in do_h_rpt_invalidate_prt()
1506 /* Full PID flush */ in do_h_rpt_invalidate_prt()
1507 if (start == 0 && end == -1) in do_h_rpt_invalidate_prt()
1508 return _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB); in do_h_rpt_invalidate_prt()
1513 if (!(pg_sizes & def->h_rpt_pgsize)) in do_h_rpt_invalidate_prt()
1516 nr_pages = (end - start) >> def->shift; in do_h_rpt_invalidate_prt()
1521 * the ceiling, convert the request into a full PID flush. in do_h_rpt_invalidate_prt()
1522 * And since PID flush takes out all the page sizes, there in do_h_rpt_invalidate_prt()
1526 _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB); in do_h_rpt_invalidate_prt()
1529 _tlbie_va_range_lpid(start, end, pid, lpid, in do_h_rpt_invalidate_prt()
1530 (1UL << def->shift), psize, false); in do_h_rpt_invalidate_prt()