// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>

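/* Flush all non-global TLB entries for @asid on the local hart. */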
static inline void local_flush_tlb_all_asid(unsigned long asid)
{
	__asm__ __volatile__ ("sfence.vma x0, %0"
			:
			: "r" (asid)
			: "memory");
}

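/* Flush the local TLB entry mapping @addr in address space @asid. */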
static inline void local_flush_tlb_page_asid(unsigned long addr,
		unsigned long asid)
{
	__asm__ __volatile__ ("sfence.vma %0, %1"
			:
			: "r" (addr), "r" (asid)
			: "memory");
}

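/*
 * Flush a virtual address range on the local hart.  A single
 * @stride-sized region is flushed by address; anything larger falls
 * back to a full flush rather than fencing page by page.
 */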
static inline void local_flush_tlb_range(unsigned long start,
		unsigned long size, unsigned long stride)
{
	if (size <= stride)
		local_flush_tlb_page(start);
	else
		local_flush_tlb_all();
}

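/* As above, but confine the flush to the address space named by @asid. */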
static inline void local_flush_tlb_range_asid(unsigned long start,
		unsigned long size, unsigned long stride, unsigned long asid)
{
	if (size <= stride)
		local_flush_tlb_page_asid(start, asid);
	else
		local_flush_tlb_all_asid(asid);
}

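/* IPI callback: flush the entire TLB on the receiving hart. */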
static void __ipi_flush_tlb_all(void *info)
{
	local_flush_tlb_all();
}

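/*
 * Flush the TLB on every hart, either by sending IPIs directly or by
 * asking the SBI firmware to broadcast the fence on our behalf.
 */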
void flush_tlb_all(void)
{
	if (riscv_use_ipi_for_rfence())
		on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
	else
		sbi_remote_sfence_vma(NULL, 0, -1);
}

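/* Arguments marshalled to remote harts for the IPI-based range flushes. */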
struct flush_tlb_range_data {
	unsigned long asid;
	unsigned long start;
	unsigned long size;
	unsigned long stride;
};

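/* IPI callback: ASID-qualified range flush on the receiving hart. */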
static void __ipi_flush_tlb_range_asid(void *info)
{
	struct flush_tlb_range_data *d = info;

	local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}

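/* IPI callback: range flush without an ASID on the receiving hart. */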
static void __ipi_flush_tlb_range(void *info)
{
	struct flush_tlb_range_data *d = info;

	local_flush_tlb_range(d->start, d->size, d->stride);
}

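/*
 * Common worker for the flush_tlb_*() variants below.  It picks the
 * cheapest mechanism available: a purely local fence when no other
 * hart has run @mm, otherwise direct IPIs or an SBI remote fence,
 * each ASID-qualified when the ASID allocator is in use.
 */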
static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
			      unsigned long size, unsigned long stride)
{
	struct flush_tlb_range_data ftd;
	struct cpumask *cmask = mm_cpumask(mm);
	unsigned int cpuid;
	bool broadcast;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();
	/* check if the tlbflush needs to be sent to other CPUs */
	broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
	if (static_branch_unlikely(&use_asid_allocator)) {
		unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask;

		if (broadcast) {
			if (riscv_use_ipi_for_rfence()) {
				ftd.asid = asid;
				ftd.start = start;
				ftd.size = size;
				ftd.stride = stride;
				on_each_cpu_mask(cmask,
						 __ipi_flush_tlb_range_asid,
						 &ftd, 1);
			} else {
				sbi_remote_sfence_vma_asid(cmask,
							   start, size, asid);
			}
		} else {
			local_flush_tlb_range_asid(start, size, stride, asid);
		}
	} else {
		if (broadcast) {
			if (riscv_use_ipi_for_rfence()) {
				ftd.asid = 0;
				ftd.start = start;
				ftd.size = size;
				ftd.stride = stride;
				on_each_cpu_mask(cmask,
						 __ipi_flush_tlb_range,
						 &ftd, 1);
			} else {
				sbi_remote_sfence_vma(cmask, start, size);
			}
		} else {
			local_flush_tlb_range(start, size, stride);
		}
	}

	put_cpu();
}

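/* start = 0 and size = -1 cover the mm's entire address space. */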
void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_range(mm, 0, -1, PAGE_SIZE);
}

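/* One page: @size and @stride are both PAGE_SIZE. */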
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	__flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
}

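/*
 * Flush a user range with a PAGE_SIZE stride.  On the local and IPI
 * paths anything larger than one page degrades to a full flush; the
 * SBI path passes the exact range to the firmware.
 */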
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	__flush_tlb_range(vma->vm_mm, start, end - start, PAGE_SIZE);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
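/* Huge pages are mapped at PMD level, so flush with a PMD_SIZE stride. */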
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	__flush_tlb_range(vma->vm_mm, start, end - start, PMD_SIZE);
}
#endif