// SPDX-License-Identifier: GPL-2.0
/*
 *  Lockless get_user_pages_fast for s390
 *
 *  Copyright IBM Corp. 2010
 *  Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>

/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	unsigned long mask;
	pte_t *ptep, pte;

	mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;

	ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
	do {
		pte = *ptep;
		barrier();
		/* Similar to the PMD case, NUMA hinting must take slow path */
		if (pte_protnone(pte))
			return 0;
		if ((pte_val(pte) & mask) != 0)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			return 0;
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(head);
			return 0;
		}
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}
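
/*
 * Illustrative sketch only (hypothetical helper, not used by the code
 * above): the speculative pin-and-recheck pattern that gup_pte_range()
 * relies on, shown in isolation.
 */
static inline int try_pin_pte_page(pte_t *ptep, pte_t pte, struct page **pagep)
{
	struct page *head = compound_head(pte_page(pte));

	/* Fails if the page is already being freed (refcount zero). */
	if (!page_cache_get_speculative(head))
		return 0;
	/* The pte may have changed while the reference was taken; recheck. */
	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		put_page(head);
		return 0;
	}
	/* The reference is held on the compound head; return the actual page. */
	*pagep = pte_page(pte);
	return 1;
}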

static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	unsigned long mask;
	int refs;

	mask = (write ? _SEGMENT_ENTRY_PROTECT : 0) | _SEGMENT_ENTRY_INVALID;
	if ((pmd_val(pmd) & mask) != 0)
		return 0;
	VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));

	refs = 0;
	head = pmd_page(pmd);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}
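
/*
 * Worked example for the refs accounting above: a large pmd on s390 maps
 * a 1 MB segment, i.e. 256 pages of 4 KB. Pinning a whole segment thus
 * stores 256 entries in pages[] but takes all 256 references with a
 * single page_cache_add_speculative() call on the head page, rather than
 * one atomic increment per page.
 */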

static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp, pmd;

	pmdp = (pmd_t *) pudp;
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmdp = (pmd_t *) pud_deref(pud);
	pmdp += pmd_index(addr);
	do {
		pmd = *pmdp;
		barrier();
		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		if (unlikely(pmd_large(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_protnone(pmd))
				return 0;
			if (!gup_huge_pmd(pmdp, pmd, addr, next,
					  write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmdp, pmd, addr, next,
					  write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	unsigned long mask;
	int refs;

	mask = (write ? _REGION_ENTRY_PROTECT : 0) | _REGION_ENTRY_INVALID;
	if ((pud_val(pud) & mask) != 0)
		return 0;
	VM_BUG_ON(!pfn_valid(pud_pfn(pud)));

	refs = 0;
	head = pud_page(pud);
	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(pud) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}

static inline int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp, pud;

	pudp = (pud_t *) p4dp;
	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pudp = (pud_t *) p4d_deref(p4d);
	pudp += pud_index(addr);
	do {
		pud = *pudp;
		barrier();
		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_large(pud))) {
			if (!gup_huge_pud(pudp, pud, addr, next, write, pages,
					  nr))
				return 0;
		} else if (!gup_pmd_range(pudp, pud, addr, next, write, pages,
					  nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

static inline int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	p4d_t *p4dp, p4d;

	p4dp = (p4d_t *) pgdp;
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		p4dp = (p4d_t *) pgd_deref(pgd);
	p4dp += p4d_index(addr);
	do {
		p4d = *p4dp;
		barrier();
		next = p4d_addr_end(addr, end);
		if (p4d_none(p4d))
			return 0;
		if (!gup_pud_range(p4dp, p4d, addr, next, write, pages, nr))
			return 0;
	} while (p4dp++, addr = next, addr != end);

	return 1;
}
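
/*
 * Editorial note on the casts in the gup_*_range() walkers above: s390
 * allocates only as many region/segment table levels as the address space
 * needs, so the entry passed in may already belong to a lower-level table.
 * The _REGION_ENTRY_TYPE_MASK check reveals the actual table type (R1 =
 * region-first, R2 = region-second, R3 = region-third); the entry is
 * dereferenced only if it really is of the expected higher-level type,
 * otherwise the same entry is re-interpreted one level down.
 */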

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * Note a difference with get_user_pages_fast: this always returns the
 * number of pages pinned, 0 if no pages were pinned.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next, flags;
	pgd_t *pgdp, pgd;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	if ((end <= start) || (end > mm->context.asce_limit))
		return 0;
	/*
	 * local_irq_save() doesn't prevent pagetable teardown, but does
	 * prevent the pagetables from being freed on s390.
	 *
	 * So long as we atomically load page table pointers versus teardown,
	 * we can follow the address down to the page and take a ref on it.
	 */
	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd = *pgdp;
		barrier();
		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (!gup_p4d_range(pgdp, pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}
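
/*
 * Illustrative sketch only (hypothetical helper, not part of this file):
 * pinning a single page for read access from a context that must not
 * sleep. A return value of 0 means the fast path could not pin the page
 * and the caller has to fall back to a sleepable slow path.
 */
static inline int pin_one_page_atomic(unsigned long uaddr, struct page **page)
{
	return __get_user_pages_fast(uaddr & PAGE_MASK, 1, 0, page);
}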

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	int nr, ret;

	might_sleep();
	start &= PAGE_MASK;
	nr = __get_user_pages_fast(start, nr_pages, write, pages);
	if (nr == nr_pages)
		return nr;

	/* Try to get the remaining pages with get_user_pages */
	start += nr << PAGE_SHIFT;
	pages += nr;
	ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
				      write ? FOLL_WRITE : 0);
	/* Have to be a bit careful with return values */
	if (nr > 0)
		ret = (ret < 0) ? nr : ret + nr;
	return ret;
}
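
/*
 * Illustrative sketch only (hypothetical helper, not part of this file):
 * typical caller-side handling of the return value conventions documented
 * above. A negative value means nothing was pinned; a short count means
 * only the leading part of the range was pinned and must be released if
 * the caller needs all-or-nothing semantics.
 */
static inline long pin_user_buffer(unsigned long start, int nr_pages,
				   struct page **pages)
{
	int ret = get_user_pages_fast(start, nr_pages, 1, pages);

	if (ret < 0)
		return ret;		/* no pages were pinned */
	if (ret < nr_pages) {
		/* Partial pin: drop the references we did take. */
		while (ret-- > 0)
			put_page(pages[ret]);
		return -EFAULT;
	}
	return nr_pages;
}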