/*
 *  IOMMU helpers in MMU context.
 *
 *  Copyright (C) 2015 IBM Corp. <aik@ozlabs.ru>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <asm/mmu_context.h>
#include <asm/pte-walk.h>

static DEFINE_MUTEX(mem_list_mutex);

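/*
 * Each hpas[] entry stores the host physical address of a pinned 4K
 * page. HPAs are at least SZ_4K aligned, so the low bits are free to
 * carry flags: bit 0 marks a page which was written to via the IOMMU
 * and must therefore be marked dirty before it is unpinned.
 */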
#define MM_IOMMU_TABLE_GROUP_PAGE_DIRTY	0x1
#define MM_IOMMU_TABLE_GROUP_PAGE_MASK	~(SZ_4K - 1)

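/*
 * Describes a single chunk of userspace memory which has been pinned
 * ("preregistered") for use by IOMMU table groups: @used counts
 * mm_iommu_get() callers, @mapped counts active hardware mappings.
 */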
struct mm_iommu_table_group_mem_t {
	struct list_head next;
	struct rcu_head rcu;
	unsigned long used;
	atomic64_t mapped;
	unsigned int pageshift;
	u64 ua;			/* userspace address */
	u64 entries;		/* number of entries in hpas[] */
	u64 *hpas;		/* vmalloc'ed */
};

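/*
 * Charge (or uncharge) @npages against mm->locked_vm, failing with
 * -ENOMEM when pinning would exceed RLIMIT_MEMLOCK for a task without
 * CAP_IPC_LOCK.
 */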
static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
		unsigned long npages, bool incr)
{
	long ret = 0, locked, lock_limit;

	if (!npages)
		return 0;

	down_write(&mm->mmap_sem);

	if (incr) {
		locked = mm->locked_vm + npages;
		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			ret = -ENOMEM;
		else
			mm->locked_vm += npages;
	} else {
		if (WARN_ON_ONCE(npages > mm->locked_vm))
			npages = mm->locked_vm;
		mm->locked_vm -= npages;
	}

	pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
			current ? current->pid : 0,
			incr ? '+' : '-',
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&mm->mmap_sem);

	return ret;
}

bool mm_iommu_preregistered(struct mm_struct *mm)
{
	return !list_empty(&mm->context.iommu_group_mem_list);
}
EXPORT_SYMBOL_GPL(mm_iommu_preregistered);

/*
 * Taken from alloc_migrate_target with changes to remove CMA allocations
 */
struct page *new_iommu_non_cma_page(struct page *page, unsigned long private)
{
	gfp_t gfp_mask = GFP_USER;
	struct page *new_page;

	if (PageCompound(page))
		return NULL;

	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

	/*
	 * We don't want the allocation to force an OOM if possible
	 */
	new_page = alloc_page(gfp_mask | __GFP_NORETRY | __GFP_NOWARN);
	return new_page;
}

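/*
 * Pages about to be pinned for a long time should not stay in the CMA
 * zone, so try to migrate @page out of it first. A non-zero return
 * means the page could not even be isolated, and the caller goes on
 * pinning it where it is.
 */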
static int mm_iommu_move_page_from_cma(struct page *page)
{
	int ret = 0;
	LIST_HEAD(cma_migrate_pages);

	/* Ignore huge pages for now */
	if (PageCompound(page))
		return -EBUSY;

	lru_add_drain();
	ret = isolate_lru_page(page);
	if (ret)
		return ret;

	list_add(&page->lru, &cma_migrate_pages);
	put_page(page); /* Drop the gup reference */

	ret = migrate_pages(&cma_migrate_pages, new_iommu_non_cma_page,
				NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE);
	if (ret) {
		if (!list_empty(&cma_migrate_pages))
			putback_movable_pages(&cma_migrate_pages);
	}

	return 0;
}

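/*
 * Pin @entries pages of userspace memory starting at @ua, charge them
 * to mm->locked_vm and publish the resulting descriptor on the mm's
 * list of preregistered regions. Repeated calls for the same (ua,
 * entries) pair only bump the use count; overlapping regions are
 * rejected with -EINVAL.
 *
 * A typical caller (a sketch only; in-tree the user is the VFIO SPAPR
 * TCE driver, whose exact flow differs) might look like:
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *	unsigned long hpa;
 *
 *	if (!mm_iommu_get(mm, ua, entries, &mem)) {
 *		if (!mm_iommu_ua_to_hpa(mem, ua, PAGE_SHIFT, &hpa))
 *			;	// program the TCE table with hpa
 *		mm_iommu_put(mm, mem);
 *	}
 */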
long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem)
{
	struct mm_iommu_table_group_mem_t *mem;
	long i, j, ret = 0, locked_entries = 0;
	unsigned int pageshift;
	unsigned long flags;
	unsigned long cur_ua;
	struct page *page = NULL;

	mutex_lock(&mem_list_mutex);

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			++mem->used;
			*pmem = mem;
			goto unlock_exit;
		}

		/* Overlap? */
		if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
				(ua < (mem->ua +
				       (mem->entries << PAGE_SHIFT)))) {
			ret = -EINVAL;
			goto unlock_exit;
		}
	}

	ret = mm_iommu_adjust_locked_vm(mm, entries, true);
	if (ret)
		goto unlock_exit;

	locked_entries = entries;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	/*
	 * As a starting point for the maximum page size calculation,
	 * use the natural alignment of @ua and @entries: this allows
	 * IOMMU pages smaller than huge pages but still bigger than
	 * PAGE_SIZE.
	 */
	mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
	mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
	if (!mem->hpas) {
		kfree(mem);
		ret = -ENOMEM;
		goto unlock_exit;
	}

	for (i = 0; i < entries; ++i) {
		cur_ua = ua + (i << PAGE_SHIFT);
		if (1 != get_user_pages_fast(cur_ua,
					1/* pages */, 1/* iswrite */, &page)) {
			ret = -EFAULT;
			for (j = 0; j < i; ++j)
				put_page(pfn_to_page(mem->hpas[j] >>
						PAGE_SHIFT));
			vfree(mem->hpas);
			kfree(mem);
			goto unlock_exit;
		}
		/*
		 * If we get a page from the CMA zone, since we are going to
		 * be pinning these entries, we might as well move them out
		 * of the CMA zone if possible. NOTE: faulting in + migration
		 * can be expensive. Batching can be considered later.
		 */
		if (is_migrate_cma_page(page)) {
			if (mm_iommu_move_page_from_cma(page))
				goto populate;
			if (1 != get_user_pages_fast(cur_ua,
						1/* pages */, 1/* iswrite */,
						&page)) {
				ret = -EFAULT;
				for (j = 0; j < i; ++j)
					put_page(pfn_to_page(mem->hpas[j] >>
								PAGE_SHIFT));
				vfree(mem->hpas);
				kfree(mem);
				goto unlock_exit;
			}
		}
populate:
		pageshift = PAGE_SHIFT;
		if (mem->pageshift > PAGE_SHIFT && PageCompound(page)) {
			pte_t *pte;
			struct page *head = compound_head(page);
			unsigned int compshift = compound_order(head);
			unsigned int pteshift;

			local_irq_save(flags); /* disables interrupts as well */
			pte = find_linux_pte(mm->pgd, cur_ua, NULL, &pteshift);

			/* Double check it is still the same pinned page */
			if (pte && pte_page(*pte) == head &&
			    pteshift == compshift + PAGE_SHIFT)
				pageshift = max_t(unsigned int, pteshift,
						PAGE_SHIFT);
			local_irq_restore(flags);
		}
		mem->pageshift = min(mem->pageshift, pageshift);
		mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
	}

	atomic64_set(&mem->mapped, 1);
	mem->used = 1;
	mem->ua = ua;
	mem->entries = entries;
	*pmem = mem;

	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);

unlock_exit:
	if (locked_entries && ret)
		mm_iommu_adjust_locked_vm(mm, locked_entries, false);

	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_get);

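/*
 * Drop the page references taken by mm_iommu_get(), transferring the
 * IOMMU dirty flag from hpas[] to the struct page before each page is
 * released.
 */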
static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
{
	long i;
	struct page *page = NULL;

	for (i = 0; i < mem->entries; ++i) {
		if (!mem->hpas[i])
			continue;

		page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
		if (!page)
			continue;

		if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY)
			SetPageDirty(page);

		put_page(page);
		mem->hpas[i] = 0;
	}
}

static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem)
{
	mm_iommu_unpin(mem);
	vfree(mem->hpas);
	kfree(mem);
}

static void mm_iommu_free(struct rcu_head *head)
{
	struct mm_iommu_table_group_mem_t *mem = container_of(head,
			struct mm_iommu_table_group_mem_t, rcu);

	mm_iommu_do_free(mem);
}

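/*
 * Unlink the descriptor and free it after a grace period, so the
 * lockless real-mode walkers in mm_iommu_lookup_rm() and friends never
 * see it disappear under them.
 */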
static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
{
	list_del_rcu(&mem->next);
	call_rcu(&mem->rcu, mm_iommu_free);
}

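/*
 * Drop one reference to a preregistered region. The final put releases
 * the region, unless hardware mappings still exist, in which case the
 * reference is restored and -EBUSY returned.
 */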
long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
{
	long ret = 0;

	mutex_lock(&mem_list_mutex);

	if (mem->used == 0) {
		ret = -ENOENT;
		goto unlock_exit;
	}

	--mem->used;
	/* There are still users, exit */
	if (mem->used)
		goto unlock_exit;

	/* Are there still mappings? */
	if (atomic64_cmpxchg(&mem->mapped, 1, 0) != 1) {
		++mem->used;
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* @mapped became 0 so now mappings are disabled, release the region */
	mm_iommu_release(mem);

	mm_iommu_adjust_locked_vm(mm, mem->entries, false);

unlock_exit:
	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_put);

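/*
 * Find the preregistered region which fully covers [ua, ua + size),
 * or return NULL if there is none.
 */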
struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
				 (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);

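/*
 * Real-mode ("rm") counterpart of mm_iommu_lookup(): walks the list
 * locklessly, so it is usable with translation off, e.g. from KVM
 * real-mode TCE handlers.
 */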
struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
				 (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}

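/*
 * Like mm_iommu_lookup() but matches a region by its exact
 * (ua, entries) pair rather than by address range.
 */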
struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_find);

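/*
 * Translate a userspace address within a preregistered region into the
 * host physical address of the pinned page, preserving the offset
 * within the page. Fails if the region was pinned with a smaller page
 * size than the mapping being requested.
 */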
long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	u64 *va = &mem->hpas[entry];

	if (entry >= mem->entries)
		return -EFAULT;

	if (pageshift > mem->pageshift)
		return -EFAULT;

	*hpa = (*va & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);

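/*
 * Real-mode variant of mm_iommu_ua_to_hpa(): hpas[] lives in vmalloc
 * space, which cannot be dereferenced with the MMU off, so the entry
 * is reached through its physical address instead.
 */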
long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	void *va = &mem->hpas[entry];
	unsigned long *pa;

	if (entry >= mem->entries)
		return -EFAULT;

	if (pageshift > mem->pageshift)
		return -EFAULT;

	pa = (void *) vmalloc_to_phys(va);
	if (!pa)
		return -EFAULT;

	*hpa = (*pa & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);

	return 0;
}

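/*
 * Mark the page backing @ua dirty in hpas[] from real mode. The actual
 * SetPageDirty() happens later, at unpin time, in mm_iommu_unpin().
 */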
void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua)
{
	struct mm_iommu_table_group_mem_t *mem;
	long entry;
	void *va;
	unsigned long *pa;

	mem = mm_iommu_lookup_rm(mm, ua, PAGE_SIZE);
	if (!mem)
		return;

	entry = (ua - mem->ua) >> PAGE_SHIFT;
	va = &mem->hpas[entry];

	pa = (void *) vmalloc_to_phys(va);
	if (!pa)
		return;

	*pa |= MM_IOMMU_TABLE_GROUP_PAGE_DIRTY;
}

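/*
 * @mapped is initialised to 1 by mm_iommu_get() and dropped to 0 by
 * the final mm_iommu_put(), so inc_not_zero() failing here means the
 * region is already gone and no new hardware mappings may be created.
 */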
long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
	if (atomic64_inc_not_zero(&mem->mapped))
		return 0;

	/* Last mm_iommu_put() has been called, no more mappings allowed */
	return -ENXIO;
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_inc);

void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
{
	atomic64_add_unless(&mem->mapped, -1, 1);
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);

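/*
 * Initialise the per-mm list of preregistered regions.
 */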
void mm_iommu_init(struct mm_struct *mm)
{
	INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
}