1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * VFIO: IOMMU DMA mapping support for Type1 IOMMU
4  *
5  * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
6  *     Author: Alex Williamson <alex.williamson@redhat.com>
7  *
8  * Derived from original vfio:
9  * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
10  * Author: Tom Lyon, pugs@cisco.com
11  *
12  * We arbitrarily define a Type1 IOMMU as one matching the below code.
13  * It could be called the x86 IOMMU as it's designed for AMD-Vi & Intel
14  * VT-d, but that makes it harder to re-use as theoretically anyone
15  * implementing a similar IOMMU could make use of this.  We expect the
16  * IOMMU to support the IOMMU API and have few to no restrictions around
17  * the IOVA range that can be mapped.  The Type1 IOMMU is currently
18  * optimized for relatively static mappings of a userspace process with
19  * userspace pages pinned into memory.  We also assume devices and IOMMU
20  * domains are PCI based as the IOMMU API is still centered around a
21  * device/bus interface rather than a group interface.
22  */
23 
24 #include <linux/compat.h>
25 #include <linux/device.h>
26 #include <linux/fs.h>
27 #include <linux/iommu.h>
28 #include <linux/module.h>
29 #include <linux/mm.h>
30 #include <linux/kthread.h>
31 #include <linux/rbtree.h>
32 #include <linux/sched/signal.h>
33 #include <linux/sched/mm.h>
34 #include <linux/slab.h>
35 #include <linux/uaccess.h>
36 #include <linux/vfio.h>
37 #include <linux/workqueue.h>
38 #include <linux/mdev.h>
39 #include <linux/notifier.h>
40 #include <linux/dma-iommu.h>
41 #include <linux/irqdomain.h>
42 
43 #define DRIVER_VERSION  "0.2"
44 #define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
45 #define DRIVER_DESC     "Type1 IOMMU driver for VFIO"
46 
47 static bool allow_unsafe_interrupts;
48 module_param_named(allow_unsafe_interrupts,
49 		   allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR);
50 MODULE_PARM_DESC(allow_unsafe_interrupts,
51 		 "Enable VFIO IOMMU support on platforms without interrupt remapping support.");
52 
53 static bool disable_hugepages;
54 module_param_named(disable_hugepages,
55 		   disable_hugepages, bool, S_IRUGO | S_IWUSR);
56 MODULE_PARM_DESC(disable_hugepages,
57 		 "Disable VFIO IOMMU support for IOMMU hugepages.");
58 
59 static unsigned int dma_entry_limit __read_mostly = U16_MAX;
60 module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
61 MODULE_PARM_DESC(dma_entry_limit,
62 		 "Maximum number of user DMA mappings per container (65535).");
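/*
 * Usage note (illustrative): the parameters above can be given at module load
 * time, e.g. "modprobe vfio_iommu_type1 disable_hugepages=1", and the writable
 * ones may also be changed at runtime via
 * /sys/module/vfio_iommu_type1/parameters/.
 */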
63 
64 struct vfio_iommu {
65 	struct list_head	domain_list;
66 	struct list_head	iova_list;
67 	struct vfio_domain	*external_domain; /* domain for external user */
68 	struct mutex		lock;
69 	struct rb_root		dma_list;
70 	struct blocking_notifier_head notifier;
71 	unsigned int		dma_avail;
72 	uint64_t		pgsize_bitmap;
73 	bool			v2;
74 	bool			nesting;
75 	bool			dirty_page_tracking;
76 	bool			pinned_page_dirty_scope;
77 };
78 
79 struct vfio_domain {
80 	struct iommu_domain	*domain;
81 	struct list_head	next;
82 	struct list_head	group_list;
83 	int			prot;		/* IOMMU_CACHE */
84 	bool			fgsp;		/* Fine-grained super pages */
85 };
86 
87 struct vfio_dma {
88 	struct rb_node		node;
89 	dma_addr_t		iova;		/* Device address */
90 	unsigned long		vaddr;		/* Process virtual addr */
91 	size_t			size;		/* Map size (bytes) */
92 	int			prot;		/* IOMMU_READ/WRITE */
93 	bool			iommu_mapped;
94 	bool			lock_cap;	/* capable(CAP_IPC_LOCK) */
95 	struct task_struct	*task;
96 	struct rb_root		pfn_list;	/* Ex-user pinned pfn list */
97 	unsigned long		*bitmap;
98 };
99 
100 struct vfio_group {
101 	struct iommu_group	*iommu_group;
102 	struct list_head	next;
103 	bool			mdev_group;	/* An mdev group */
104 	bool			pinned_page_dirty_scope;
105 };
106 
107 struct vfio_iova {
108 	struct list_head	list;
109 	dma_addr_t		start;
110 	dma_addr_t		end;
111 };
112 
113 /*
114  * Guest RAM pinning working set or DMA target
115  */
116 struct vfio_pfn {
117 	struct rb_node		node;
118 	dma_addr_t		iova;		/* Device address */
119 	unsigned long		pfn;		/* Host pfn */
120 	unsigned int		ref_count;
121 };
122 
123 struct vfio_regions {
124 	struct list_head list;
125 	dma_addr_t iova;
126 	phys_addr_t phys;
127 	size_t len;
128 };
129 
130 #define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)	\
131 					(!list_empty(&iommu->domain_list))
132 
133 #define DIRTY_BITMAP_BYTES(n)	(ALIGN(n, BITS_PER_TYPE(u64)) / BITS_PER_BYTE)
134 
135 /*
136  * The number-of-bits argument to bitmap_set() is an unsigned int, which the
137  * unaligned multi-bit helper, __bitmap_set(), further casts to a signed int.
138  * The maximum supported bitmap size is therefore 2^31 bits, i.e.
139  * 2^31 / 2^3 = 2^28 bytes (256 MB) of bitmap, which covers
140  * 2^31 * 2^12 = 2^43 bytes (8 TB) of guest memory on a 4K page
141  * system.
142  */
143 #define DIRTY_BITMAP_PAGES_MAX	 ((u64)INT_MAX)
144 #define DIRTY_BITMAP_SIZE_MAX	 DIRTY_BITMAP_BYTES(DIRTY_BITMAP_PAGES_MAX)
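/*
 * Worked example (illustrative): a 2MB vfio_dma tracked at a 4K page size
 * covers 512 pages, so DIRTY_BITMAP_BYTES(512) = 512 / 8 = 64 bytes, and
 * vfio_dma_bitmap_alloc() below allocates 64 + sizeof(u64) = 72 bytes.
 */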
145 
146 static int put_pfn(unsigned long pfn, int prot);
147 
148 static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
149 					       struct iommu_group *iommu_group);
150 
151 static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu);
152 /*
153  * This code handles mapping and unmapping of user data buffers
154  * into DMA'ble space using the IOMMU
155  */
156 
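/*
 * vfio_find_dma() returns a vfio_dma whose [iova, iova + size) range overlaps
 * the requested [start, start + size) range, or NULL if there is none.
 * Mappings in the dma_list rb-tree never overlap each other, so at most one
 * entry can match.
 */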
157 static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
158 				      dma_addr_t start, size_t size)
159 {
160 	struct rb_node *node = iommu->dma_list.rb_node;
161 
162 	while (node) {
163 		struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);
164 
165 		if (start + size <= dma->iova)
166 			node = node->rb_left;
167 		else if (start >= dma->iova + dma->size)
168 			node = node->rb_right;
169 		else
170 			return dma;
171 	}
172 
173 	return NULL;
174 }
175 
176 static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
177 {
178 	struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
179 	struct vfio_dma *dma;
180 
181 	while (*link) {
182 		parent = *link;
183 		dma = rb_entry(parent, struct vfio_dma, node);
184 
185 		if (new->iova + new->size <= dma->iova)
186 			link = &(*link)->rb_left;
187 		else
188 			link = &(*link)->rb_right;
189 	}
190 
191 	rb_link_node(&new->node, parent, link);
192 	rb_insert_color(&new->node, &iommu->dma_list);
193 }
194 
195 static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
196 {
197 	rb_erase(&old->node, &iommu->dma_list);
198 }
199 
200 
201 static int vfio_dma_bitmap_alloc(struct vfio_dma *dma, size_t pgsize)
202 {
203 	uint64_t npages = dma->size / pgsize;
204 
205 	if (npages > DIRTY_BITMAP_PAGES_MAX)
206 		return -EINVAL;
207 
208 	/*
209 	 * Allocate an extra 64 bits so that bitmap_shift_left() has room to
210 	 * shift and club an unaligned number of pages from adjacent vfio_dma
211 	 * ranges.
212 	 */
213 	dma->bitmap = kvzalloc(DIRTY_BITMAP_BYTES(npages) + sizeof(u64),
214 			       GFP_KERNEL);
215 	if (!dma->bitmap)
216 		return -ENOMEM;
217 
218 	return 0;
219 }
220 
221 static void vfio_dma_bitmap_free(struct vfio_dma *dma)
222 {
223 	kvfree(dma->bitmap);
224 	dma->bitmap = NULL;
225 }
226 
227 static void vfio_dma_populate_bitmap(struct vfio_dma *dma, size_t pgsize)
228 {
229 	struct rb_node *p;
230 	unsigned long pgshift = __ffs(pgsize);
231 
232 	for (p = rb_first(&dma->pfn_list); p; p = rb_next(p)) {
233 		struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn, node);
234 
235 		bitmap_set(dma->bitmap, (vpfn->iova - dma->iova) >> pgshift, 1);
236 	}
237 }
238 
239 static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu, size_t pgsize)
240 {
241 	struct rb_node *n;
242 
243 	for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
244 		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
245 		int ret;
246 
247 		ret = vfio_dma_bitmap_alloc(dma, pgsize);
248 		if (ret) {
249 			struct rb_node *p;
250 
251 			for (p = rb_prev(n); p; p = rb_prev(p)) {
252 				struct vfio_dma *dma = rb_entry(p,
253 							struct vfio_dma, node);
254 
255 				vfio_dma_bitmap_free(dma);
256 			}
257 			return ret;
258 		}
259 		vfio_dma_populate_bitmap(dma, pgsize);
260 	}
261 	return 0;
262 }
263 
264 static void vfio_dma_bitmap_free_all(struct vfio_iommu *iommu)
265 {
266 	struct rb_node *n;
267 
268 	for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
269 		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
270 
271 		vfio_dma_bitmap_free(dma);
272 	}
273 }
274 
275 /*
276  * Helper Functions for host iova-pfn list
277  */
278 static struct vfio_pfn *vfio_find_vpfn(struct vfio_dma *dma, dma_addr_t iova)
279 {
280 	struct vfio_pfn *vpfn;
281 	struct rb_node *node = dma->pfn_list.rb_node;
282 
283 	while (node) {
284 		vpfn = rb_entry(node, struct vfio_pfn, node);
285 
286 		if (iova < vpfn->iova)
287 			node = node->rb_left;
288 		else if (iova > vpfn->iova)
289 			node = node->rb_right;
290 		else
291 			return vpfn;
292 	}
293 	return NULL;
294 }
295 
296 static void vfio_link_pfn(struct vfio_dma *dma,
297 			  struct vfio_pfn *new)
298 {
299 	struct rb_node **link, *parent = NULL;
300 	struct vfio_pfn *vpfn;
301 
302 	link = &dma->pfn_list.rb_node;
303 	while (*link) {
304 		parent = *link;
305 		vpfn = rb_entry(parent, struct vfio_pfn, node);
306 
307 		if (new->iova < vpfn->iova)
308 			link = &(*link)->rb_left;
309 		else
310 			link = &(*link)->rb_right;
311 	}
312 
313 	rb_link_node(&new->node, parent, link);
314 	rb_insert_color(&new->node, &dma->pfn_list);
315 }
316 
317 static void vfio_unlink_pfn(struct vfio_dma *dma, struct vfio_pfn *old)
318 {
319 	rb_erase(&old->node, &dma->pfn_list);
320 }
321 
322 static int vfio_add_to_pfn_list(struct vfio_dma *dma, dma_addr_t iova,
323 				unsigned long pfn)
324 {
325 	struct vfio_pfn *vpfn;
326 
327 	vpfn = kzalloc(sizeof(*vpfn), GFP_KERNEL);
328 	if (!vpfn)
329 		return -ENOMEM;
330 
331 	vpfn->iova = iova;
332 	vpfn->pfn = pfn;
333 	vpfn->ref_count = 1;
334 	vfio_link_pfn(dma, vpfn);
335 	return 0;
336 }
337 
338 static void vfio_remove_from_pfn_list(struct vfio_dma *dma,
339 				      struct vfio_pfn *vpfn)
340 {
341 	vfio_unlink_pfn(dma, vpfn);
342 	kfree(vpfn);
343 }
344 
345 static struct vfio_pfn *vfio_iova_get_vfio_pfn(struct vfio_dma *dma,
346 					       unsigned long iova)
347 {
348 	struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);
349 
350 	if (vpfn)
351 		vpfn->ref_count++;
352 	return vpfn;
353 }
354 
355 static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn)
356 {
357 	int ret = 0;
358 
359 	vpfn->ref_count--;
360 	if (!vpfn->ref_count) {
361 		ret = put_pfn(vpfn->pfn, dma->prot);
362 		vfio_remove_from_pfn_list(dma, vpfn);
363 	}
364 	return ret;
365 }
366 
367 static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async)
368 {
369 	struct mm_struct *mm;
370 	int ret;
371 
372 	if (!npage)
373 		return 0;
374 
375 	mm = async ? get_task_mm(dma->task) : dma->task->mm;
376 	if (!mm)
377 		return -ESRCH; /* process exited */
378 
379 	ret = mmap_write_lock_killable(mm);
380 	if (!ret) {
381 		ret = __account_locked_vm(mm, abs(npage), npage > 0, dma->task,
382 					  dma->lock_cap);
383 		mmap_write_unlock(mm);
384 	}
385 
386 	if (async)
387 		mmput(mm);
388 
389 	return ret;
390 }
391 
392 /*
393  * Some mappings aren't backed by a struct page, for example an mmap'd
394  * MMIO range for our own or another device.  These use a different
395  * pfn conversion and shouldn't be tracked as locked pages.
396  * For compound pages, any driver that sets the reserved bit in head
397  * page needs to set the reserved bit in all subpages to be safe.
398  */
399 static bool is_invalid_reserved_pfn(unsigned long pfn)
400 {
401 	if (pfn_valid(pfn))
402 		return PageReserved(pfn_to_page(pfn));
403 
404 	return true;
405 }
406 
407 static int put_pfn(unsigned long pfn, int prot)
408 {
409 	if (!is_invalid_reserved_pfn(pfn)) {
410 		struct page *page = pfn_to_page(pfn);
411 
412 		unpin_user_pages_dirty_lock(&page, 1, prot & IOMMU_WRITE);
413 		return 1;
414 	}
415 	return 0;
416 }
417 
418 static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
419 			    unsigned long vaddr, unsigned long *pfn,
420 			    bool write_fault)
421 {
422 	int ret;
423 
424 	ret = follow_pfn(vma, vaddr, pfn);
425 	if (ret) {
426 		bool unlocked = false;
427 
428 		ret = fixup_user_fault(mm, vaddr,
429 				       FAULT_FLAG_REMOTE |
430 				       (write_fault ?  FAULT_FLAG_WRITE : 0),
431 				       &unlocked);
432 		if (unlocked)
433 			return -EAGAIN;
434 
435 		if (ret)
436 			return ret;
437 
438 		ret = follow_pfn(vma, vaddr, pfn);
439 	}
440 
441 	return ret;
442 }
443 
444 static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
445 			 int prot, unsigned long *pfn)
446 {
447 	struct page *page[1];
448 	struct vm_area_struct *vma;
449 	unsigned int flags = 0;
450 	int ret;
451 
452 	if (prot & IOMMU_WRITE)
453 		flags |= FOLL_WRITE;
454 
455 	mmap_read_lock(mm);
456 	ret = pin_user_pages_remote(mm, vaddr, 1, flags | FOLL_LONGTERM,
457 				    page, NULL, NULL);
458 	if (ret == 1) {
459 		*pfn = page_to_pfn(page[0]);
460 		ret = 0;
461 		goto done;
462 	}
463 
464 	vaddr = untagged_addr(vaddr);
465 
466 retry:
467 	vma = find_vma_intersection(mm, vaddr, vaddr + 1);
468 
469 	if (vma && vma->vm_flags & VM_PFNMAP) {
470 		ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE);
471 		if (ret == -EAGAIN)
472 			goto retry;
473 
474 		if (!ret && !is_invalid_reserved_pfn(*pfn))
475 			ret = -EFAULT;
476 	}
477 done:
478 	mmap_read_unlock(mm);
479 	return ret;
480 }
481 
482 /*
483  * Attempt to pin pages.  We really don't want to track all the pfns and
484  * the iommu can only map chunks of consecutive pfns anyway, so get the
485  * first page and all consecutive pages with the same locking.
486  */
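/*
 * For example, a 2MB user buffer backed by a single transparent hugepage has
 * physically contiguous pfns, so one call typically pins all 512 pages in one
 * go, which later lets vfio_iommu_map() install a single large IOMMU mapping
 * rather than 512 individual 4K entries (unless disable_hugepages is set).
 */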
487 static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
488 				  long npage, unsigned long *pfn_base,
489 				  unsigned long limit)
490 {
491 	unsigned long pfn = 0;
492 	long ret, pinned = 0, lock_acct = 0;
493 	bool rsvd;
494 	dma_addr_t iova = vaddr - dma->vaddr + dma->iova;
495 
496 	/* This code path is only user initiated */
497 	if (!current->mm)
498 		return -ENODEV;
499 
500 	ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base);
501 	if (ret)
502 		return ret;
503 
504 	pinned++;
505 	rsvd = is_invalid_reserved_pfn(*pfn_base);
506 
507 	/*
508 	 * Reserved pages aren't counted against the user, externally pinned
509 	 * pages are already counted against the user.
510 	 */
511 	if (!rsvd && !vfio_find_vpfn(dma, iova)) {
512 		if (!dma->lock_cap && current->mm->locked_vm + 1 > limit) {
513 			put_pfn(*pfn_base, dma->prot);
514 			pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
515 					limit << PAGE_SHIFT);
516 			return -ENOMEM;
517 		}
518 		lock_acct++;
519 	}
520 
521 	if (unlikely(disable_hugepages))
522 		goto out;
523 
524 	/* Lock all the consecutive pages from pfn_base */
525 	for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;
526 	     pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
527 		ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn);
528 		if (ret)
529 			break;
530 
531 		if (pfn != *pfn_base + pinned ||
532 		    rsvd != is_invalid_reserved_pfn(pfn)) {
533 			put_pfn(pfn, dma->prot);
534 			break;
535 		}
536 
537 		if (!rsvd && !vfio_find_vpfn(dma, iova)) {
538 			if (!dma->lock_cap &&
539 			    current->mm->locked_vm + lock_acct + 1 > limit) {
540 				put_pfn(pfn, dma->prot);
541 				pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
542 					__func__, limit << PAGE_SHIFT);
543 				ret = -ENOMEM;
544 				goto unpin_out;
545 			}
546 			lock_acct++;
547 		}
548 	}
549 
550 out:
551 	ret = vfio_lock_acct(dma, lock_acct, false);
552 
553 unpin_out:
554 	if (ret) {
555 		if (!rsvd) {
556 			for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
557 				put_pfn(pfn, dma->prot);
558 		}
559 
560 		return ret;
561 	}
562 
563 	return pinned;
564 }
565 
566 static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
567 				    unsigned long pfn, long npage,
568 				    bool do_accounting)
569 {
570 	long unlocked = 0, locked = 0;
571 	long i;
572 
573 	for (i = 0; i < npage; i++, iova += PAGE_SIZE) {
574 		if (put_pfn(pfn++, dma->prot)) {
575 			unlocked++;
576 			if (vfio_find_vpfn(dma, iova))
577 				locked++;
578 		}
579 	}
580 
581 	if (do_accounting)
582 		vfio_lock_acct(dma, locked - unlocked, true);
583 
584 	return unlocked;
585 }
586 
587 static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
588 				  unsigned long *pfn_base, bool do_accounting)
589 {
590 	struct mm_struct *mm;
591 	int ret;
592 
593 	mm = get_task_mm(dma->task);
594 	if (!mm)
595 		return -ENODEV;
596 
597 	ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
598 	if (!ret && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
599 		ret = vfio_lock_acct(dma, 1, true);
600 		if (ret) {
601 			put_pfn(*pfn_base, dma->prot);
602 			if (ret == -ENOMEM)
603 				pr_warn("%s: Task %s (%d) RLIMIT_MEMLOCK "
604 					"(%ld) exceeded\n", __func__,
605 					dma->task->comm, task_pid_nr(dma->task),
606 					task_rlimit(dma->task, RLIMIT_MEMLOCK));
607 		}
608 	}
609 
610 	mmput(mm);
611 	return ret;
612 }
613 
614 static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
615 				    bool do_accounting)
616 {
617 	int unlocked;
618 	struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);
619 
620 	if (!vpfn)
621 		return 0;
622 
623 	unlocked = vfio_iova_put_vfio_pfn(dma, vpfn);
624 
625 	if (do_accounting)
626 		vfio_lock_acct(dma, -unlocked, true);
627 
628 	return unlocked;
629 }
630 
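/*
 * vfio_iommu_type1_pin_pages() is the backend for the external (mdev) page
 * pinning interface: each user_pfn[] entry is a user IOVA >> PAGE_SHIFT, and
 * the matching host pfn is returned in phys_pfn[].  Pins taken here are
 * tracked per-vfio_dma in the pfn_list so they can be accounted and later
 * released individually.
 */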
631 static int vfio_iommu_type1_pin_pages(void *iommu_data,
632 				      struct iommu_group *iommu_group,
633 				      unsigned long *user_pfn,
634 				      int npage, int prot,
635 				      unsigned long *phys_pfn)
636 {
637 	struct vfio_iommu *iommu = iommu_data;
638 	struct vfio_group *group;
639 	int i, j, ret;
640 	unsigned long remote_vaddr;
641 	struct vfio_dma *dma;
642 	bool do_accounting;
643 
644 	if (!iommu || !user_pfn || !phys_pfn)
645 		return -EINVAL;
646 
647 	/* Supported for v2 version only */
648 	if (!iommu->v2)
649 		return -EACCES;
650 
651 	mutex_lock(&iommu->lock);
652 
653 	/* Fail if notifier list is empty */
654 	if (!iommu->notifier.head) {
655 		ret = -EINVAL;
656 		goto pin_done;
657 	}
658 
659 	/*
660 	 * If an iommu capable domain exists in the container then all pages are
661 	 * already pinned and accounted. Accounting should be done if there is no
662 	 * iommu capable domain in the container.
663 	 */
664 	do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
665 
666 	for (i = 0; i < npage; i++) {
667 		dma_addr_t iova;
668 		struct vfio_pfn *vpfn;
669 
670 		iova = user_pfn[i] << PAGE_SHIFT;
671 		dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
672 		if (!dma) {
673 			ret = -EINVAL;
674 			goto pin_unwind;
675 		}
676 
677 		if ((dma->prot & prot) != prot) {
678 			ret = -EPERM;
679 			goto pin_unwind;
680 		}
681 
682 		vpfn = vfio_iova_get_vfio_pfn(dma, iova);
683 		if (vpfn) {
684 			phys_pfn[i] = vpfn->pfn;
685 			continue;
686 		}
687 
688 		remote_vaddr = dma->vaddr + (iova - dma->iova);
689 		ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
690 					     do_accounting);
691 		if (ret)
692 			goto pin_unwind;
693 
694 		ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
695 		if (ret) {
696 			if (put_pfn(phys_pfn[i], dma->prot) && do_accounting)
697 				vfio_lock_acct(dma, -1, true);
698 			goto pin_unwind;
699 		}
700 
701 		if (iommu->dirty_page_tracking) {
702 			unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
703 
704 			/*
705 			 * Bitmap populated with the smallest supported page
706 			 * size
707 			 */
708 			bitmap_set(dma->bitmap,
709 				   (iova - dma->iova) >> pgshift, 1);
710 		}
711 	}
712 	ret = i;
713 
714 	group = vfio_iommu_find_iommu_group(iommu, iommu_group);
715 	if (!group->pinned_page_dirty_scope) {
716 		group->pinned_page_dirty_scope = true;
717 		update_pinned_page_dirty_scope(iommu);
718 	}
719 
720 	goto pin_done;
721 
722 pin_unwind:
723 	phys_pfn[i] = 0;
724 	for (j = 0; j < i; j++) {
725 		dma_addr_t iova;
726 
727 		iova = user_pfn[j] << PAGE_SHIFT;
728 		dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
729 		vfio_unpin_page_external(dma, iova, do_accounting);
730 		phys_pfn[j] = 0;
731 	}
732 pin_done:
733 	mutex_unlock(&iommu->lock);
734 	return ret;
735 }
736 
737 static int vfio_iommu_type1_unpin_pages(void *iommu_data,
738 					unsigned long *user_pfn,
739 					int npage)
740 {
741 	struct vfio_iommu *iommu = iommu_data;
742 	bool do_accounting;
743 	int i;
744 
745 	if (!iommu || !user_pfn)
746 		return -EINVAL;
747 
748 	/* Supported for v2 version only */
749 	if (!iommu->v2)
750 		return -EACCES;
751 
752 	mutex_lock(&iommu->lock);
753 
754 	do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
755 	for (i = 0; i < npage; i++) {
756 		struct vfio_dma *dma;
757 		dma_addr_t iova;
758 
759 		iova = user_pfn[i] << PAGE_SHIFT;
760 		dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
761 		if (!dma)
762 			goto unpin_exit;
763 		vfio_unpin_page_external(dma, iova, do_accounting);
764 	}
765 
766 unpin_exit:
767 	mutex_unlock(&iommu->lock);
768 	return i > npage ? npage : (i > 0 ? i : -EINVAL);
769 }
770 
771 static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
772 			    struct list_head *regions,
773 			    struct iommu_iotlb_gather *iotlb_gather)
774 {
775 	long unlocked = 0;
776 	struct vfio_regions *entry, *next;
777 
778 	iommu_iotlb_sync(domain->domain, iotlb_gather);
779 
780 	list_for_each_entry_safe(entry, next, regions, list) {
781 		unlocked += vfio_unpin_pages_remote(dma,
782 						    entry->iova,
783 						    entry->phys >> PAGE_SHIFT,
784 						    entry->len >> PAGE_SHIFT,
785 						    false);
786 		list_del(&entry->list);
787 		kfree(entry);
788 	}
789 
790 	cond_resched();
791 
792 	return unlocked;
793 }
794 
795 /*
796  * Generally, VFIO needs to unpin remote pages after each IOTLB flush.
797  * Therefore, when using the IOTLB flush sync interface, VFIO needs to keep
798  * track of these regions (currently using a list).
799  *
800  * This value specifies the maximum number of regions for each IOTLB flush sync.
801  */
802 #define VFIO_IOMMU_TLB_SYNC_MAX		512
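/*
 * Rough sizing (illustrative, 64-bit kernel): struct vfio_regions is about
 * 40 bytes, so a full batch of 512 deferred regions keeps roughly 20KB of
 * transient state between IOTLB syncs.
 */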
803 
804 static size_t unmap_unpin_fast(struct vfio_domain *domain,
805 			       struct vfio_dma *dma, dma_addr_t *iova,
806 			       size_t len, phys_addr_t phys, long *unlocked,
807 			       struct list_head *unmapped_list,
808 			       int *unmapped_cnt,
809 			       struct iommu_iotlb_gather *iotlb_gather)
810 {
811 	size_t unmapped = 0;
812 	struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
813 
814 	if (entry) {
815 		unmapped = iommu_unmap_fast(domain->domain, *iova, len,
816 					    iotlb_gather);
817 
818 		if (!unmapped) {
819 			kfree(entry);
820 		} else {
821 			entry->iova = *iova;
822 			entry->phys = phys;
823 			entry->len  = unmapped;
824 			list_add_tail(&entry->list, unmapped_list);
825 
826 			*iova += unmapped;
827 			(*unmapped_cnt)++;
828 		}
829 	}
830 
831 	/*
832 	 * Sync if the number of fast-unmap regions hits the limit
833 	 * or in case of errors.
834 	 */
835 	if (*unmapped_cnt >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) {
836 		*unlocked += vfio_sync_unpin(dma, domain, unmapped_list,
837 					     iotlb_gather);
838 		*unmapped_cnt = 0;
839 	}
840 
841 	return unmapped;
842 }
843 
844 static size_t unmap_unpin_slow(struct vfio_domain *domain,
845 			       struct vfio_dma *dma, dma_addr_t *iova,
846 			       size_t len, phys_addr_t phys,
847 			       long *unlocked)
848 {
849 	size_t unmapped = iommu_unmap(domain->domain, *iova, len);
850 
851 	if (unmapped) {
852 		*unlocked += vfio_unpin_pages_remote(dma, *iova,
853 						     phys >> PAGE_SHIFT,
854 						     unmapped >> PAGE_SHIFT,
855 						     false);
856 		*iova += unmapped;
857 		cond_resched();
858 	}
859 	return unmapped;
860 }
861 
862 static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
863 			     bool do_accounting)
864 {
865 	dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
866 	struct vfio_domain *domain, *d;
867 	LIST_HEAD(unmapped_region_list);
868 	struct iommu_iotlb_gather iotlb_gather;
869 	int unmapped_region_cnt = 0;
870 	long unlocked = 0;
871 
872 	if (!dma->size)
873 		return 0;
874 
875 	if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
876 		return 0;
877 
878 	/*
879 	 * We use the IOMMU to track the physical addresses, otherwise we'd
880 	 * need a much more complicated tracking system.  Unfortunately that
881 	 * means we need to use one of the iommu domains to figure out the
882 	 * pfns to unpin.  The rest need to be unmapped in advance so we have
883 	 * no iommu translations remaining when the pages are unpinned.
884 	 */
885 	domain = d = list_first_entry(&iommu->domain_list,
886 				      struct vfio_domain, next);
887 
888 	list_for_each_entry_continue(d, &iommu->domain_list, next) {
889 		iommu_unmap(d->domain, dma->iova, dma->size);
890 		cond_resched();
891 	}
892 
893 	iommu_iotlb_gather_init(&iotlb_gather);
894 	while (iova < end) {
895 		size_t unmapped, len;
896 		phys_addr_t phys, next;
897 
898 		phys = iommu_iova_to_phys(domain->domain, iova);
899 		if (WARN_ON(!phys)) {
900 			iova += PAGE_SIZE;
901 			continue;
902 		}
903 
904 		/*
905 		 * To optimize for fewer iommu_unmap() calls, each of which
906 		 * may require hardware cache flushing, try to find the
907 		 * largest contiguous physical memory chunk to unmap.
908 		 */
909 		for (len = PAGE_SIZE;
910 		     !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
911 			next = iommu_iova_to_phys(domain->domain, iova + len);
912 			if (next != phys + len)
913 				break;
914 		}
915 
916 		/*
917 		 * First, try to use fast unmap/unpin. In case of failure,
918 		 * switch to slow unmap/unpin path.
919 		 */
920 		unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys,
921 					    &unlocked, &unmapped_region_list,
922 					    &unmapped_region_cnt,
923 					    &iotlb_gather);
924 		if (!unmapped) {
925 			unmapped = unmap_unpin_slow(domain, dma, &iova, len,
926 						    phys, &unlocked);
927 			if (WARN_ON(!unmapped))
928 				break;
929 		}
930 	}
931 
932 	dma->iommu_mapped = false;
933 
934 	if (unmapped_region_cnt) {
935 		unlocked += vfio_sync_unpin(dma, domain, &unmapped_region_list,
936 					    &iotlb_gather);
937 	}
938 
939 	if (do_accounting) {
940 		vfio_lock_acct(dma, -unlocked, true);
941 		return 0;
942 	}
943 	return unlocked;
944 }
945 
946 static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
947 {
948 	vfio_unmap_unpin(iommu, dma, true);
949 	vfio_unlink_dma(iommu, dma);
950 	put_task_struct(dma->task);
951 	vfio_dma_bitmap_free(dma);
952 	kfree(dma);
953 	iommu->dma_avail++;
954 }
955 
956 static void vfio_update_pgsize_bitmap(struct vfio_iommu *iommu)
957 {
958 	struct vfio_domain *domain;
959 
960 	iommu->pgsize_bitmap = ULONG_MAX;
961 
962 	list_for_each_entry(domain, &iommu->domain_list, next)
963 		iommu->pgsize_bitmap &= domain->domain->pgsize_bitmap;
964 
965 	/*
966 	 * In case the IOMMU supports page sizes smaller than PAGE_SIZE
967 	 * we pretend PAGE_SIZE is supported and hide sub-PAGE_SIZE sizes.
968 	 * That way the user will be able to map/unmap buffers whose size/
969 	 * start address is aligned with PAGE_SIZE. Pinning code uses that
970 	 * granularity while iommu driver can use the sub-PAGE_SIZE size
971 	 * to map the buffer.
972 	 */
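	/*
	 * Illustrative example: on a 64K PAGE_SIZE kernel with an IOMMU
	 * reporting SZ_4K | SZ_2M, the 4K bit falls below PAGE_SIZE, so it
	 * is masked off and PAGE_SIZE is advertised instead, leaving
	 * 64K | 2M visible to userspace.
	 */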
973 	if (iommu->pgsize_bitmap & ~PAGE_MASK) {
974 		iommu->pgsize_bitmap &= PAGE_MASK;
975 		iommu->pgsize_bitmap |= PAGE_SIZE;
976 	}
977 }
978 
979 static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
980 			      struct vfio_dma *dma, dma_addr_t base_iova,
981 			      size_t pgsize)
982 {
983 	unsigned long pgshift = __ffs(pgsize);
984 	unsigned long nbits = dma->size >> pgshift;
985 	unsigned long bit_offset = (dma->iova - base_iova) >> pgshift;
986 	unsigned long copy_offset = bit_offset / BITS_PER_LONG;
987 	unsigned long shift = bit_offset % BITS_PER_LONG;
988 	unsigned long leftover;
989 
990 	/*
991 	 * mark all pages dirty if any IOMMU capable device is not able
992 	 * to report dirty pages and all pages are pinned and mapped.
993 	 */
994 	if (!iommu->pinned_page_dirty_scope && dma->iommu_mapped)
995 		bitmap_set(dma->bitmap, 0, nbits);
996 
997 	if (shift) {
998 		bitmap_shift_left(dma->bitmap, dma->bitmap, shift,
999 				  nbits + shift);
1000 
1001 		if (copy_from_user(&leftover,
1002 				   (void __user *)(bitmap + copy_offset),
1003 				   sizeof(leftover)))
1004 			return -EFAULT;
1005 
1006 		bitmap_or(dma->bitmap, dma->bitmap, &leftover, shift);
1007 	}
1008 
1009 	if (copy_to_user((void __user *)(bitmap + copy_offset), dma->bitmap,
1010 			 DIRTY_BITMAP_BYTES(nbits + shift)))
1011 		return -EFAULT;
1012 
1013 	return 0;
1014 }
1015 
1016 static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
1017 				  dma_addr_t iova, size_t size, size_t pgsize)
1018 {
1019 	struct vfio_dma *dma;
1020 	struct rb_node *n;
1021 	unsigned long pgshift = __ffs(pgsize);
1022 	int ret;
1023 
1024 	/*
1025 	 * GET_BITMAP request must fully cover vfio_dma mappings.  Multiple
1026 	 * vfio_dma mappings may be clubbed by specifying large ranges, but
1027 	 * there must not be any previous mappings bisected by the range.
1028 	 * An error will be returned if these conditions are not met.
1029 	 */
1030 	dma = vfio_find_dma(iommu, iova, 1);
1031 	if (dma && dma->iova != iova)
1032 		return -EINVAL;
1033 
1034 	dma = vfio_find_dma(iommu, iova + size - 1, 0);
1035 	if (dma && dma->iova + dma->size != iova + size)
1036 		return -EINVAL;
1037 
1038 	for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
1039 		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
1040 
1041 		if (dma->iova < iova)
1042 			continue;
1043 
1044 		if (dma->iova > iova + size - 1)
1045 			break;
1046 
1047 		ret = update_user_bitmap(bitmap, iommu, dma, iova, pgsize);
1048 		if (ret)
1049 			return ret;
1050 
1051 		/*
1052 		 * Re-populate bitmap to include all pinned pages which are
1053 		 * considered as dirty but exclude pages which are unpinned and
1054 		 * pages which are marked dirty by vfio_dma_rw()
1055 		 */
1056 		bitmap_clear(dma->bitmap, 0, dma->size >> pgshift);
1057 		vfio_dma_populate_bitmap(dma, pgsize);
1058 	}
1059 	return 0;
1060 }
1061 
1062 static int verify_bitmap_size(uint64_t npages, uint64_t bitmap_size)
1063 {
1064 	if (!npages || !bitmap_size || (bitmap_size > DIRTY_BITMAP_SIZE_MAX) ||
1065 	    (bitmap_size < DIRTY_BITMAP_BYTES(npages)))
1066 		return -EINVAL;
1067 
1068 	return 0;
1069 }
1070 
1071 static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
1072 			     struct vfio_iommu_type1_dma_unmap *unmap,
1073 			     struct vfio_bitmap *bitmap)
1074 {
1075 	struct vfio_dma *dma, *dma_last = NULL;
1076 	size_t unmapped = 0, pgsize;
1077 	int ret = 0, retries = 0;
1078 	unsigned long pgshift;
1079 
1080 	mutex_lock(&iommu->lock);
1081 
1082 	pgshift = __ffs(iommu->pgsize_bitmap);
1083 	pgsize = (size_t)1 << pgshift;
1084 
1085 	if (unmap->iova & (pgsize - 1)) {
1086 		ret = -EINVAL;
1087 		goto unlock;
1088 	}
1089 
1090 	if (!unmap->size || unmap->size & (pgsize - 1)) {
1091 		ret = -EINVAL;
1092 		goto unlock;
1093 	}
1094 
1095 	if (unmap->iova + unmap->size - 1 < unmap->iova ||
1096 	    unmap->size > SIZE_MAX) {
1097 		ret = -EINVAL;
1098 		goto unlock;
1099 	}
1100 
1101 	/* When dirty tracking is enabled, allow only min supported pgsize */
1102 	if ((unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) &&
1103 	    (!iommu->dirty_page_tracking || (bitmap->pgsize != pgsize))) {
1104 		ret = -EINVAL;
1105 		goto unlock;
1106 	}
1107 
1108 	WARN_ON((pgsize - 1) & PAGE_MASK);
1109 again:
1110 	/*
1111 	 * vfio-iommu-type1 (v1) - User mappings were coalesced together to
1112 	 * avoid tracking individual mappings.  This means that the granularity
1113 	 * of the original mapping was lost and the user was allowed to attempt
1114 	 * to unmap any range.  Depending on the contiguousness of physical
1115 	 * memory and page sizes supported by the IOMMU, arbitrary unmaps may
1116 	 * or may not have worked.  We only guaranteed unmap granularity
1117 	 * matching the original mapping; even though it was untracked here,
1118 	 * the original mappings are reflected in IOMMU mappings.  This
1119 	 * resulted in a couple unusual behaviors.  First, if a range is not
1120 	 * able to be unmapped, ex. a set of 4k pages that was mapped as a
1121 	 * 2M hugepage into the IOMMU, the unmap ioctl returns success but with
1122 	 * a zero sized unmap.  Also, if an unmap request overlaps the first
1123 	 * address of a hugepage, the IOMMU will unmap the entire hugepage.
1124 	 * This also returns success and the returned unmap size reflects the
1125 	 * actual size unmapped.
1126 	 *
1127 	 * We attempt to maintain compatibility with this "v1" interface, but
1128 	 * we take control out of the hands of the IOMMU.  Therefore, an unmap
1129 	 * request offset from the beginning of the original mapping will
1130 	 * return success with zero sized unmap.  And an unmap request covering
1131 	 * the first iova of mapping will unmap the entire range.
1132 	 *
1133 	 * The v2 version of this interface intends to be more deterministic.
1134 	 * Unmap requests must fully cover previous mappings.  Multiple
1135 	 * mappings may still be unmapped by specifying large ranges, but there
1136 	 * must not be any previous mappings bisected by the range.  An error
1137 	 * will be returned if these conditions are not met.  The v2 interface
1138 	 * will only return success and a size of zero if there were no
1139 	 * mappings within the range.
1140 	 */
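	/*
	 * Concrete example (illustrative): with a single 2MB mapping at
	 * iova 0, a v2 unmap of [4K, 8K) fails with -EINVAL because it
	 * bisects the mapping, whereas v1 returns success with a zero
	 * unmap->size; a v1 unmap of just [0, 4K) removes the entire 2MB
	 * mapping.
	 */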
1141 	if (iommu->v2) {
1142 		dma = vfio_find_dma(iommu, unmap->iova, 1);
1143 		if (dma && dma->iova != unmap->iova) {
1144 			ret = -EINVAL;
1145 			goto unlock;
1146 		}
1147 		dma = vfio_find_dma(iommu, unmap->iova + unmap->size - 1, 0);
1148 		if (dma && dma->iova + dma->size != unmap->iova + unmap->size) {
1149 			ret = -EINVAL;
1150 			goto unlock;
1151 		}
1152 	}
1153 
1154 	while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) {
1155 		if (!iommu->v2 && unmap->iova > dma->iova)
1156 			break;
1157 		/*
1158 		 * Only a task with the same address space as the one that
1159 		 * mapped this iova range is allowed to unmap it.
1160 		 */
1161 		if (dma->task->mm != current->mm)
1162 			break;
1163 
1164 		if (!RB_EMPTY_ROOT(&dma->pfn_list)) {
1165 			struct vfio_iommu_type1_dma_unmap nb_unmap;
1166 
1167 			if (dma_last == dma) {
1168 				BUG_ON(++retries > 10);
1169 			} else {
1170 				dma_last = dma;
1171 				retries = 0;
1172 			}
1173 
1174 			nb_unmap.iova = dma->iova;
1175 			nb_unmap.size = dma->size;
1176 
1177 			/*
1178 			 * Notify anyone (mdev vendor drivers) to invalidate and
1179 			 * unmap iovas within the range we're about to unmap.
1180 			 * Vendor drivers MUST unpin pages in response to an
1181 			 * invalidation.
1182 			 */
1183 			mutex_unlock(&iommu->lock);
1184 			blocking_notifier_call_chain(&iommu->notifier,
1185 						    VFIO_IOMMU_NOTIFY_DMA_UNMAP,
1186 						    &nb_unmap);
1187 			mutex_lock(&iommu->lock);
1188 			goto again;
1189 		}
1190 
1191 		if (unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
1192 			ret = update_user_bitmap(bitmap->data, iommu, dma,
1193 						 unmap->iova, pgsize);
1194 			if (ret)
1195 				break;
1196 		}
1197 
1198 		unmapped += dma->size;
1199 		vfio_remove_dma(iommu, dma);
1200 	}
1201 
1202 unlock:
1203 	mutex_unlock(&iommu->lock);
1204 
1205 	/* Report how much was unmapped */
1206 	unmap->size = unmapped;
1207 
1208 	return ret;
1209 }
1210 
1211 static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
1212 			  unsigned long pfn, long npage, int prot)
1213 {
1214 	struct vfio_domain *d;
1215 	int ret;
1216 
1217 	list_for_each_entry(d, &iommu->domain_list, next) {
1218 		ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
1219 				npage << PAGE_SHIFT, prot | d->prot);
1220 		if (ret)
1221 			goto unwind;
1222 
1223 		cond_resched();
1224 	}
1225 
1226 	return 0;
1227 
1228 unwind:
1229 	list_for_each_entry_continue_reverse(d, &iommu->domain_list, next) {
1230 		iommu_unmap(d->domain, iova, npage << PAGE_SHIFT);
1231 		cond_resched();
1232 	}
1233 
1234 	return ret;
1235 }
1236 
1237 static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma,
1238 			    size_t map_size)
1239 {
1240 	dma_addr_t iova = dma->iova;
1241 	unsigned long vaddr = dma->vaddr;
1242 	size_t size = map_size;
1243 	long npage;
1244 	unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1245 	int ret = 0;
1246 
1247 	while (size) {
1248 		/* Pin a contiguous chunk of memory */
1249 		npage = vfio_pin_pages_remote(dma, vaddr + dma->size,
1250 					      size >> PAGE_SHIFT, &pfn, limit);
1251 		if (npage <= 0) {
1252 			WARN_ON(!npage);
1253 			ret = (int)npage;
1254 			break;
1255 		}
1256 
1257 		/* Map it! */
1258 		ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage,
1259 				     dma->prot);
1260 		if (ret) {
1261 			vfio_unpin_pages_remote(dma, iova + dma->size, pfn,
1262 						npage, true);
1263 			break;
1264 		}
1265 
1266 		size -= npage << PAGE_SHIFT;
1267 		dma->size += npage << PAGE_SHIFT;
1268 	}
1269 
1270 	dma->iommu_mapped = true;
1271 
1272 	if (ret)
1273 		vfio_remove_dma(iommu, dma);
1274 
1275 	return ret;
1276 }
1277 
1278 /*
1279  * Check dma map request is within a valid iova range
1280  */
1281 static bool vfio_iommu_iova_dma_valid(struct vfio_iommu *iommu,
1282 				      dma_addr_t start, dma_addr_t end)
1283 {
1284 	struct list_head *iova = &iommu->iova_list;
1285 	struct vfio_iova *node;
1286 
1287 	list_for_each_entry(node, iova, list) {
1288 		if (start >= node->start && end <= node->end)
1289 			return true;
1290 	}
1291 
1292 	/*
1293 	 * Check for list_empty() as well since a container with
1294 	 * a single mdev device will have an empty list.
1295 	 */
1296 	return list_empty(iova);
1297 }
1298 
1299 static int vfio_dma_do_map(struct vfio_iommu *iommu,
1300 			   struct vfio_iommu_type1_dma_map *map)
1301 {
1302 	dma_addr_t iova = map->iova;
1303 	unsigned long vaddr = map->vaddr;
1304 	size_t size = map->size;
1305 	int ret = 0, prot = 0;
1306 	size_t pgsize;
1307 	struct vfio_dma *dma;
1308 
1309 	/* Verify that none of our __u64 fields overflow */
1310 	if (map->size != size || map->vaddr != vaddr || map->iova != iova)
1311 		return -EINVAL;
1312 
1313 	/* READ/WRITE from device perspective */
1314 	if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
1315 		prot |= IOMMU_WRITE;
1316 	if (map->flags & VFIO_DMA_MAP_FLAG_READ)
1317 		prot |= IOMMU_READ;
1318 
1319 	mutex_lock(&iommu->lock);
1320 
1321 	pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);
1322 
1323 	WARN_ON((pgsize - 1) & PAGE_MASK);
1324 
1325 	if (!prot || !size || (size | iova | vaddr) & (pgsize - 1)) {
1326 		ret = -EINVAL;
1327 		goto out_unlock;
1328 	}
1329 
1330 	/* Don't allow IOVA or virtual address wrap */
1331 	if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) {
1332 		ret = -EINVAL;
1333 		goto out_unlock;
1334 	}
1335 
1336 	if (vfio_find_dma(iommu, iova, size)) {
1337 		ret = -EEXIST;
1338 		goto out_unlock;
1339 	}
1340 
1341 	if (!iommu->dma_avail) {
1342 		ret = -ENOSPC;
1343 		goto out_unlock;
1344 	}
1345 
1346 	if (!vfio_iommu_iova_dma_valid(iommu, iova, iova + size - 1)) {
1347 		ret = -EINVAL;
1348 		goto out_unlock;
1349 	}
1350 
1351 	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
1352 	if (!dma) {
1353 		ret = -ENOMEM;
1354 		goto out_unlock;
1355 	}
1356 
1357 	iommu->dma_avail--;
1358 	dma->iova = iova;
1359 	dma->vaddr = vaddr;
1360 	dma->prot = prot;
1361 
1362 	/*
1363 	 * We need to be able to both add to a task's locked memory and test
1364 	 * against the locked memory limit and we need to be able to do both
1365 	 * outside of this call path as pinning can be asynchronous via the
1366 	 * external interfaces for mdev devices.  RLIMIT_MEMLOCK requires a
1367 	 * task_struct and VM locked pages requires an mm_struct, however
1368 	 * holding an indefinite mm reference is not recommended, therefore we
1369 	 * only hold a reference to a task.  We could hold a reference to
1370 	 * current, however QEMU uses this call path through vCPU threads,
1371 	 * which can be killed resulting in a NULL mm and failure in the unmap
1372 	 * path when called via a different thread.  Avoid this problem by
1373 	 * using the group_leader as threads within the same group require
1374 	 * both CLONE_THREAD and CLONE_VM and will therefore use the same
1375 	 * mm_struct.
1376 	 *
1377 	 * Previously we also used the task for testing CAP_IPC_LOCK at the
1378 	 * time of pinning and accounting, however has_capability() makes use
1379 	 * of real_cred, a copy-on-write field, so we can't guarantee that it
1380 	 * matches group_leader, or in fact that it might not change by the
1381 	 * time it's evaluated.  If a process were to call MAP_DMA with
1382 	 * CAP_IPC_LOCK but later drop it, it doesn't make sense that they
1383 	 * possibly see different results for an iommu_mapped vfio_dma vs
1384 	 * externally mapped.  Therefore track CAP_IPC_LOCK in vfio_dma at the
1385 	 * time of calling MAP_DMA.
1386 	 */
1387 	get_task_struct(current->group_leader);
1388 	dma->task = current->group_leader;
1389 	dma->lock_cap = capable(CAP_IPC_LOCK);
1390 
1391 	dma->pfn_list = RB_ROOT;
1392 
1393 	/* Insert zero-sized and grow as we map chunks of it */
1394 	vfio_link_dma(iommu, dma);
1395 
1396 	/* Don't pin and map if container doesn't contain IOMMU capable domain */
1397 	if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
1398 		dma->size = size;
1399 	else
1400 		ret = vfio_pin_map_dma(iommu, dma, size);
1401 
1402 	if (!ret && iommu->dirty_page_tracking) {
1403 		ret = vfio_dma_bitmap_alloc(dma, pgsize);
1404 		if (ret)
1405 			vfio_remove_dma(iommu, dma);
1406 	}
1407 
1408 out_unlock:
1409 	mutex_unlock(&iommu->lock);
1410 	return ret;
1411 }
1412 
1413 static int vfio_bus_type(struct device *dev, void *data)
1414 {
1415 	struct bus_type **bus = data;
1416 
1417 	if (*bus && *bus != dev->bus)
1418 		return -EINVAL;
1419 
1420 	*bus = dev->bus;
1421 
1422 	return 0;
1423 }
1424 
1425 static int vfio_iommu_replay(struct vfio_iommu *iommu,
1426 			     struct vfio_domain *domain)
1427 {
1428 	struct vfio_domain *d = NULL;
1429 	struct rb_node *n;
1430 	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1431 	int ret;
1432 
1433 	/* Arbitrarily pick the first domain in the list for lookups */
1434 	if (!list_empty(&iommu->domain_list))
1435 		d = list_first_entry(&iommu->domain_list,
1436 				     struct vfio_domain, next);
1437 
1438 	n = rb_first(&iommu->dma_list);
1439 
1440 	for (; n; n = rb_next(n)) {
1441 		struct vfio_dma *dma;
1442 		dma_addr_t iova;
1443 
1444 		dma = rb_entry(n, struct vfio_dma, node);
1445 		iova = dma->iova;
1446 
1447 		while (iova < dma->iova + dma->size) {
1448 			phys_addr_t phys;
1449 			size_t size;
1450 
1451 			if (dma->iommu_mapped) {
1452 				phys_addr_t p;
1453 				dma_addr_t i;
1454 
1455 				if (WARN_ON(!d)) { /* mapped w/o a domain?! */
1456 					ret = -EINVAL;
1457 					goto unwind;
1458 				}
1459 
1460 				phys = iommu_iova_to_phys(d->domain, iova);
1461 
1462 				if (WARN_ON(!phys)) {
1463 					iova += PAGE_SIZE;
1464 					continue;
1465 				}
1466 
1467 				size = PAGE_SIZE;
1468 				p = phys + size;
1469 				i = iova + size;
1470 				while (i < dma->iova + dma->size &&
1471 				       p == iommu_iova_to_phys(d->domain, i)) {
1472 					size += PAGE_SIZE;
1473 					p += PAGE_SIZE;
1474 					i += PAGE_SIZE;
1475 				}
1476 			} else {
1477 				unsigned long pfn;
1478 				unsigned long vaddr = dma->vaddr +
1479 						     (iova - dma->iova);
1480 				size_t n = dma->iova + dma->size - iova;
1481 				long npage;
1482 
1483 				npage = vfio_pin_pages_remote(dma, vaddr,
1484 							      n >> PAGE_SHIFT,
1485 							      &pfn, limit);
1486 				if (npage <= 0) {
1487 					WARN_ON(!npage);
1488 					ret = (int)npage;
1489 					goto unwind;
1490 				}
1491 
1492 				phys = pfn << PAGE_SHIFT;
1493 				size = npage << PAGE_SHIFT;
1494 			}
1495 
1496 			ret = iommu_map(domain->domain, iova, phys,
1497 					size, dma->prot | domain->prot);
1498 			if (ret) {
1499 				if (!dma->iommu_mapped)
1500 					vfio_unpin_pages_remote(dma, iova,
1501 							phys >> PAGE_SHIFT,
1502 							size >> PAGE_SHIFT,
1503 							true);
1504 				goto unwind;
1505 			}
1506 
1507 			iova += size;
1508 		}
1509 	}
1510 
1511 	/* All dmas are now mapped, defer to second tree walk for unwind */
1512 	for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
1513 		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
1514 
1515 		dma->iommu_mapped = true;
1516 	}
1517 
1518 	return 0;
1519 
1520 unwind:
1521 	for (; n; n = rb_prev(n)) {
1522 		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
1523 		dma_addr_t iova;
1524 
1525 		if (dma->iommu_mapped) {
1526 			iommu_unmap(domain->domain, dma->iova, dma->size);
1527 			continue;
1528 		}
1529 
1530 		iova = dma->iova;
1531 		while (iova < dma->iova + dma->size) {
1532 			phys_addr_t phys, p;
1533 			size_t size;
1534 			dma_addr_t i;
1535 
1536 			phys = iommu_iova_to_phys(domain->domain, iova);
1537 			if (!phys) {
1538 				iova += PAGE_SIZE;
1539 				continue;
1540 			}
1541 
1542 			size = PAGE_SIZE;
1543 			p = phys + size;
1544 			i = iova + size;
1545 			while (i < dma->iova + dma->size &&
1546 			       p == iommu_iova_to_phys(domain->domain, i)) {
1547 				size += PAGE_SIZE;
1548 				p += PAGE_SIZE;
1549 				i += PAGE_SIZE;
1550 			}
1551 
1552 			iommu_unmap(domain->domain, iova, size);
1553 			vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT,
1554 						size >> PAGE_SHIFT, true);
1555 		}
1556 	}
1557 
1558 	return ret;
1559 }
1560 
1561 /*
1562  * We change our unmap behavior slightly depending on whether the IOMMU
1563  * supports fine-grained superpages.  IOMMUs like AMD-Vi will use a superpage
1564  * for practically any contiguous power-of-two mapping we give it.  This means
1565  * we don't need to look for contiguous chunks ourselves to make unmapping
1566  * more efficient.  On IOMMUs with coarse-grained super pages, like Intel VT-d
1567  * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
1568  * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
1569  * hugetlbfs is in use.
1570  */
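/*
 * The probe below maps two contiguous pages and then asks the IOMMU driver to
 * unmap only the first one.  A driver that merged the pair into a superpage
 * has to tear down both pages at once, so iommu_unmap() reports more than
 * PAGE_SIZE; that is taken as evidence of fine-grained superpage (fgsp)
 * support.
 */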
1571 static void vfio_test_domain_fgsp(struct vfio_domain *domain)
1572 {
1573 	struct page *pages;
1574 	int ret, order = get_order(PAGE_SIZE * 2);
1575 
1576 	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
1577 	if (!pages)
1578 		return;
1579 
1580 	ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
1581 			IOMMU_READ | IOMMU_WRITE | domain->prot);
1582 	if (!ret) {
1583 		size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
1584 
1585 		if (unmapped == PAGE_SIZE)
1586 			iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
1587 		else
1588 			domain->fgsp = true;
1589 	}
1590 
1591 	__free_pages(pages, order);
1592 }
1593 
1594 static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
1595 					   struct iommu_group *iommu_group)
1596 {
1597 	struct vfio_group *g;
1598 
1599 	list_for_each_entry(g, &domain->group_list, next) {
1600 		if (g->iommu_group == iommu_group)
1601 			return g;
1602 	}
1603 
1604 	return NULL;
1605 }
1606 
1607 static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
1608 					       struct iommu_group *iommu_group)
1609 {
1610 	struct vfio_domain *domain;
1611 	struct vfio_group *group = NULL;
1612 
1613 	list_for_each_entry(domain, &iommu->domain_list, next) {
1614 		group = find_iommu_group(domain, iommu_group);
1615 		if (group)
1616 			return group;
1617 	}
1618 
1619 	if (iommu->external_domain)
1620 		group = find_iommu_group(iommu->external_domain, iommu_group);
1621 
1622 	return group;
1623 }
1624 
1625 static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu)
1626 {
1627 	struct vfio_domain *domain;
1628 	struct vfio_group *group;
1629 
1630 	list_for_each_entry(domain, &iommu->domain_list, next) {
1631 		list_for_each_entry(group, &domain->group_list, next) {
1632 			if (!group->pinned_page_dirty_scope) {
1633 				iommu->pinned_page_dirty_scope = false;
1634 				return;
1635 			}
1636 		}
1637 	}
1638 
1639 	if (iommu->external_domain) {
1640 		domain = iommu->external_domain;
1641 		list_for_each_entry(group, &domain->group_list, next) {
1642 			if (!group->pinned_page_dirty_scope) {
1643 				iommu->pinned_page_dirty_scope = false;
1644 				return;
1645 			}
1646 		}
1647 	}
1648 
1649 	iommu->pinned_page_dirty_scope = true;
1650 }
1651 
1652 static bool vfio_iommu_has_sw_msi(struct list_head *group_resv_regions,
1653 				  phys_addr_t *base)
1654 {
1655 	struct iommu_resv_region *region;
1656 	bool ret = false;
1657 
1658 	list_for_each_entry(region, group_resv_regions, list) {
1659 		/*
1660 		 * The presence of any 'real' MSI regions should take
1661 		 * precedence over the software-managed one if the
1662 		 * IOMMU driver happens to advertise both types.
1663 		 */
1664 		if (region->type == IOMMU_RESV_MSI) {
1665 			ret = false;
1666 			break;
1667 		}
1668 
1669 		if (region->type == IOMMU_RESV_SW_MSI) {
1670 			*base = region->start;
1671 			ret = true;
1672 		}
1673 	}
1674 
1675 	return ret;
1676 }
1677 
1678 static struct device *vfio_mdev_get_iommu_device(struct device *dev)
1679 {
1680 	struct device *(*fn)(struct device *dev);
1681 	struct device *iommu_device;
1682 
1683 	fn = symbol_get(mdev_get_iommu_device);
1684 	if (fn) {
1685 		iommu_device = fn(dev);
1686 		symbol_put(mdev_get_iommu_device);
1687 
1688 		return iommu_device;
1689 	}
1690 
1691 	return NULL;
1692 }
1693 
1694 static int vfio_mdev_attach_domain(struct device *dev, void *data)
1695 {
1696 	struct iommu_domain *domain = data;
1697 	struct device *iommu_device;
1698 
1699 	iommu_device = vfio_mdev_get_iommu_device(dev);
1700 	if (iommu_device) {
1701 		if (iommu_dev_feature_enabled(iommu_device, IOMMU_DEV_FEAT_AUX))
1702 			return iommu_aux_attach_device(domain, iommu_device);
1703 		else
1704 			return iommu_attach_device(domain, iommu_device);
1705 	}
1706 
1707 	return -EINVAL;
1708 }
1709 
1710 static int vfio_mdev_detach_domain(struct device *dev, void *data)
1711 {
1712 	struct iommu_domain *domain = data;
1713 	struct device *iommu_device;
1714 
1715 	iommu_device = vfio_mdev_get_iommu_device(dev);
1716 	if (iommu_device) {
1717 		if (iommu_dev_feature_enabled(iommu_device, IOMMU_DEV_FEAT_AUX))
1718 			iommu_aux_detach_device(domain, iommu_device);
1719 		else
1720 			iommu_detach_device(domain, iommu_device);
1721 	}
1722 
1723 	return 0;
1724 }
1725 
1726 static int vfio_iommu_attach_group(struct vfio_domain *domain,
1727 				   struct vfio_group *group)
1728 {
1729 	if (group->mdev_group)
1730 		return iommu_group_for_each_dev(group->iommu_group,
1731 						domain->domain,
1732 						vfio_mdev_attach_domain);
1733 	else
1734 		return iommu_attach_group(domain->domain, group->iommu_group);
1735 }
1736 
1737 static void vfio_iommu_detach_group(struct vfio_domain *domain,
1738 				    struct vfio_group *group)
1739 {
1740 	if (group->mdev_group)
1741 		iommu_group_for_each_dev(group->iommu_group, domain->domain,
1742 					 vfio_mdev_detach_domain);
1743 	else
1744 		iommu_detach_group(domain->domain, group->iommu_group);
1745 }
1746 
1747 static bool vfio_bus_is_mdev(struct bus_type *bus)
1748 {
1749 	struct bus_type *mdev_bus;
1750 	bool ret = false;
1751 
1752 	mdev_bus = symbol_get(mdev_bus_type);
1753 	if (mdev_bus) {
1754 		ret = (bus == mdev_bus);
1755 		symbol_put(mdev_bus_type);
1756 	}
1757 
1758 	return ret;
1759 }
1760 
1761 static int vfio_mdev_iommu_device(struct device *dev, void *data)
1762 {
1763 	struct device **old = data, *new;
1764 
1765 	new = vfio_mdev_get_iommu_device(dev);
1766 	if (!new || (*old && *old != new))
1767 		return -EINVAL;
1768 
1769 	*old = new;
1770 
1771 	return 0;
1772 }
1773 
1774 /*
1775  * This is a helper function to insert an address range into the iova list.
1776  * The list is initially created with a single entry corresponding to
1777  * the IOMMU domain geometry to which the device group is attached.
1778  * The list aperture gets modified when a new domain is added to the
1779  * container if the new aperture doesn't conflict with the current one
1780  * or with any existing dma mappings. The list is also modified to
1781  * exclude any reserved regions associated with the device group.
1782  */
1783 static int vfio_iommu_iova_insert(struct list_head *head,
1784 				  dma_addr_t start, dma_addr_t end)
1785 {
1786 	struct vfio_iova *region;
1787 
1788 	region = kmalloc(sizeof(*region), GFP_KERNEL);
1789 	if (!region)
1790 		return -ENOMEM;
1791 
1792 	INIT_LIST_HEAD(&region->list);
1793 	region->start = start;
1794 	region->end = end;
1795 
1796 	list_add_tail(&region->list, head);
1797 	return 0;
1798 }
1799 
1800 /*
1801  * Check whether the new iommu aperture conflicts with the existing aperture or with any
1802  * existing dma mappings.
1803  */
1804 static bool vfio_iommu_aper_conflict(struct vfio_iommu *iommu,
1805 				     dma_addr_t start, dma_addr_t end)
1806 {
1807 	struct vfio_iova *first, *last;
1808 	struct list_head *iova = &iommu->iova_list;
1809 
1810 	if (list_empty(iova))
1811 		return false;
1812 
1813 	/* Disjoint sets, return conflict */
1814 	first = list_first_entry(iova, struct vfio_iova, list);
1815 	last = list_last_entry(iova, struct vfio_iova, list);
1816 	if (start > last->end || end < first->start)
1817 		return true;
1818 
1819 	/* Check for any existing dma mappings below the new start */
1820 	if (start > first->start) {
1821 		if (vfio_find_dma(iommu, first->start, start - first->start))
1822 			return true;
1823 	}
1824 
1825 	/* Check for any existing dma mappings beyond the new end */
1826 	if (end < last->end) {
1827 		if (vfio_find_dma(iommu, end + 1, last->end - end))
1828 			return true;
1829 	}
1830 
1831 	return false;
1832 }
1833 
1834 /*
1835  * Resize iommu iova aperture window. This is called only if the new
1836  * aperture has no conflict with existing aperture and dma mappings.
1837  */
1838 static int vfio_iommu_aper_resize(struct list_head *iova,
1839 				  dma_addr_t start, dma_addr_t end)
1840 {
1841 	struct vfio_iova *node, *next;
1842 
1843 	if (list_empty(iova))
1844 		return vfio_iommu_iova_insert(iova, start, end);
1845 
1846 	/* Adjust iova list start */
1847 	list_for_each_entry_safe(node, next, iova, list) {
1848 		if (start < node->start)
1849 			break;
1850 		if (start >= node->start && start < node->end) {
1851 			node->start = start;
1852 			break;
1853 		}
1854 		/* Delete nodes before new start */
1855 		list_del(&node->list);
1856 		kfree(node);
1857 	}
1858 
1859 	/* Adjust iova list end */
1860 	list_for_each_entry_safe(node, next, iova, list) {
1861 		if (end > node->end)
1862 			continue;
1863 		if (end > node->start && end <= node->end) {
1864 			node->end = end;
1865 			continue;
1866 		}
1867 		/* Delete nodes after new end */
1868 		list_del(&node->list);
1869 		kfree(node);
1870 	}
1871 
1872 	return 0;
1873 }
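
/*
 * For illustration (hypothetical values): if the working copy of the iova
 * list holds a single node [0x0, ~0ULL] and the new domain reports an
 * aperture of [0x1000, 0x7fffffffffff], the first loop above bumps the
 * node's start up to 0x1000 and the second loop trims its end down to
 * 0x7fffffffffff. Nodes lying entirely outside the new aperture are freed,
 * so the resulting list is the intersection of the old ranges with the new
 * aperture.
 */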
1874 
1875 /*
1876  * Check reserved region conflicts with existing dma mappings
1877  */
1878 static bool vfio_iommu_resv_conflict(struct vfio_iommu *iommu,
1879 				     struct list_head *resv_regions)
1880 {
1881 	struct iommu_resv_region *region;
1882 
1883 	/* Check for conflict with existing dma mappings */
1884 	list_for_each_entry(region, resv_regions, list) {
1885 		if (region->type == IOMMU_RESV_DIRECT_RELAXABLE)
1886 			continue;
1887 
1888 		if (vfio_find_dma(iommu, region->start, region->length))
1889 			return true;
1890 	}
1891 
1892 	return false;
1893 }
1894 
1895 /*
1896  * Check the iova regions for overlap with reserved regions and
1897  * exclude the reserved regions from the iommu iova range
1898  */
1899 static int vfio_iommu_resv_exclude(struct list_head *iova,
1900 				   struct list_head *resv_regions)
1901 {
1902 	struct iommu_resv_region *resv;
1903 	struct vfio_iova *n, *next;
1904 
1905 	list_for_each_entry(resv, resv_regions, list) {
1906 		phys_addr_t start, end;
1907 
1908 		if (resv->type == IOMMU_RESV_DIRECT_RELAXABLE)
1909 			continue;
1910 
1911 		start = resv->start;
1912 		end = resv->start + resv->length - 1;
1913 
1914 		list_for_each_entry_safe(n, next, iova, list) {
1915 			int ret = 0;
1916 
1917 			/* No overlap */
1918 			if (start > n->end || end < n->start)
1919 				continue;
1920 			/*
1921 			 * Insert a new node if the current node overlaps with the
1922 			 * reserved region, to exclude that range from the valid iova range.
1923 			 * Note that the new node is inserted before the current
1924 			 * node and finally the current node is deleted, keeping
1925 			 * the list updated and sorted.
1926 			 */
1927 			if (start > n->start)
1928 				ret = vfio_iommu_iova_insert(&n->list, n->start,
1929 							     start - 1);
1930 			if (!ret && end < n->end)
1931 				ret = vfio_iommu_iova_insert(&n->list, end + 1,
1932 							     n->end);
1933 			if (ret)
1934 				return ret;
1935 
1936 			list_del(&n->list);
1937 			kfree(n);
1938 		}
1939 	}
1940 
1941 	if (list_empty(iova))
1942 		return -EINVAL;
1943 
1944 	return 0;
1945 }
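
/*
 * For instance (hypothetical reserved region): with a single valid node
 * [0x0, 0xffffffff] and a reserved MSI window of [0xfee00000, 0xfeefffff],
 * the loop above replaces the node with two nodes, [0x0, 0xfedfffff] and
 * [0xfef00000, 0xffffffff], leaving the list sorted and the reserved range
 * excluded.
 */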
1946 
1947 static void vfio_iommu_resv_free(struct list_head *resv_regions)
1948 {
1949 	struct iommu_resv_region *n, *next;
1950 
1951 	list_for_each_entry_safe(n, next, resv_regions, list) {
1952 		list_del(&n->list);
1953 		kfree(n);
1954 	}
1955 }
1956 
1957 static void vfio_iommu_iova_free(struct list_head *iova)
1958 {
1959 	struct vfio_iova *n, *next;
1960 
1961 	list_for_each_entry_safe(n, next, iova, list) {
1962 		list_del(&n->list);
1963 		kfree(n);
1964 	}
1965 }
1966 
1967 static int vfio_iommu_iova_get_copy(struct vfio_iommu *iommu,
1968 				    struct list_head *iova_copy)
1969 {
1970 	struct list_head *iova = &iommu->iova_list;
1971 	struct vfio_iova *n;
1972 	int ret;
1973 
1974 	list_for_each_entry(n, iova, list) {
1975 		ret = vfio_iommu_iova_insert(iova_copy, n->start, n->end);
1976 		if (ret)
1977 			goto out_free;
1978 	}
1979 
1980 	return 0;
1981 
1982 out_free:
1983 	vfio_iommu_iova_free(iova_copy);
1984 	return ret;
1985 }
1986 
1987 static void vfio_iommu_iova_insert_copy(struct vfio_iommu *iommu,
1988 					struct list_head *iova_copy)
1989 {
1990 	struct list_head *iova = &iommu->iova_list;
1991 
1992 	vfio_iommu_iova_free(iova);
1993 
1994 	list_splice_tail(iova_copy, iova);
1995 }
1996 
1997 static int vfio_iommu_type1_attach_group(void *iommu_data,
1998 					 struct iommu_group *iommu_group)
1999 {
2000 	struct vfio_iommu *iommu = iommu_data;
2001 	struct vfio_group *group;
2002 	struct vfio_domain *domain, *d;
2003 	struct bus_type *bus = NULL;
2004 	int ret;
2005 	bool resv_msi, msi_remap;
2006 	phys_addr_t resv_msi_base = 0;
2007 	struct iommu_domain_geometry geo;
2008 	LIST_HEAD(iova_copy);
2009 	LIST_HEAD(group_resv_regions);
2010 
2011 	mutex_lock(&iommu->lock);
2012 
2013 	/* Check for duplicates */
2014 	if (vfio_iommu_find_iommu_group(iommu, iommu_group)) {
2015 		mutex_unlock(&iommu->lock);
2016 		return -EINVAL;
2017 	}
2018 
2019 	group = kzalloc(sizeof(*group), GFP_KERNEL);
2020 	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
2021 	if (!group || !domain) {
2022 		ret = -ENOMEM;
2023 		goto out_free;
2024 	}
2025 
2026 	group->iommu_group = iommu_group;
2027 
2028 	/* Determine bus_type in order to allocate a domain */
2029 	ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
2030 	if (ret)
2031 		goto out_free;
2032 
2033 	if (vfio_bus_is_mdev(bus)) {
2034 		struct device *iommu_device = NULL;
2035 
2036 		group->mdev_group = true;
2037 
2038 		/* Determine the isolation type */
2039 		ret = iommu_group_for_each_dev(iommu_group, &iommu_device,
2040 					       vfio_mdev_iommu_device);
2041 		if (ret || !iommu_device) {
2042 			if (!iommu->external_domain) {
2043 				INIT_LIST_HEAD(&domain->group_list);
2044 				iommu->external_domain = domain;
2045 				vfio_update_pgsize_bitmap(iommu);
2046 			} else {
2047 				kfree(domain);
2048 			}
2049 
2050 			list_add(&group->next,
2051 				 &iommu->external_domain->group_list);
2052 			/*
2053 			 * A non-iommu-backed group cannot dirty memory directly;
2054 			 * it can only use interfaces that provide dirty
2055 			 * tracking.
2056 			 * The iommu scope can only be promoted with the
2057 			 * addition of a dirty tracking group.
2058 			 */
2059 			group->pinned_page_dirty_scope = true;
2060 			if (!iommu->pinned_page_dirty_scope)
2061 				update_pinned_page_dirty_scope(iommu);
2062 			mutex_unlock(&iommu->lock);
2063 
2064 			return 0;
2065 		}
2066 
2067 		bus = iommu_device->bus;
2068 	}
2069 
2070 	domain->domain = iommu_domain_alloc(bus);
2071 	if (!domain->domain) {
2072 		ret = -EIO;
2073 		goto out_free;
2074 	}
2075 
2076 	if (iommu->nesting) {
2077 		int attr = 1;
2078 
2079 		ret = iommu_domain_set_attr(domain->domain, DOMAIN_ATTR_NESTING,
2080 					    &attr);
2081 		if (ret)
2082 			goto out_domain;
2083 	}
2084 
2085 	ret = vfio_iommu_attach_group(domain, group);
2086 	if (ret)
2087 		goto out_domain;
2088 
2089 	/* Get aperture info */
2090 	iommu_domain_get_attr(domain->domain, DOMAIN_ATTR_GEOMETRY, &geo);
2091 
2092 	if (vfio_iommu_aper_conflict(iommu, geo.aperture_start,
2093 				     geo.aperture_end)) {
2094 		ret = -EINVAL;
2095 		goto out_detach;
2096 	}
2097 
2098 	ret = iommu_get_group_resv_regions(iommu_group, &group_resv_regions);
2099 	if (ret)
2100 		goto out_detach;
2101 
2102 	if (vfio_iommu_resv_conflict(iommu, &group_resv_regions)) {
2103 		ret = -EINVAL;
2104 		goto out_detach;
2105 	}
2106 
2107 	/*
2108 	 * We don't want to work on the original iova list as the list
2109 	 * gets modified and in case of failure we have to retain the
2110 	 * original list. Get a copy here.
2111 	 */
2112 	ret = vfio_iommu_iova_get_copy(iommu, &iova_copy);
2113 	if (ret)
2114 		goto out_detach;
2115 
2116 	ret = vfio_iommu_aper_resize(&iova_copy, geo.aperture_start,
2117 				     geo.aperture_end);
2118 	if (ret)
2119 		goto out_detach;
2120 
2121 	ret = vfio_iommu_resv_exclude(&iova_copy, &group_resv_regions);
2122 	if (ret)
2123 		goto out_detach;
2124 
2125 	resv_msi = vfio_iommu_has_sw_msi(&group_resv_regions, &resv_msi_base);
2126 
2127 	INIT_LIST_HEAD(&domain->group_list);
2128 	list_add(&group->next, &domain->group_list);
2129 
2130 	msi_remap = irq_domain_check_msi_remap() ||
2131 		    iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
2132 
2133 	if (!allow_unsafe_interrupts && !msi_remap) {
2134 		pr_warn("%s: No interrupt remapping support.  Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
2135 		       __func__);
2136 		ret = -EPERM;
2137 		goto out_detach;
2138 	}
2139 
2140 	if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
2141 		domain->prot |= IOMMU_CACHE;
2142 
2143 	/*
2144 	 * Try to match an existing compatible domain.  We don't want to
2145 	 * preclude an IOMMU driver supporting multiple bus_types and being
2146 	 * able to include different bus_types in the same IOMMU domain, so
2147 	 * we test whether the domains use the same iommu_ops rather than
2148 	 * testing if they're on the same bus_type.
2149 	 */
2150 	list_for_each_entry(d, &iommu->domain_list, next) {
2151 		if (d->domain->ops == domain->domain->ops &&
2152 		    d->prot == domain->prot) {
2153 			vfio_iommu_detach_group(domain, group);
2154 			if (!vfio_iommu_attach_group(d, group)) {
2155 				list_add(&group->next, &d->group_list);
2156 				iommu_domain_free(domain->domain);
2157 				kfree(domain);
2158 				goto done;
2159 			}
2160 
2161 			ret = vfio_iommu_attach_group(domain, group);
2162 			if (ret)
2163 				goto out_domain;
2164 		}
2165 	}
2166 
2167 	vfio_test_domain_fgsp(domain);
2168 
2169 	/* replay mappings on new domains */
2170 	ret = vfio_iommu_replay(iommu, domain);
2171 	if (ret)
2172 		goto out_detach;
2173 
2174 	if (resv_msi) {
2175 		ret = iommu_get_msi_cookie(domain->domain, resv_msi_base);
2176 		if (ret && ret != -ENODEV)
2177 			goto out_detach;
2178 	}
2179 
2180 	list_add(&domain->next, &iommu->domain_list);
2181 	vfio_update_pgsize_bitmap(iommu);
2182 done:
2183 	/* Delete the old iova list and insert the new one */
2184 	vfio_iommu_iova_insert_copy(iommu, &iova_copy);
2185 
2186 	/*
2187 	 * An iommu backed group can dirty memory directly and therefore
2188 	 * demotes the iommu scope until it declares itself dirty tracking
2189 	 * capable via the page pinning interface.
2190 	 */
2191 	iommu->pinned_page_dirty_scope = false;
2192 	mutex_unlock(&iommu->lock);
2193 	vfio_iommu_resv_free(&group_resv_regions);
2194 
2195 	return 0;
2196 
2197 out_detach:
2198 	vfio_iommu_detach_group(domain, group);
2199 out_domain:
2200 	iommu_domain_free(domain->domain);
2201 	vfio_iommu_iova_free(&iova_copy);
2202 	vfio_iommu_resv_free(&group_resv_regions);
2203 out_free:
2204 	kfree(domain);
2205 	kfree(group);
2206 	mutex_unlock(&iommu->lock);
2207 	return ret;
2208 }
2209 
2210 static void vfio_iommu_unmap_unpin_all(struct vfio_iommu *iommu)
2211 {
2212 	struct rb_node *node;
2213 
2214 	while ((node = rb_first(&iommu->dma_list)))
2215 		vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node));
2216 }
2217 
2218 static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
2219 {
2220 	struct rb_node *n, *p;
2221 
2222 	n = rb_first(&iommu->dma_list);
2223 	for (; n; n = rb_next(n)) {
2224 		struct vfio_dma *dma;
2225 		long locked = 0, unlocked = 0;
2226 
2227 		dma = rb_entry(n, struct vfio_dma, node);
2228 		unlocked += vfio_unmap_unpin(iommu, dma, false);
2229 		p = rb_first(&dma->pfn_list);
2230 		for (; p; p = rb_next(p)) {
2231 			struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn,
2232 							 node);
2233 
2234 			if (!is_invalid_reserved_pfn(vpfn->pfn))
2235 				locked++;
2236 		}
2237 		vfio_lock_acct(dma, locked - unlocked, true);
2238 	}
2239 }
2240 
2241 static void vfio_sanity_check_pfn_list(struct vfio_iommu *iommu)
2242 {
2243 	struct rb_node *n;
2244 
2245 	n = rb_first(&iommu->dma_list);
2246 	for (; n; n = rb_next(n)) {
2247 		struct vfio_dma *dma;
2248 
2249 		dma = rb_entry(n, struct vfio_dma, node);
2250 
2251 		if (WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list)))
2252 			break;
2253 	}
2254 	/* mdev vendor driver must unregister notifier */
2255 	WARN_ON(iommu->notifier.head);
2256 }
2257 
2258 /*
2259  * Called when a domain is removed in detach. The removed domain may have
2260  * been the one constraining the iova aperture window. Recompute the
2261  * aperture as the intersection of the remaining domains' geometries.
2262  */
2263 static void vfio_iommu_aper_expand(struct vfio_iommu *iommu,
2264 				   struct list_head *iova_copy)
2265 {
2266 	struct vfio_domain *domain;
2267 	struct iommu_domain_geometry geo;
2268 	struct vfio_iova *node;
2269 	dma_addr_t start = 0;
2270 	dma_addr_t end = (dma_addr_t)~0;
2271 
2272 	if (list_empty(iova_copy))
2273 		return;
2274 
2275 	list_for_each_entry(domain, &iommu->domain_list, next) {
2276 		iommu_domain_get_attr(domain->domain, DOMAIN_ATTR_GEOMETRY,
2277 				      &geo);
2278 		if (geo.aperture_start > start)
2279 			start = geo.aperture_start;
2280 		if (geo.aperture_end < end)
2281 			end = geo.aperture_end;
2282 	}
2283 
2284 	/* Modify aperture limits. The new aperture is either the same or bigger. */
2285 	node = list_first_entry(iova_copy, struct vfio_iova, list);
2286 	node->start = start;
2287 	node = list_last_entry(iova_copy, struct vfio_iova, list);
2288 	node->end = end;
2289 }
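
/*
 * Example with hypothetical geometries: if the domains remaining after the
 * detach report apertures [0x0, 0xffffffffffff] and [0x0, 0x7fffffffffff],
 * the computed window is their intersection [0x0, 0x7fffffffffff]; relative
 * to the window the removed domain imposed, the aperture can only stay the
 * same or grow.
 */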
2290 
2291 /*
2292  * Called when a group is detached. The reserved regions for that
2293  * group can be part of valid iova now. But since reserved regions
2294  * may be duplicated among groups, populate the iova valid regions
2295  * list again.
2296  */
2297 static int vfio_iommu_resv_refresh(struct vfio_iommu *iommu,
2298 				   struct list_head *iova_copy)
2299 {
2300 	struct vfio_domain *d;
2301 	struct vfio_group *g;
2302 	struct vfio_iova *node;
2303 	dma_addr_t start, end;
2304 	LIST_HEAD(resv_regions);
2305 	int ret;
2306 
2307 	if (list_empty(iova_copy))
2308 		return -EINVAL;
2309 
2310 	list_for_each_entry(d, &iommu->domain_list, next) {
2311 		list_for_each_entry(g, &d->group_list, next) {
2312 			ret = iommu_get_group_resv_regions(g->iommu_group,
2313 							   &resv_regions);
2314 			if (ret)
2315 				goto done;
2316 		}
2317 	}
2318 
2319 	node = list_first_entry(iova_copy, struct vfio_iova, list);
2320 	start = node->start;
2321 	node = list_last_entry(iova_copy, struct vfio_iova, list);
2322 	end = node->end;
2323 
2324 	/* purge the iova list and create a new one */
2325 	vfio_iommu_iova_free(iova_copy);
2326 
2327 	ret = vfio_iommu_aper_resize(iova_copy, start, end);
2328 	if (ret)
2329 		goto done;
2330 
2331 	/* Exclude current reserved regions from iova ranges */
2332 	ret = vfio_iommu_resv_exclude(iova_copy, &resv_regions);
2333 done:
2334 	vfio_iommu_resv_free(&resv_regions);
2335 	return ret;
2336 }
2337 
2338 static void vfio_iommu_type1_detach_group(void *iommu_data,
2339 					  struct iommu_group *iommu_group)
2340 {
2341 	struct vfio_iommu *iommu = iommu_data;
2342 	struct vfio_domain *domain;
2343 	struct vfio_group *group;
2344 	bool update_dirty_scope = false;
2345 	LIST_HEAD(iova_copy);
2346 
2347 	mutex_lock(&iommu->lock);
2348 
2349 	if (iommu->external_domain) {
2350 		group = find_iommu_group(iommu->external_domain, iommu_group);
2351 		if (group) {
2352 			update_dirty_scope = !group->pinned_page_dirty_scope;
2353 			list_del(&group->next);
2354 			kfree(group);
2355 
2356 			if (list_empty(&iommu->external_domain->group_list)) {
2357 				vfio_sanity_check_pfn_list(iommu);
2358 
2359 				if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
2360 					vfio_iommu_unmap_unpin_all(iommu);
2361 
2362 				kfree(iommu->external_domain);
2363 				iommu->external_domain = NULL;
2364 			}
2365 			goto detach_group_done;
2366 		}
2367 	}
2368 
2369 	/*
2370 	 * Get a copy of iova list. This will be used to update
2371 	 * and to replace the current one later. Please note that
2372 	 * we will leave the original list as it is if update fails.
2373 	 */
2374 	vfio_iommu_iova_get_copy(iommu, &iova_copy);
2375 
2376 	list_for_each_entry(domain, &iommu->domain_list, next) {
2377 		group = find_iommu_group(domain, iommu_group);
2378 		if (!group)
2379 			continue;
2380 
2381 		vfio_iommu_detach_group(domain, group);
2382 		update_dirty_scope = !group->pinned_page_dirty_scope;
2383 		list_del(&group->next);
2384 		kfree(group);
2385 		/*
2386 		 * Group ownership provides privilege; if the group list is
2387 		 * empty, the domain goes away. If it's the last iommu-backed
2388 		 * domain and no external domain exists, then all the
2389 		 * mappings go away too. If it's the last iommu-backed domain
2390 		 * and an external domain exists, update accounting.
2391 		 */
2392 		if (list_empty(&domain->group_list)) {
2393 			if (list_is_singular(&iommu->domain_list)) {
2394 				if (!iommu->external_domain)
2395 					vfio_iommu_unmap_unpin_all(iommu);
2396 				else
2397 					vfio_iommu_unmap_unpin_reaccount(iommu);
2398 			}
2399 			iommu_domain_free(domain->domain);
2400 			list_del(&domain->next);
2401 			kfree(domain);
2402 			vfio_iommu_aper_expand(iommu, &iova_copy);
2403 			vfio_update_pgsize_bitmap(iommu);
2404 		}
2405 		break;
2406 	}
2407 
2408 	if (!vfio_iommu_resv_refresh(iommu, &iova_copy))
2409 		vfio_iommu_iova_insert_copy(iommu, &iova_copy);
2410 	else
2411 		vfio_iommu_iova_free(&iova_copy);
2412 
2413 detach_group_done:
2414 	/*
2415 	 * Removal of a group without dirty tracking may allow the iommu scope
2416 	 * to be promoted.
2417 	 */
2418 	if (update_dirty_scope)
2419 		update_pinned_page_dirty_scope(iommu);
2420 	mutex_unlock(&iommu->lock);
2421 }
2422 
2423 static void *vfio_iommu_type1_open(unsigned long arg)
2424 {
2425 	struct vfio_iommu *iommu;
2426 
2427 	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
2428 	if (!iommu)
2429 		return ERR_PTR(-ENOMEM);
2430 
2431 	switch (arg) {
2432 	case VFIO_TYPE1_IOMMU:
2433 		break;
2434 	case VFIO_TYPE1_NESTING_IOMMU:
2435 		iommu->nesting = true;
2436 		fallthrough;
2437 	case VFIO_TYPE1v2_IOMMU:
2438 		iommu->v2 = true;
2439 		break;
2440 	default:
2441 		kfree(iommu);
2442 		return ERR_PTR(-EINVAL);
2443 	}
2444 
2445 	INIT_LIST_HEAD(&iommu->domain_list);
2446 	INIT_LIST_HEAD(&iommu->iova_list);
2447 	iommu->dma_list = RB_ROOT;
2448 	iommu->dma_avail = dma_entry_limit;
2449 	mutex_init(&iommu->lock);
2450 	BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
2451 
2452 	return iommu;
2453 }
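
/*
 * Hypothetical userspace sketch of selecting this backend (group creation and
 * VFIO_GROUP_SET_CONTAINER are elided; VFIO_SET_IOMMU only succeeds once a
 * group has been added to the container):
 *
 *	int container_fd = open("/dev/vfio/vfio", O_RDWR);
 *
 *	if (ioctl(container_fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU) == 1)
 *		ioctl(container_fd, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU);
 */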
2454 
2455 static void vfio_release_domain(struct vfio_domain *domain, bool external)
2456 {
2457 	struct vfio_group *group, *group_tmp;
2458 
2459 	list_for_each_entry_safe(group, group_tmp,
2460 				 &domain->group_list, next) {
2461 		if (!external)
2462 			vfio_iommu_detach_group(domain, group);
2463 		list_del(&group->next);
2464 		kfree(group);
2465 	}
2466 
2467 	if (!external)
2468 		iommu_domain_free(domain->domain);
2469 }
2470 
2471 static void vfio_iommu_type1_release(void *iommu_data)
2472 {
2473 	struct vfio_iommu *iommu = iommu_data;
2474 	struct vfio_domain *domain, *domain_tmp;
2475 
2476 	if (iommu->external_domain) {
2477 		vfio_release_domain(iommu->external_domain, true);
2478 		vfio_sanity_check_pfn_list(iommu);
2479 		kfree(iommu->external_domain);
2480 	}
2481 
2482 	vfio_iommu_unmap_unpin_all(iommu);
2483 
2484 	list_for_each_entry_safe(domain, domain_tmp,
2485 				 &iommu->domain_list, next) {
2486 		vfio_release_domain(domain, false);
2487 		list_del(&domain->next);
2488 		kfree(domain);
2489 	}
2490 
2491 	vfio_iommu_iova_free(&iommu->iova_list);
2492 
2493 	kfree(iommu);
2494 }
2495 
2496 static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
2497 {
2498 	struct vfio_domain *domain;
2499 	int ret = 1;
2500 
2501 	mutex_lock(&iommu->lock);
2502 	list_for_each_entry(domain, &iommu->domain_list, next) {
2503 		if (!(domain->prot & IOMMU_CACHE)) {
2504 			ret = 0;
2505 			break;
2506 		}
2507 	}
2508 	mutex_unlock(&iommu->lock);
2509 
2510 	return ret;
2511 }
2512 
2513 static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu,
2514 					    unsigned long arg)
2515 {
2516 	switch (arg) {
2517 	case VFIO_TYPE1_IOMMU:
2518 	case VFIO_TYPE1v2_IOMMU:
2519 	case VFIO_TYPE1_NESTING_IOMMU:
2520 		return 1;
2521 	case VFIO_DMA_CC_IOMMU:
2522 		if (!iommu)
2523 			return 0;
2524 		return vfio_domains_have_iommu_cache(iommu);
2525 	default:
2526 		return 0;
2527 	}
2528 }
2529 
2530 static int vfio_iommu_iova_add_cap(struct vfio_info_cap *caps,
2531 		 struct vfio_iommu_type1_info_cap_iova_range *cap_iovas,
2532 		 size_t size)
2533 {
2534 	struct vfio_info_cap_header *header;
2535 	struct vfio_iommu_type1_info_cap_iova_range *iova_cap;
2536 
2537 	header = vfio_info_cap_add(caps, size,
2538 				   VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE, 1);
2539 	if (IS_ERR(header))
2540 		return PTR_ERR(header);
2541 
2542 	iova_cap = container_of(header,
2543 				struct vfio_iommu_type1_info_cap_iova_range,
2544 				header);
2545 	iova_cap->nr_iovas = cap_iovas->nr_iovas;
2546 	memcpy(iova_cap->iova_ranges, cap_iovas->iova_ranges,
2547 	       cap_iovas->nr_iovas * sizeof(*cap_iovas->iova_ranges));
2548 	return 0;
2549 }
2550 
2551 static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
2552 				      struct vfio_info_cap *caps)
2553 {
2554 	struct vfio_iommu_type1_info_cap_iova_range *cap_iovas;
2555 	struct vfio_iova *iova;
2556 	size_t size;
2557 	int iovas = 0, i = 0, ret;
2558 
2559 	list_for_each_entry(iova, &iommu->iova_list, list)
2560 		iovas++;
2561 
2562 	if (!iovas) {
2563 		/*
2564 		 * Return 0 as a container with a single mdev device
2565 		 * will have an empty list
2566 		 */
2567 		return 0;
2568 	}
2569 
2570 	size = sizeof(*cap_iovas) + (iovas * sizeof(*cap_iovas->iova_ranges));
2571 
2572 	cap_iovas = kzalloc(size, GFP_KERNEL);
2573 	if (!cap_iovas)
2574 		return -ENOMEM;
2575 
2576 	cap_iovas->nr_iovas = iovas;
2577 
2578 	list_for_each_entry(iova, &iommu->iova_list, list) {
2579 		cap_iovas->iova_ranges[i].start = iova->start;
2580 		cap_iovas->iova_ranges[i].end = iova->end;
2581 		i++;
2582 	}
2583 
2584 	ret = vfio_iommu_iova_add_cap(caps, cap_iovas, size);
2585 
2586 	kfree(cap_iovas);
2587 	return ret;
2588 }
2589 
2590 static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu,
2591 					   struct vfio_info_cap *caps)
2592 {
2593 	struct vfio_iommu_type1_info_cap_migration cap_mig;
2594 
2595 	cap_mig.header.id = VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION;
2596 	cap_mig.header.version = 1;
2597 
2598 	cap_mig.flags = 0;
2599 	/* support minimum pgsize */
2600 	cap_mig.pgsize_bitmap = (size_t)1 << __ffs(iommu->pgsize_bitmap);
2601 	cap_mig.max_dirty_bitmap_size = DIRTY_BITMAP_SIZE_MAX;
2602 
2603 	return vfio_info_add_capability(caps, &cap_mig.header, sizeof(cap_mig));
2604 }
2605 
2606 static int vfio_iommu_dma_avail_build_caps(struct vfio_iommu *iommu,
2607 					   struct vfio_info_cap *caps)
2608 {
2609 	struct vfio_iommu_type1_info_dma_avail cap_dma_avail;
2610 
2611 	cap_dma_avail.header.id = VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL;
2612 	cap_dma_avail.header.version = 1;
2613 
2614 	cap_dma_avail.avail = iommu->dma_avail;
2615 
2616 	return vfio_info_add_capability(caps, &cap_dma_avail.header,
2617 					sizeof(cap_dma_avail));
2618 }
2619 
2620 static int vfio_iommu_type1_get_info(struct vfio_iommu *iommu,
2621 				     unsigned long arg)
2622 {
2623 	struct vfio_iommu_type1_info info;
2624 	unsigned long minsz;
2625 	struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
2626 	unsigned long capsz;
2627 	int ret;
2628 
2629 	minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);
2630 
2631 	/* For backward compatibility, cannot require this */
2632 	capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);
2633 
2634 	if (copy_from_user(&info, (void __user *)arg, minsz))
2635 		return -EFAULT;
2636 
2637 	if (info.argsz < minsz)
2638 		return -EINVAL;
2639 
2640 	if (info.argsz >= capsz) {
2641 		minsz = capsz;
2642 		info.cap_offset = 0; /* output, no-recopy necessary */
2643 	}
2644 
2645 	mutex_lock(&iommu->lock);
2646 	info.flags = VFIO_IOMMU_INFO_PGSIZES;
2647 
2648 	info.iova_pgsizes = iommu->pgsize_bitmap;
2649 
2650 	ret = vfio_iommu_migration_build_caps(iommu, &caps);
2651 
2652 	if (!ret)
2653 		ret = vfio_iommu_dma_avail_build_caps(iommu, &caps);
2654 
2655 	if (!ret)
2656 		ret = vfio_iommu_iova_build_caps(iommu, &caps);
2657 
2658 	mutex_unlock(&iommu->lock);
2659 
2660 	if (ret)
2661 		return ret;
2662 
2663 	if (caps.size) {
2664 		info.flags |= VFIO_IOMMU_INFO_CAPS;
2665 
2666 		if (info.argsz < sizeof(info) + caps.size) {
2667 			info.argsz = sizeof(info) + caps.size;
2668 		} else {
2669 			vfio_info_cap_shift(&caps, sizeof(info));
2670 			if (copy_to_user((void __user *)arg +
2671 					sizeof(info), caps.buf,
2672 					caps.size)) {
2673 				kfree(caps.buf);
2674 				return -EFAULT;
2675 			}
2676 			info.cap_offset = sizeof(info);
2677 		}
2678 
2679 		kfree(caps.buf);
2680 	}
2681 
2682 	return copy_to_user((void __user *)arg, &info, minsz) ?
2683 			-EFAULT : 0;
2684 }
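
/*
 * Hypothetical userspace sketch of consuming the info and capability chain
 * built above (the 4 KiB buffer size is arbitrary):
 *
 *	struct vfio_iommu_type1_info *info = calloc(1, 4096);
 *
 *	info->argsz = 4096;
 *	ioctl(container_fd, VFIO_IOMMU_GET_INFO, info);
 *	if ((info->flags & VFIO_IOMMU_INFO_CAPS) && info->cap_offset) {
 *		struct vfio_info_cap_header *hdr = (void *)info + info->cap_offset;
 *
 *		while (hdr) {
 *			if (hdr->id == VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE)
 *				;	// parse the usable IOVA ranges here
 *			hdr = hdr->next ? (void *)info + hdr->next : NULL;
 *		}
 *	}
 */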
2685 
2686 static int vfio_iommu_type1_map_dma(struct vfio_iommu *iommu,
2687 				    unsigned long arg)
2688 {
2689 	struct vfio_iommu_type1_dma_map map;
2690 	unsigned long minsz;
2691 	uint32_t mask = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
2692 
2693 	minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);
2694 
2695 	if (copy_from_user(&map, (void __user *)arg, minsz))
2696 		return -EFAULT;
2697 
2698 	if (map.argsz < minsz || map.flags & ~mask)
2699 		return -EINVAL;
2700 
2701 	return vfio_dma_do_map(iommu, &map);
2702 }
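
/*
 * Hypothetical userspace sketch for the map ioctl handled above (addresses,
 * sizes and container_fd are illustrative):
 *
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *		.vaddr = (__u64)(uintptr_t)buf,	// page-aligned process buffer
 *		.iova  = 0x100000,		// example device address
 *		.size  = 0x200000,		// multiple of an IOMMU page size
 *	};
 *
 *	if (ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map))
 *		perror("VFIO_IOMMU_MAP_DMA");
 */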
2703 
2704 static int vfio_iommu_type1_unmap_dma(struct vfio_iommu *iommu,
2705 				      unsigned long arg)
2706 {
2707 	struct vfio_iommu_type1_dma_unmap unmap;
2708 	struct vfio_bitmap bitmap = { 0 };
2709 	unsigned long minsz;
2710 	int ret;
2711 
2712 	minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);
2713 
2714 	if (copy_from_user(&unmap, (void __user *)arg, minsz))
2715 		return -EFAULT;
2716 
2717 	if (unmap.argsz < minsz ||
2718 	    unmap.flags & ~VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP)
2719 		return -EINVAL;
2720 
2721 	if (unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
2722 		unsigned long pgshift;
2723 
2724 		if (unmap.argsz < (minsz + sizeof(bitmap)))
2725 			return -EINVAL;
2726 
2727 		if (copy_from_user(&bitmap,
2728 				   (void __user *)(arg + minsz),
2729 				   sizeof(bitmap)))
2730 			return -EFAULT;
2731 
2732 		if (!access_ok((void __user *)bitmap.data, bitmap.size))
2733 			return -EINVAL;
2734 
2735 		pgshift = __ffs(bitmap.pgsize);
2736 		ret = verify_bitmap_size(unmap.size >> pgshift,
2737 					 bitmap.size);
2738 		if (ret)
2739 			return ret;
2740 	}
2741 
2742 	ret = vfio_dma_do_unmap(iommu, &unmap, &bitmap);
2743 	if (ret)
2744 		return ret;
2745 
2746 	return copy_to_user((void __user *)arg, &unmap, minsz) ?
2747 			-EFAULT : 0;
2748 }
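
/*
 * Hypothetical userspace sketch for the plain (no dirty bitmap) unmap path
 * (the range values are illustrative; in the v2 interface the range may not
 * bisect an existing mapping):
 *
 *	struct vfio_iommu_type1_dma_unmap unmap = {
 *		.argsz = sizeof(unmap),
 *		.iova  = 0x100000,
 *		.size  = 0x200000,
 *	};
 *
 *	ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap);
 *	// on return, unmap.size holds the number of bytes actually unmapped
 */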
2749 
2750 static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
2751 					unsigned long arg)
2752 {
2753 	struct vfio_iommu_type1_dirty_bitmap dirty;
2754 	uint32_t mask = VFIO_IOMMU_DIRTY_PAGES_FLAG_START |
2755 			VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP |
2756 			VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
2757 	unsigned long minsz;
2758 	int ret = 0;
2759 
2760 	if (!iommu->v2)
2761 		return -EACCES;
2762 
2763 	minsz = offsetofend(struct vfio_iommu_type1_dirty_bitmap, flags);
2764 
2765 	if (copy_from_user(&dirty, (void __user *)arg, minsz))
2766 		return -EFAULT;
2767 
2768 	if (dirty.argsz < minsz || dirty.flags & ~mask)
2769 		return -EINVAL;
2770 
2771 	/* only one flag should be set at a time */
2772 	if (__ffs(dirty.flags) != __fls(dirty.flags))
2773 		return -EINVAL;
2774 
2775 	if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_START) {
2776 		size_t pgsize;
2777 
2778 		mutex_lock(&iommu->lock);
2779 		pgsize = 1 << __ffs(iommu->pgsize_bitmap);
2780 		if (!iommu->dirty_page_tracking) {
2781 			ret = vfio_dma_bitmap_alloc_all(iommu, pgsize);
2782 			if (!ret)
2783 				iommu->dirty_page_tracking = true;
2784 		}
2785 		mutex_unlock(&iommu->lock);
2786 		return ret;
2787 	} else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP) {
2788 		mutex_lock(&iommu->lock);
2789 		if (iommu->dirty_page_tracking) {
2790 			iommu->dirty_page_tracking = false;
2791 			vfio_dma_bitmap_free_all(iommu);
2792 		}
2793 		mutex_unlock(&iommu->lock);
2794 		return 0;
2795 	} else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP) {
2796 		struct vfio_iommu_type1_dirty_bitmap_get range;
2797 		unsigned long pgshift;
2798 		size_t data_size = dirty.argsz - minsz;
2799 		size_t iommu_pgsize;
2800 
2801 		if (!data_size || data_size < sizeof(range))
2802 			return -EINVAL;
2803 
2804 		if (copy_from_user(&range, (void __user *)(arg + minsz),
2805 				   sizeof(range)))
2806 			return -EFAULT;
2807 
2808 		if (range.iova + range.size < range.iova)
2809 			return -EINVAL;
2810 		if (!access_ok((void __user *)range.bitmap.data,
2811 			       range.bitmap.size))
2812 			return -EINVAL;
2813 
2814 		pgshift = __ffs(range.bitmap.pgsize);
2815 		ret = verify_bitmap_size(range.size >> pgshift,
2816 					 range.bitmap.size);
2817 		if (ret)
2818 			return ret;
2819 
2820 		mutex_lock(&iommu->lock);
2821 
2822 		iommu_pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);
2823 
2824 		/* allow only smallest supported pgsize */
2825 		if (range.bitmap.pgsize != iommu_pgsize) {
2826 			ret = -EINVAL;
2827 			goto out_unlock;
2828 		}
2829 		if (range.iova & (iommu_pgsize - 1)) {
2830 			ret = -EINVAL;
2831 			goto out_unlock;
2832 		}
2833 		if (!range.size || range.size & (iommu_pgsize - 1)) {
2834 			ret = -EINVAL;
2835 			goto out_unlock;
2836 		}
2837 
2838 		if (iommu->dirty_page_tracking)
2839 			ret = vfio_iova_dirty_bitmap(range.bitmap.data,
2840 						     iommu, range.iova,
2841 						     range.size,
2842 						     range.bitmap.pgsize);
2843 		else
2844 			ret = -EINVAL;
2845 out_unlock:
2846 		mutex_unlock(&iommu->lock);
2847 
2848 		return ret;
2849 	}
2850 
2851 	return -EINVAL;
2852 }
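
/*
 * Hypothetical userspace sequence for the dirty page tracking ioctl above,
 * sketching how a migration-aware user might drive it (values illustrative):
 *
 *	struct vfio_iommu_type1_dirty_bitmap start = {
 *		.argsz = sizeof(start),
 *		.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START,
 *	};
 *	ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, &start);
 *
 *	size_t argsz = sizeof(struct vfio_iommu_type1_dirty_bitmap) +
 *		       sizeof(struct vfio_iommu_type1_dirty_bitmap_get);
 *	struct vfio_iommu_type1_dirty_bitmap *dbitmap = calloc(1, argsz);
 *	struct vfio_iommu_type1_dirty_bitmap_get *range = (void *)dbitmap->data;
 *
 *	dbitmap->argsz = argsz;
 *	dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
 *	range->iova = 0x100000;			// must be pgsize aligned
 *	range->size = 0x200000;
 *	range->bitmap.pgsize = 4096;		// smallest supported page size
 *	range->bitmap.size = bitmap_bytes;	// per verify_bitmap_size()
 *	range->bitmap.data = (__u64 *)bitmap_buf;
 *	ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
 */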
2853 
2854 static long vfio_iommu_type1_ioctl(void *iommu_data,
2855 				   unsigned int cmd, unsigned long arg)
2856 {
2857 	struct vfio_iommu *iommu = iommu_data;
2858 
2859 	switch (cmd) {
2860 	case VFIO_CHECK_EXTENSION:
2861 		return vfio_iommu_type1_check_extension(iommu, arg);
2862 	case VFIO_IOMMU_GET_INFO:
2863 		return vfio_iommu_type1_get_info(iommu, arg);
2864 	case VFIO_IOMMU_MAP_DMA:
2865 		return vfio_iommu_type1_map_dma(iommu, arg);
2866 	case VFIO_IOMMU_UNMAP_DMA:
2867 		return vfio_iommu_type1_unmap_dma(iommu, arg);
2868 	case VFIO_IOMMU_DIRTY_PAGES:
2869 		return vfio_iommu_type1_dirty_pages(iommu, arg);
2870 	default:
2871 		return -ENOTTY;
2872 	}
2873 }
2874 
2875 static int vfio_iommu_type1_register_notifier(void *iommu_data,
2876 					      unsigned long *events,
2877 					      struct notifier_block *nb)
2878 {
2879 	struct vfio_iommu *iommu = iommu_data;
2880 
2881 	/* clear known events */
2882 	*events &= ~VFIO_IOMMU_NOTIFY_DMA_UNMAP;
2883 
2884 	/* refuse to register if any events remain */
2885 	if (*events)
2886 		return -EINVAL;
2887 
2888 	return blocking_notifier_chain_register(&iommu->notifier, nb);
2889 }
2890 
2891 static int vfio_iommu_type1_unregister_notifier(void *iommu_data,
2892 						struct notifier_block *nb)
2893 {
2894 	struct vfio_iommu *iommu = iommu_data;
2895 
2896 	return blocking_notifier_chain_unregister(&iommu->notifier, nb);
2897 }
2898 
2899 static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
2900 					 dma_addr_t user_iova, void *data,
2901 					 size_t count, bool write,
2902 					 size_t *copied)
2903 {
2904 	struct mm_struct *mm;
2905 	unsigned long vaddr;
2906 	struct vfio_dma *dma;
2907 	bool kthread = current->mm == NULL;
2908 	size_t offset;
2909 
2910 	*copied = 0;
2911 
2912 	dma = vfio_find_dma(iommu, user_iova, 1);
2913 	if (!dma)
2914 		return -EINVAL;
2915 
2916 	if ((write && !(dma->prot & IOMMU_WRITE)) ||
2917 			!(dma->prot & IOMMU_READ))
2918 		return -EPERM;
2919 
2920 	mm = get_task_mm(dma->task);
2921 
2922 	if (!mm)
2923 		return -EPERM;
2924 
2925 	if (kthread)
2926 		kthread_use_mm(mm);
2927 	else if (current->mm != mm)
2928 		goto out;
2929 
2930 	offset = user_iova - dma->iova;
2931 
2932 	if (count > dma->size - offset)
2933 		count = dma->size - offset;
2934 
2935 	vaddr = dma->vaddr + offset;
2936 
2937 	if (write) {
2938 		*copied = copy_to_user((void __user *)vaddr, data,
2939 					 count) ? 0 : count;
2940 		if (*copied && iommu->dirty_page_tracking) {
2941 			unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
2942 			/*
2943 			 * Bitmap populated with the smallest supported page
2944 			 * size
2945 			 */
2946 			bitmap_set(dma->bitmap, offset >> pgshift,
2947 				   ((offset + *copied - 1) >> pgshift) -
2948 				   (offset >> pgshift) + 1);
2949 		}
2950 	} else
2951 		*copied = copy_from_user(data, (void __user *)vaddr,
2952 					   count) ? 0 : count;
2953 	if (kthread)
2954 		kthread_unuse_mm(mm);
2955 out:
2956 	mmput(mm);
2957 	return *copied ? 0 : -EFAULT;
2958 }
2959 
2960 static int vfio_iommu_type1_dma_rw(void *iommu_data, dma_addr_t user_iova,
2961 				   void *data, size_t count, bool write)
2962 {
2963 	struct vfio_iommu *iommu = iommu_data;
2964 	int ret = 0;
2965 	size_t done;
2966 
2967 	mutex_lock(&iommu->lock);
2968 	while (count > 0) {
2969 		ret = vfio_iommu_type1_dma_rw_chunk(iommu, user_iova, data,
2970 						    count, write, &done);
2971 		if (ret)
2972 			break;
2973 
2974 		count -= done;
2975 		data += done;
2976 		user_iova += done;
2977 	}
2978 
2979 	mutex_unlock(&iommu->lock);
2980 	return ret;
2981 }
2982 
2983 static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
2984 	.name			= "vfio-iommu-type1",
2985 	.owner			= THIS_MODULE,
2986 	.open			= vfio_iommu_type1_open,
2987 	.release		= vfio_iommu_type1_release,
2988 	.ioctl			= vfio_iommu_type1_ioctl,
2989 	.attach_group		= vfio_iommu_type1_attach_group,
2990 	.detach_group		= vfio_iommu_type1_detach_group,
2991 	.pin_pages		= vfio_iommu_type1_pin_pages,
2992 	.unpin_pages		= vfio_iommu_type1_unpin_pages,
2993 	.register_notifier	= vfio_iommu_type1_register_notifier,
2994 	.unregister_notifier	= vfio_iommu_type1_unregister_notifier,
2995 	.dma_rw			= vfio_iommu_type1_dma_rw,
2996 };
2997 
2998 static int __init vfio_iommu_type1_init(void)
2999 {
3000 	return vfio_register_iommu_driver(&vfio_iommu_driver_ops_type1);
3001 }
3002 
3003 static void __exit vfio_iommu_type1_cleanup(void)
3004 {
3005 	vfio_unregister_iommu_driver(&vfio_iommu_driver_ops_type1);
3006 }
3007 
3008 module_init(vfio_iommu_type1_init);
3009 module_exit(vfio_iommu_type1_cleanup);
3010 
3011 MODULE_VERSION(DRIVER_VERSION);
3012 MODULE_LICENSE("GPL v2");
3013 MODULE_AUTHOR(DRIVER_AUTHOR);
3014 MODULE_DESCRIPTION(DRIVER_DESC);
3015