// SPDX-License-Identifier: GPL-2.0

/*
 * Xen dma-buf functionality for gntdev.
 *
 * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
 *
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-buf.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <xen/xen.h>
#include <xen/grant_table.h>

#include "gntdev-common.h"
#include "gntdev-dmabuf.h"

#ifndef GRANT_INVALID_REF
/*
 * Note on using grant reference 0 as the invalid grant reference:
 * grant reference 0 is valid, but it is never exposed to a driver
 * because it is already in use/reserved by the PV console.
 */
#define GRANT_INVALID_REF	0
#endif

struct gntdev_dmabuf {
	/* Back pointer to the gntdev dma-buf context (lists below). */
	struct gntdev_dmabuf_priv *priv;
	/* The dma-buf created on export (unused on the import path). */
	struct dma_buf *dmabuf;
	/* Entry in the export or import list of @priv. */
	struct list_head next;
	/* File descriptor of this dma-buf as seen by user space. */
	int fd;

	union {
		struct {
			/* Exported buffers are reference counted. */
			struct kref refcount;

			struct gntdev_priv *priv;
			struct gntdev_grant_map *map;
		} exp;
		struct {
			/* Granted references of the imported buffer. */
			grant_ref_t *refs;
			/* Scatter-gather table of the imported buffer. */
			struct sg_table *sgt;
			/* dma-buf attachment of the imported buffer. */
			struct dma_buf_attachment *attach;
		} imp;
	} u;

	/* Number of pages this buffer has. */
	int nr_pages;
	/* Pages of this buffer. */
	struct page **pages;
};

struct gntdev_dmabuf_wait_obj {
	/* Entry in the wait list of the dma-buf context. */
	struct list_head next;
	/* Exported buffer this object is waiting for. */
	struct gntdev_dmabuf *gntdev_dmabuf;
	/* Signalled from dmabuf_exp_release() when the buffer goes away. */
	struct completion completion;
};

struct gntdev_dmabuf_attachment {
	/* Scatter-gather table cached for this attachment. */
	struct sg_table *sgt;
	/* Direction the table was mapped with, DMA_NONE if not mapped. */
	enum dma_data_direction dir;
};

struct gntdev_dmabuf_priv {
	/* List of exported DMA buffers. */
	struct list_head exp_list;
	/* List of wait objects. */
	struct list_head exp_wait_list;
	/* List of imported DMA buffers. */
	struct list_head imp_list;
	/* Serializes access to the three lists above. */
	struct mutex lock;
};

/* DMA buffer export support. */

/* Implementation of wait for exported DMA buffer to be released. */

static void dmabuf_exp_release(struct kref *kref);

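/*
 * Informal sketch of how the wait machinery works: dmabuf_exp_wait_released()
 * looks up the exported buffer by its file descriptor and takes an extra
 * reference on it, then hands that reference over to a wait object created
 * below.  When the buffer's refcount drops to zero, dmabuf_exp_release()
 * signals the completion of the matching wait object, waking the waiter.
 */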
static struct gntdev_dmabuf_wait_obj *
dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
			struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_completion(&obj->completion);
	obj->gntdev_dmabuf = gntdev_dmabuf;

	mutex_lock(&priv->lock);
	list_add(&obj->next, &priv->exp_wait_list);
	/* Put our reference and wait for gntdev_dmabuf's release to fire. */
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
	return obj;
}

static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
				     struct gntdev_dmabuf_wait_obj *obj)
{
	mutex_lock(&priv->lock);
	list_del(&obj->next);
	mutex_unlock(&priv->lock);
	kfree(obj);
}

static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
				    u32 wait_to_ms)
{
	if (wait_for_completion_timeout(&obj->completion,
			msecs_to_jiffies(wait_to_ms)) <= 0)
		return -ETIMEDOUT;

	return 0;
}

static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
				       struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	list_for_each_entry(obj, &priv->exp_wait_list, next)
		if (obj->gntdev_dmabuf == gntdev_dmabuf) {
			pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
			complete_all(&obj->completion);
			break;
		}
}

static struct gntdev_dmabuf *
dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the wait list\n");
			kref_get(&gntdev_dmabuf->u.exp.refcount);
			ret = gntdev_dmabuf;
			break;
		}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
				    int wait_to_ms)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct gntdev_dmabuf_wait_obj *obj;
	int ret;

	pr_debug("Will wait for dma-buf with fd %d\n", fd);
	/*
	 * Try to find the DMA buffer: if it is not found, then either the
	 * buffer has already been released or the provided file descriptor
	 * is wrong.
	 */
	gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	/*
	 * gntdev_dmabuf still exists and is reference count locked by us now,
	 * so prepare to wait: allocate a wait object and add it to the wait
	 * list, so we can find it on release.
	 */
	obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
	dmabuf_exp_wait_obj_free(priv, obj);
	return ret;
}

/* DMA buffer export support. */

static struct sg_table *
dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sgt;
	int ret;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
					nr_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}

static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
				 struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;

	gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
				       GFP_KERNEL);
	if (!gntdev_dmabuf_attach)
		return -ENOMEM;

	gntdev_dmabuf_attach->dir = DMA_NONE;
	attach->priv = gntdev_dmabuf_attach;
	return 0;
}

static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;

	if (gntdev_dmabuf_attach) {
		struct sg_table *sgt = gntdev_dmabuf_attach->sgt;

		if (sgt) {
			if (gntdev_dmabuf_attach->dir != DMA_NONE)
				dma_unmap_sg_attrs(attach->dev, sgt->sgl,
						   sgt->nents,
						   gntdev_dmabuf_attach->dir,
						   DMA_ATTR_SKIP_CPU_SYNC);
			sg_free_table(sgt);
		}

		kfree(sgt);
		kfree(gntdev_dmabuf_attach);
		attach->priv = NULL;
	}
}

static struct sg_table *
dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
			   enum dma_data_direction dir)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
	struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
	struct sg_table *sgt;

	pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
		 attach->dev);

	if (dir == DMA_NONE || !gntdev_dmabuf_attach)
		return ERR_PTR(-EINVAL);

	/* Return the cached mapping when possible. */
	if (gntdev_dmabuf_attach->dir == dir)
		return gntdev_dmabuf_attach->sgt;

	/*
	 * Two mappings with different directions for the same attachment are
	 * not allowed.
	 */
	if (gntdev_dmabuf_attach->dir != DMA_NONE)
		return ERR_PTR(-EBUSY);

	sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
				  gntdev_dmabuf->nr_pages);
	if (!IS_ERR(sgt)) {
		if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
				      DMA_ATTR_SKIP_CPU_SYNC)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			gntdev_dmabuf_attach->sgt = sgt;
			gntdev_dmabuf_attach->dir = dir;
		}
	}
	if (IS_ERR(sgt))
		pr_debug("Failed to map sg table for dev %p\n", attach->dev);
	return sgt;
}

static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
					 struct sg_table *sgt,
					 enum dma_data_direction dir)
{
	/* Not implemented. The unmap is done at dmabuf_exp_ops_detach(). */
}

/*
 * kref release callback. Both kref_put() callers hold
 * gntdev_dmabuf_priv::lock, so the list_del() below is safe.
 */
static void dmabuf_exp_release(struct kref *kref)
{
	struct gntdev_dmabuf *gntdev_dmabuf =
		container_of(kref, struct gntdev_dmabuf, u.exp.refcount);

	dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
	list_del(&gntdev_dmabuf->next);
	kfree(gntdev_dmabuf);
}

static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
				  struct gntdev_grant_map *map)
{
	mutex_lock(&priv->lock);
	list_del(&map->next);
	gntdev_put_map(NULL /* already removed */, map);
	mutex_unlock(&priv->lock);
}

static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
{
	struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
	struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;

	dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
			      gntdev_dmabuf->u.exp.map);
	mutex_lock(&priv->lock);
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
}

static void *dmabuf_exp_ops_kmap(struct dma_buf *dma_buf,
				 unsigned long page_num)
{
	/* Not implemented. */
	return NULL;
}

static void dmabuf_exp_ops_kunmap(struct dma_buf *dma_buf,
				  unsigned long page_num, void *addr)
{
	/* Not implemented. */
}

static int dmabuf_exp_ops_mmap(struct dma_buf *dma_buf,
			       struct vm_area_struct *vma)
{
	/* Not implemented. */
	return 0;
}

static const struct dma_buf_ops dmabuf_exp_ops = {
	.attach = dmabuf_exp_ops_attach,
	.detach = dmabuf_exp_ops_detach,
	.map_dma_buf = dmabuf_exp_ops_map_dma_buf,
	.unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
	.release = dmabuf_exp_ops_release,
	.map = dmabuf_exp_ops_kmap,
	.unmap = dmabuf_exp_ops_kunmap,
	.mmap = dmabuf_exp_ops_mmap,
};
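
/*
 * Note: only attach/detach, map_dma_buf and release do real work here
 * (unmap_dma_buf is deferred to detach).  The kmap (.map/.unmap) and .mmap
 * callbacks are intentionally stubs; in this implementation the exported
 * pages are only meant to be consumed by importers through the DMA API.
 */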

struct gntdev_dmabuf_export_args {
	struct gntdev_priv *priv;
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_priv *dmabuf_priv;
	struct device *dev;
	int count;
	struct page **pages;
	u32 fd;
};

static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct gntdev_dmabuf *gntdev_dmabuf;
	int ret;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		return -ENOMEM;

	kref_init(&gntdev_dmabuf->u.exp.refcount);

	gntdev_dmabuf->priv = args->dmabuf_priv;
	gntdev_dmabuf->nr_pages = args->count;
	gntdev_dmabuf->pages = args->pages;
	gntdev_dmabuf->u.exp.priv = args->priv;
	gntdev_dmabuf->u.exp.map = args->map;

	exp_info.exp_name = KBUILD_MODNAME;
	if (args->dev->driver && args->dev->driver->owner)
		exp_info.owner = args->dev->driver->owner;
	else
		exp_info.owner = THIS_MODULE;
	exp_info.ops = &dmabuf_exp_ops;
	exp_info.size = args->count << PAGE_SHIFT;
	exp_info.flags = O_RDWR;
	exp_info.priv = gntdev_dmabuf;

	gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(gntdev_dmabuf->dmabuf)) {
		ret = PTR_ERR(gntdev_dmabuf->dmabuf);
		gntdev_dmabuf->dmabuf = NULL;
		goto fail;
	}

	ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
	if (ret < 0)
		goto fail;

	gntdev_dmabuf->fd = ret;
	args->fd = ret;

	pr_debug("Exporting DMA buffer with fd %d\n", ret);

	mutex_lock(&args->dmabuf_priv->lock);
	list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
	mutex_unlock(&args->dmabuf_priv->lock);
	return 0;

fail:
	if (gntdev_dmabuf->dmabuf)
		dma_buf_put(gntdev_dmabuf->dmabuf);
	kfree(gntdev_dmabuf);
	return ret;
}

static struct gntdev_grant_map *
dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
				 int count)
{
	struct gntdev_grant_map *map;

	if (unlikely(count <= 0))
		return ERR_PTR(-EINVAL);

	if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
	    (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
		pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
		return ERR_PTR(-EINVAL);
	}

	map = gntdev_alloc_map(priv, count, dmabuf_flags);
	if (!map)
		return ERR_PTR(-ENOMEM);

	if (unlikely(gntdev_account_mapped_pages(count))) {
		pr_debug("can't map %d pages: over limit\n", count);
		gntdev_put_map(NULL, map);
		return ERR_PTR(-ENOMEM);
	}
	return map;
}

static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
				int count, u32 domid, u32 *refs, u32 *fd)
{
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_export_args args;
	int i, ret;

	map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
	if (IS_ERR(map))
		return PTR_ERR(map);

	for (i = 0; i < count; i++) {
		map->grants[i].domid = domid;
		map->grants[i].ref = refs[i];
	}

	mutex_lock(&priv->lock);
	gntdev_add_map(priv, map);
	mutex_unlock(&priv->lock);

	map->flags |= GNTMAP_host_map;
#if defined(CONFIG_X86)
	map->flags |= GNTMAP_device_map;
#endif

	ret = gntdev_map_grant_pages(map);
	if (ret < 0)
		goto out;

	args.priv = priv;
	args.map = map;
	args.dev = priv->dma_dev;
	args.dmabuf_priv = priv->dmabuf_priv;
	args.count = map->count;
	args.pages = map->pages;
	args.fd = -1; /* Shut up unnecessary gcc warning for i386 */

	ret = dmabuf_exp_from_pages(&args);
	if (ret < 0)
		goto out;

	*fd = args.fd;
	return 0;

out:
	dmabuf_exp_remove_map(priv, map);
	return ret;
}
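
/*
 * Summary of the export path above: allocate a grant map sized for @count
 * grant references, map the foreign grants into local pages, then wrap those
 * pages into a dma-buf and return its file descriptor to user space.  On any
 * failure the grant map is removed again via dmabuf_exp_remove_map().
 */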

/* DMA buffer import support. */

static int
dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
				int count, int domid)
{
	grant_ref_t priv_gref_head;
	int i, ret;

	ret = gnttab_alloc_grant_references(count, &priv_gref_head);
	if (ret < 0) {
		pr_debug("Cannot allocate grant references, ret %d\n", ret);
		return ret;
	}

	for (i = 0; i < count; i++) {
		int cur_ref;

		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
		if (cur_ref < 0) {
			ret = cur_ref;
			pr_debug("Cannot claim grant reference, ret %d\n", ret);
			goto out;
		}

		gnttab_grant_foreign_access_ref(cur_ref, domid,
						xen_page_to_gfn(pages[i]), 0);
		refs[i] = cur_ref;
	}

	return 0;

out:
	gnttab_free_grant_references(priv_gref_head);
	return ret;
}

static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (refs[i] != GRANT_INVALID_REF)
			gnttab_end_foreign_access(refs[i], 0, 0UL);
}
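
/*
 * The GRANT_INVALID_REF check above relies on dmabuf_imp_alloc_storage()
 * below pre-filling the refs[] array with that sentinel, so a buffer whose
 * import failed part-way through granting can still be torn down safely.
 */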

static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
{
	kfree(gntdev_dmabuf->pages);
	kfree(gntdev_dmabuf->u.imp.refs);
	kfree(gntdev_dmabuf);
}

static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	int i;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		goto fail_no_free;

	gntdev_dmabuf->u.imp.refs = kcalloc(count,
					    sizeof(gntdev_dmabuf->u.imp.refs[0]),
					    GFP_KERNEL);
	if (!gntdev_dmabuf->u.imp.refs)
		goto fail;

	gntdev_dmabuf->pages = kcalloc(count,
				       sizeof(gntdev_dmabuf->pages[0]),
				       GFP_KERNEL);
	if (!gntdev_dmabuf->pages)
		goto fail;

	gntdev_dmabuf->nr_pages = count;

	for (i = 0; i < count; i++)
		gntdev_dmabuf->u.imp.refs[i] = GRANT_INVALID_REF;

	return gntdev_dmabuf;

fail:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_no_free:
	return ERR_PTR(-ENOMEM);
}

static struct gntdev_dmabuf *
dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
		   int fd, int count, int domid)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret;
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct sg_page_iter sg_iter;
	int i;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return ERR_CAST(dma_buf);

	gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
	if (IS_ERR(gntdev_dmabuf)) {
		ret = gntdev_dmabuf;
		goto fail_put;
	}

	gntdev_dmabuf->priv = priv;
	gntdev_dmabuf->fd = fd;

	attach = dma_buf_attach(dma_buf, dev);
	if (IS_ERR(attach)) {
		ret = ERR_CAST(attach);
		goto fail_free_obj;
	}

	gntdev_dmabuf->u.imp.attach = attach;

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = ERR_CAST(sgt);
		goto fail_detach;
	}

	/* Check that the size of the imported buffer matches @count pages. */
	if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has size %zu, user-space expects %d pages\n",
			 attach->dmabuf->size, gntdev_dmabuf->nr_pages);
		goto fail_unmap;
	}

	gntdev_dmabuf->u.imp.sgt = sgt;

	/* Now convert sgt to array of pages and check for page validity. */
	i = 0;
	for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		/*
		 * Check that the page is valid: it may not be if we were
		 * given a page from VRAM or some other resource which is
		 * not backed by a struct page.
		 */
		if (!pfn_valid(page_to_pfn(page))) {
			ret = ERR_PTR(-EINVAL);
			goto fail_unmap;
		}

		gntdev_dmabuf->pages[i++] = page;
	}

	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
						      gntdev_dmabuf->u.imp.refs,
						      count, domid));
	if (IS_ERR(ret))
		goto fail_end_access;

	pr_debug("Imported DMA buffer with fd %d\n", fd);

	mutex_lock(&priv->lock);
	list_add(&gntdev_dmabuf->next, &priv->imp_list);
	mutex_unlock(&priv->lock);

	return gntdev_dmabuf;

fail_end_access:
	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
fail_free_obj:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_put:
	dma_buf_put(dma_buf);
	return ret;
}

/*
 * Find the imported dma-buf by its file descriptor and remove
 * it from the list of imported buffers.
 */
static struct gntdev_dmabuf *
dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the import list\n");
			ret = gntdev_dmabuf;
			list_del(&gntdev_dmabuf->next);
			break;
		}
	}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	pr_debug("Releasing DMA buffer with fd %d\n", fd);

	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
				      gntdev_dmabuf->nr_pages);

	attach = gntdev_dmabuf->u.imp.attach;

	if (gntdev_dmabuf->u.imp.sgt)
		dma_buf_unmap_attachment(attach, gntdev_dmabuf->u.imp.sgt,
					 DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	dma_buf_put(dma_buf);

	dmabuf_imp_free_storage(gntdev_dmabuf);
	return 0;
}
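
/*
 * The teardown order above matters: foreign access is ended before the pages
 * are handed back to the exporter, the attachment is unmapped before it is
 * detached, and dma_buf_put() drops the reference taken by dma_buf_get() at
 * import time.
 */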

/* DMA buffer IOCTL support. */

long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
				       struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_from_refs op;
	u32 *refs;
	long ret;

	if (use_ptemod) {
		pr_debug("Cannot provide dma-buf: use_ptemod %d\n",
			 use_ptemod);
		return -EINVAL;
	}

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(op.count <= 0))
		return -EINVAL;

	refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
	if (!refs)
		return -ENOMEM;

	if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out;
	}

	ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
				   op.domid, refs, &op.fd);
	if (ret)
		goto out;

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		ret = -EFAULT;

out:
	kfree(refs);
	return ret;
}
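
/*
 * Rough user-space usage sketch for the export ioctl (assuming the uapi
 * layout from include/uapi/xen/gntdev.h; names should be checked against
 * that header):
 *
 *	struct ioctl_gntdev_dmabuf_exp_from_refs *op;
 *
 *	op = calloc(1, sizeof(*op) + count * sizeof(op->refs[0]));
 *	op->flags = GNTDEV_DMA_FLAG_WC;
 *	op->count = count;
 *	op->domid = remote_domid;
 *	memcpy(op->refs, refs, count * sizeof(op->refs[0]));
 *	if (ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op) == 0)
 *		dmabuf_fd = op->fd;	// fd to hand to a DRM/V4L2 driver
 *
 * The returned fd owns the underlying grant mapping, which is torn down
 * from dmabuf_exp_ops_release() when the last dma-buf reference goes away.
 */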

long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
					   struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_wait_released op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd,
					op.wait_to_ms);
}

long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_to_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_to_refs op;
	struct gntdev_dmabuf *gntdev_dmabuf;
	long ret;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(op.count <= 0))
		return -EINVAL;

	gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
					   priv->dma_dev, op.fd,
					   op.count, op.domid);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs,
			 sizeof(*u->refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out_release;
	}
	return 0;

out_release:
	dmabuf_imp_release(priv->dmabuf_priv, op.fd);
	return ret;
}
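
/*
 * Corresponding user-space sketch for the import ioctl (same caveat about
 * verifying the uapi layout; send_refs_to_backend() is a hypothetical
 * helper of the calling application):
 *
 *	struct ioctl_gntdev_dmabuf_imp_to_refs *op;
 *
 *	op = calloc(1, sizeof(*op) + count * sizeof(op->refs[0]));
 *	op->fd = dmabuf_fd;		// dma-buf received from another driver
 *	op->count = count;
 *	op->domid = remote_domid;	// domain that will map our grants
 *	if (ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_IMP_TO_REFS, op) == 0)
 *		send_refs_to_backend(op->refs, count);
 *
 * IOCTL_GNTDEV_DMABUF_IMP_RELEASE with the same dma-buf fd undoes the import.
 */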

long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_release __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_release op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
}

struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void)
{
	struct gntdev_dmabuf_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	mutex_init(&priv->lock);
	INIT_LIST_HEAD(&priv->exp_list);
	INIT_LIST_HEAD(&priv->exp_wait_list);
	INIT_LIST_HEAD(&priv->imp_list);

	return priv;
}

void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
{
	kfree(priv);
}