/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>
#include <drm/drm_prime.h>
#include <drm/drm_gem.h>
#include <drm/drmP.h>

#include "drm_internal.h"

/*
 * DMA-BUF/GEM Object references and lifetime overview:
 *
 * On the export the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in the handle_to_fd ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
 * the dma_buf priv. This reference needs to be released when the
 * final reference to the &dma_buf itself is dropped and its
 * &dma_buf_ops.release function is called. For GEM-based drivers,
 * the dma_buf should be exported using drm_gem_dmabuf_export() and
 * then released by drm_gem_dmabuf_release().
 *
 * On the import the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a ref to the exporting GEM object).
 * It takes that reference in the fd_to_handle ioctl: it calls
 * dma_buf_get, creates an attachment to the dma_buf and stores the
 * attachment in the GEM object. When the imported object is
 * destroyed, the attachment is removed and the reference to the
 * dma_buf is dropped.
 *
 * When all the references to the &dma_buf are dropped, i.e. when
 * userspace has closed both the handle to the imported GEM object
 * (obtained through the FD_TO_HANDLE IOCTL) and the file descriptor of
 * the exported dma_buf (obtained through the HANDLE_TO_FD IOCTL), and
 * all kernel-internal references are also gone, then the dma_buf gets
 * destroyed. This can also happen as part of the clean up procedure in
 * the drm_release() function if userspace fails to properly clean up.
 * Note that both the kernel and userspace (by keeping the PRIME file
 * descriptors open) can hold references onto a &dma_buf.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get an fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return back the GEM object
 * from the dma-buf private. PRIME will do this automatically for drivers
 * that use the drm_gem_prime_{import,export} helpers.
 *
 * The GEM struct &dma_buf_ops symbols are now exported. They can be
 * reused by drivers which implement the GEM interface.
 */
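
/*
 * A minimal sketch of how a GEM driver wires up these helpers. The "foo"
 * driver below is hypothetical, but the &drm_driver hooks are the ones
 * this file implements:
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features	= DRIVER_GEM | DRIVER_PRIME,
 *		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
 *		.gem_prime_export	= drm_gem_prime_export,
 *		.gem_prime_import	= drm_gem_prime_import,
 *	};
 */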

struct drm_prime_member {
	struct dma_buf *dma_buf;
	uint32_t handle;

	struct rb_node dmabuf_rb;
	struct rb_node handle_rb;
};

struct drm_prime_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;
	struct rb_node **p, *rb;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;

	rb = NULL;
	p = &prime_fpriv->dmabufs.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (dma_buf > pos->dma_buf)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->dmabuf_rb, rb, p);
	rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);

	rb = NULL;
	p = &prime_fpriv->handles.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (handle > pos->handle)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->handle_rb, rb, p);
	rb_insert_color(&member->handle_rb, &prime_fpriv->handles);

	return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->handles.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (member->handle == handle)
			return member->dma_buf;
		else if (member->handle < handle)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}

	return -ENOENT;
}

/**
 * drm_gem_map_attach - dma_buf attach implementation for GEM
 * @dma_buf: buffer to attach device to
 * @attach: buffer attachment data
 *
 * Allocates &drm_prime_attachment and calls &drm_driver.gem_prime_pin for
 * device specific attachment. This can be used as the &dma_buf_ops.attach
 * callback.
 *
 * Returns 0 on success, negative error code on failure.
 */
int drm_gem_map_attach(struct dma_buf *dma_buf,
		       struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
	if (!prime_attach)
		return -ENOMEM;

	prime_attach->dir = DMA_NONE;
	attach->priv = prime_attach;

	if (!dev->driver->gem_prime_pin)
		return 0;

	return dev->driver->gem_prime_pin(obj);
}
EXPORT_SYMBOL(drm_gem_map_attach);

/**
 * drm_gem_map_detach - dma_buf detach implementation for GEM
 * @dma_buf: buffer to detach from
 * @attach: attachment to be detached
 *
 * Cleans up &dma_buf_attachment. This can be used as the &dma_buf_ops.detach
 * callback.
 */
void drm_gem_map_detach(struct dma_buf *dma_buf,
			struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (prime_attach) {
		struct sg_table *sgt = prime_attach->sgt;

		if (sgt) {
			if (prime_attach->dir != DMA_NONE)
				dma_unmap_sg_attrs(attach->dev, sgt->sgl,
						   sgt->nents,
						   prime_attach->dir,
						   DMA_ATTR_SKIP_CPU_SYNC);
			sg_free_table(sgt);
		}

		kfree(sgt);
		kfree(prime_attach);
		attach->priv = NULL;
	}

	if (dev->driver->gem_prime_unpin)
		dev->driver->gem_prime_unpin(obj);
}
EXPORT_SYMBOL(drm_gem_map_detach);
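
/*
 * Since these &dma_buf_ops implementations are exported, a driver that needs
 * its own ops table (here a hypothetical "foo" driver overriding only .mmap)
 * can still reuse the rest, e.g.:
 *
 *	static const struct dma_buf_ops foo_dmabuf_ops = {
 *		.attach		= drm_gem_map_attach,
 *		.detach		= drm_gem_map_detach,
 *		.map_dma_buf	= drm_gem_map_dma_buf,
 *		.unmap_dma_buf	= drm_gem_unmap_dma_buf,
 *		.release	= drm_gem_dmabuf_release,
 *		.map		= drm_gem_dmabuf_kmap,
 *		.unmap		= drm_gem_dmabuf_kunmap,
 *		.mmap		= foo_dmabuf_mmap,
 *		.vmap		= drm_gem_dmabuf_vmap,
 *		.vunmap		= drm_gem_dmabuf_vunmap,
 *	};
 */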

void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
					struct dma_buf *dma_buf)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			rb_erase(&member->handle_rb, &prime_fpriv->handles);
			rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

			dma_buf_put(dma_buf);
			kfree(member);
			return;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}
}

/**
 * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
 * @attach: attachment whose scatterlist is to be returned
 * @dir: direction of DMA transfer
 *
 * Calls &drm_driver.gem_prime_get_sg_table and then maps the scatterlist. This
 * can be used as the &dma_buf_ops.map_dma_buf callback.
 *
 * Returns the sg_table containing the scatterlist, or an ERR_PTR on failure.
 * May return -EINTR if it is interrupted by a signal.
 */
struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
				     enum dma_data_direction dir)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	if (WARN_ON(dir == DMA_NONE || !prime_attach))
		return ERR_PTR(-EINVAL);

	/* return the cached mapping when possible */
	if (prime_attach->dir == dir)
		return prime_attach->sgt;

	/*
	 * two mappings with different directions for the same attachment are
	 * not allowed
	 */
	if (WARN_ON(prime_attach->dir != DMA_NONE))
		return ERR_PTR(-EBUSY);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!IS_ERR(sgt)) {
		if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
				      DMA_ATTR_SKIP_CPU_SYNC)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			prime_attach->sgt = sgt;
			prime_attach->dir = dir;
		}
	}

	return sgt;
}
EXPORT_SYMBOL(drm_gem_map_dma_buf);

/**
 * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
 * @attach: attachment to unmap buffer from
 * @sgt: scatterlist info of the buffer to unmap
 * @dir: direction of DMA transfer
 *
 * Not implemented. The unmap is done at drm_gem_map_detach(). This can be
 * used as the &dma_buf_ops.unmap_dma_buf callback.
 */
void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
			   struct sg_table *sgt,
			   enum dma_data_direction dir)
{
	/* nothing to be done here */
}
EXPORT_SYMBOL(drm_gem_unmap_dma_buf);

/**
 * drm_gem_dmabuf_export - dma_buf export implementation for GEM
 * @dev: parent device for the exported dmabuf
 * @exp_info: the export information used by dma_buf_export()
 *
 * This wraps dma_buf_export() for use by generic GEM drivers that are using
 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
 * a reference to the &drm_device and the exported &drm_gem_object (stored in
 * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
 *
 * Returns the new dmabuf.
 */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
				      struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dma_buf;

	dma_buf = dma_buf_export(exp_info);
	if (IS_ERR(dma_buf))
		return dma_buf;

	drm_dev_get(dev);
	drm_gem_object_get(exp_info->priv);

	return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);
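
/*
 * A sketch of a driver-specific export hook built on this wrapper; the foo_*
 * names are hypothetical and error handling is elided:
 *
 *	struct dma_buf *foo_gem_prime_export(struct drm_device *dev,
 *					     struct drm_gem_object *obj,
 *					     int flags)
 *	{
 *		struct dma_buf_export_info exp_info = {
 *			.exp_name = "foo",
 *			.owner = THIS_MODULE,
 *			.ops = &foo_dmabuf_ops,
 *			.size = obj->size,
 *			.flags = flags,
 *			.priv = obj,
 *		};
 *
 *		return drm_gem_dmabuf_export(dev, &exp_info);
 *	}
 */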

/**
 * drm_gem_dmabuf_release - dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their dma_buf ops structure as the release callback.
 * drm_gem_dmabuf_release() should be used in conjunction with
 * drm_gem_dmabuf_export().
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	/* drop the reference the export fd holds */
	drm_gem_object_put_unlocked(obj);

	drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

/**
 * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
 * @dma_buf: buffer to be mapped
 *
 * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
 * callback.
 *
 * Returns the kernel virtual address.
 */
void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (dev->driver->gem_prime_vmap)
		return dev->driver->gem_prime_vmap(obj);
	else
		return NULL;
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);

/**
 * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
 * @dma_buf: buffer to be unmapped
 * @vaddr: the virtual address of the buffer
 *
 * Releases a kernel virtual mapping. This can be used as the
 * &dma_buf_ops.vunmap callback.
 */
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (dev->driver->gem_prime_vunmap)
		dev->driver->gem_prime_vunmap(obj, vaddr);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
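
/*
 * The &drm_driver.gem_prime_vmap/gem_prime_vunmap hooks called above can be
 * as simple as handing out a kernel mapping created at allocation time. A
 * sketch for a hypothetical driver that caches a vaddr in its BO wrapper
 * (vunmap is then empty, since the mapping lives as long as the BO):
 *
 *	void *foo_gem_prime_vmap(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *foo_obj = to_foo_gem_object(obj);
 *
 *		return foo_obj->vaddr;
 *	}
 *
 *	void foo_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 *	{
 *	}
 */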

/**
 * drm_gem_dmabuf_kmap - map implementation for GEM
 * @dma_buf: buffer to be mapped
 * @page_num: page number within the buffer
 *
 * Not implemented. This can be used as the &dma_buf_ops.map callback.
 */
void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}
EXPORT_SYMBOL(drm_gem_dmabuf_kmap);

/**
 * drm_gem_dmabuf_kunmap - unmap implementation for GEM
 * @dma_buf: buffer to be unmapped
 * @page_num: page number within the buffer
 * @addr: virtual address of the buffer
 *
 * Not implemented. This can be used as the &dma_buf_ops.unmap callback.
 */
void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
			   void *addr)
{

}
EXPORT_SYMBOL(drm_gem_dmabuf_kunmap);

/**
 * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
 * @dma_buf: buffer to be mapped
 * @vma: virtual address range
 *
 * Provides memory mapping for the buffer. This can be used as the
 * &dma_buf_ops.mmap callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

	return dev->driver->gem_prime_mmap(obj, vma);
}
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.map = drm_gem_dmabuf_kmap,
	.unmap = drm_gem_dmabuf_kunmap,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions @drm_gem_prime_export and
 * @drm_gem_prime_import. These functions implement dma-buf support in terms of
 * six lower-level driver callbacks:
 *
 * Export callbacks:
 *
 *  * @gem_prime_pin (optional): prepare a GEM object for exporting
 *  * @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *  * @gem_prime_vmap: vmap a buffer exported by your driver
 *  * @gem_prime_vunmap: vunmap a buffer exported by your driver
 *  * @gem_prime_mmap (optional): mmap a buffer exported by your driver
 *
 * Import callback:
 *
 *  * @gem_prime_import_sg_table (import): produce a GEM object from another
 *    driver's scatter/gather table
 */
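
/*
 * Putting the callbacks above together, a driver using the helper library
 * typically declares something like the following (all foo_* symbols are
 * hypothetical implementations of the listed callbacks):
 *
 *	static struct drm_driver foo_driver = {
 *		.gem_prime_export		= drm_gem_prime_export,
 *		.gem_prime_import		= drm_gem_prime_import,
 *		.gem_prime_pin			= foo_gem_prime_pin,
 *		.gem_prime_get_sg_table		= foo_gem_prime_get_sg_table,
 *		.gem_prime_import_sg_table	= foo_gem_prime_import_sg_table,
 *		.gem_prime_vmap			= foo_gem_prime_vmap,
 *		.gem_prime_vunmap		= foo_gem_prime_vunmap,
 *		.gem_prime_mmap			= foo_gem_prime_mmap,
 *	};
 */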

/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @dev: drm_device to export from
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * This is the implementation of the gem_prime_export functions for GEM drivers
 * using the PRIME helpers.
 */
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj,
				     int flags)
{
	struct dma_buf_export_info exp_info = {
		.exp_name = KBUILD_MODNAME, /* white lie for debug */
		.owner = dev->driver->fops->owner,
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
	};

	if (dev->driver->gem_prime_res_obj)
		exp_info.resv = dev->driver->gem_prime_res_obj(obj);

	return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);

static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);

	return dmabuf;
}

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which must be used by GEM drivers to
 * ensure correct lifetime management of the underlying GEM object. The actual
 * exporting from GEM object to a dma-buf is done through the gem_prime_export
 * driver callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't fail to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_put_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

/**
 * drm_gem_prime_import_dev - core implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 * @attach_dev: struct device to dma_buf attach
 *
 * This is the core of drm_gem_prime_import(). It's designed to be called by
 * drivers who want to use a different device structure than dev->dev for
 * attaching via dma_buf.
 */
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
						struct dma_buf *dma_buf,
						struct device *attach_dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dma-buf exported from our own GEM
			 * object increases the refcount on the GEM object
			 * itself instead of the f_count of the dma-buf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, attach_dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import_dev);
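
/*
 * A driver whose DMA is performed by a device other than dev->dev (say a
 * hypothetical "foo" SoC driver with a separate DMA engine) would wrap this
 * as:
 *
 *	struct drm_gem_object *foo_gem_prime_import(struct drm_device *dev,
 *						    struct dma_buf *dma_buf)
 *	{
 *		struct foo_device *foo = to_foo_device(dev);
 *
 *		return drm_gem_prime_import_dev(dev, dma_buf, foo->dma_dev);
 *	}
 */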

/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import functions for GEM drivers
 * using the PRIME helpers.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
}
EXPORT_SYMBOL(drm_gem_prime_import);

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which must be used by GEM drivers to
 * ensure correct lifetime management of the underlying GEM object. The actual
 * importing of the GEM object from the dma-buf is done through the
 * gem_prime_import driver callback.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	obj = dev->driver->gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_put_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	mutex_unlock(&file_priv->prime.lock);
	if (ret)
		goto fail;

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
	dma_buf_put(dma_buf);
	return ret;

out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	mutex_unlock(&file_priv->prime.lock);
	dma_buf_put(dma_buf);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check flags are valid */
	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
		return -EINVAL;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
			args->handle, args->flags, &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
			args->fd, &args->handle);
}

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages; the driver is
 * responsible for mapping the pages into the importer's address space for use
 * with dma_buf itself.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sg = NULL;
	int ret;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
				nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
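
/*
 * This is the natural building block for &drm_driver.gem_prime_get_sg_table
 * in a page-backed driver; a sketch with hypothetical foo_* accessors:
 *
 *	struct sg_table *foo_gem_prime_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *foo_obj = to_foo_gem_object(obj);
 *		unsigned int npages = obj->size >> PAGE_SHIFT;
 *
 *		return drm_prime_pages_to_sg(foo_obj->pages, npages);
 *	}
 */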

/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: optional array of page pointers to store the page array in
 * @addrs: optional array to store the dma bus address of each page
 * @max_entries: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_entries)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len, index;
	dma_addr_t addr;

	index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		while (len > 0) {
			if (WARN_ON(index >= max_entries))
				return -1;
			if (pages)
				pages[index] = page;
			if (addrs)
				addrs[index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
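
/*
 * A TTM-style caller unpacks the table into preallocated arrays sized for
 * the object; an illustrative sketch (allocation failure handling elided):
 *
 *	int npages = obj->size >> PAGE_SHIFT;
 *	struct page **pages = kvmalloc_array(npages, sizeof(*pages),
 *					     GFP_KERNEL);
 *	dma_addr_t *addrs = kvmalloc_array(npages, sizeof(*addrs),
 *					   GFP_KERNEL);
 *	int ret;
 *
 *	ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, addrs, npages);
 */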

/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * drm_gem_prime_import() to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
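
/*
 * Drivers call this from their free-object path when the object was
 * imported; a sketch with a hypothetical "foo" driver that stashed the
 * pinned sg-table in its BO wrapper at import time:
 *
 *	void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *foo_obj = to_foo_gem_object(obj);
 *
 *		if (obj->import_attach)
 *			drm_prime_gem_destroy(obj, foo_obj->sgt);
 *		drm_gem_object_release(obj);
 *		kfree(foo_obj);
 *	}
 */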

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	mutex_init(&prime_fpriv->lock);
	prime_fpriv->dmabufs = RB_ROOT;
	prime_fpriv->handles = RB_ROOT;
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}
