/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
					  GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
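
/*
 * Example (illustrative sketch only, not part of the DRM core): a driver
 * wrapping drm_gem_object_init() to create a shmem-backed buffer object.
 * The mydrv_* names are hypothetical.
 *
 *	struct mydrv_bo {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct mydrv_bo *mydrv_bo_create(struct drm_device *dev,
 *						size_t size)
 *	{
 *		struct mydrv_bo *bo;
 *		int ret;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(bo);
 *			return ERR_PTR(ret);
 *		}
 *
 *		return bo;
 *	}
 */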

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
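
/*
 * Example (illustrative sketch only): drivers whose buffers are not shmem
 * backed, e.g. VRAM-only objects or imported dma-bufs, initialize the object
 * without a shmem file and then manage the backing store themselves; the
 * mydrv_vram_alloc() helper is hypothetical:
 *
 *	drm_gem_private_object_init(dev, &bo->base, PAGE_ALIGN(size));
 *	bo->vram_node = mydrv_vram_alloc(dev, size);
 */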

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free() or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (obj->funcs && obj->funcs->close)
		obj->funcs->close(obj, file_priv);
	else if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
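
/*
 * Example (illustrative sketch only): drivers with GEM-managed backing
 * storage typically wire this helper straight into their &drm_driver;
 * mydrv_dumb_create is hypothetical:
 *
 *	static struct drm_driver mydrv_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		.dumb_create = mydrv_dumb_create,
 *		.dumb_map_offset = drm_gem_dumb_map_offset,
 *		...
 *	};
 */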

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
 * which use gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs && obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	} else if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this
 * point; drivers must call this last in their buffer object creation
 * callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
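
/*
 * Example (illustrative sketch only): a minimal dumb_create implementation
 * that publishes a freshly created object and then drops the creation
 * reference, leaving the handle as the only reference; mydrv_bo_create is
 * the hypothetical helper sketched near drm_gem_object_init() above:
 *
 *	static int mydrv_dumb_create(struct drm_file *file,
 *				     struct drm_device *dev,
 *				     struct drm_mode_create_dumb *args)
 *	{
 *		struct mydrv_bo *bo;
 *		int ret;
 *
 *		args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
 *		args->size = PAGE_ALIGN(args->pitch * args->height);
 *
 *		bo = mydrv_bo_create(dev, args->size);
 *		if (IS_ERR(bo))
 *			return PTR_ERR(bo);
 *
 *		ret = drm_gem_handle_create(file, &bo->base, &args->handle);
 *		drm_gem_object_put(&bo->base);
 *
 *		return ret;
 *	}
 */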


/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(), but not for those initialized with
 * drm_gem_private_object_init() only.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	struct pagevec pvec;
	int i, npages;

	if (WARN_ON(!obj->filp))
		return ERR_PTR(-EINVAL);

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	while (i--) {
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);
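
/*
 * Example (illustrative sketch only): as noted above, zone constraints are
 * applied once on the shmem mapping, right after drm_gem_object_init() and
 * before any pages are populated. A device limited to 32-bit DMA addresses
 * might do:
 *
 *	mapping_set_gfp_mask(obj->filp->f_mapping,
 *			     GFP_USER | __GFP_DMA32);
 *
 *	pages = drm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 */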

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct pagevec pvec;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		if (!pages[i])
			continue;

		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
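
/*
 * Example (illustrative sketch only): drm_gem_get_pages() and
 * drm_gem_put_pages() are typically paired in a driver's pin/unpin or
 * shrinker paths. Pages the device may have written should be flagged
 * dirty so their contents survive a swap-out:
 *
 *	bo->pages = drm_gem_get_pages(&bo->base);
 *	...device uses the pages...
 *	drm_gem_put_pages(&bo->base, bo->pages, true, false);
 *	bo->pages = NULL;
 */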

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs_out filled in with GEM object pointers. Returned GEM objects need to
 * be released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			      GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	*objs_out = objs;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
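
/*
 * Example (illustrative sketch only): an execbuf-style ioctl resolving a
 * user-supplied handle array in one go; the args fields bo_handles and
 * bo_count are hypothetical:
 *
 *	struct drm_gem_object **objs = NULL;
 *	int i, ret;
 *
 *	ret = drm_gem_objects_lookup(file,
 *				     u64_to_user_ptr(args->bo_handles),
 *				     args->bo_count, &objs);
 *	if (ret)
 *		goto err;
 *	...submit the job referencing objs[]...
 *	for (i = 0; i < args->bo_count; i++)
 *		drm_gem_object_put(objs[i]);
 *	kvfree(objs);
 */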

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
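
/*
 * Example (illustrative sketch only): the canonical lookup/use/put pattern,
 * as used by the ioctl implementations later in this file:
 *
 *	obj = drm_gem_object_lookup(file_priv, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *	...use obj...
 *	drm_gem_object_put(obj);
 */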

/**
 * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * Returns -ERESTARTSYS if interrupted, -ETIME if the wait timed out, or
 * 0 on success.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
					true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
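
/*
 * Example (illustrative sketch only): a driver wait ioctl built on this
 * helper; struct mydrv_wait and its absolute timeout_ns field are
 * hypothetical:
 *
 *	static int mydrv_wait_ioctl(struct drm_device *dev, void *data,
 *				    struct drm_file *file)
 *	{
 *		struct mydrv_wait *args = data;
 *		unsigned long timeout =
 *			drm_timeout_abs_to_jiffies(args->timeout_ns);
 *
 *		return drm_gem_dma_resv_wait(file, args->handle, true,
 *					     timeout);
 *	}
 */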

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;
	args->size = obj->size;

err:
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse
 * of drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);

	dma_resv_fini(&obj->_resv);
	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	if (obj->funcs)
		obj->funcs->free(obj);
	else if (dev->driver->gem_free_object_unlocked)
		dev->driver->gem_free_object_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_object_put_locked - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must hold the
 * &drm_device.struct_mutex lock when calling this function, even when the
 * driver doesn't use &drm_device.struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
 * drm_gem_object_put() instead.
 */
void
drm_gem_object_put_locked(struct drm_gem_object *obj)
{
	if (obj) {
		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put_locked);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
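
/*
 * Example (illustrative sketch only): a fault-based &vm_operations_struct
 * using the two helpers above to keep the mapping's object reference
 * balanced across mremap() and partial unmap; mydrv_gem_fault is a
 * hypothetical fault handler:
 *
 *	static const struct vm_operations_struct mydrv_gem_vm_ops = {
 *		.fault = mydrv_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 */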

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	vma->vm_private_data = obj;

	if (obj->funcs && obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret) {
			drm_gem_object_put(obj);
			return ret;
		}
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (obj->funcs && obj->funcs->vm_ops)
			vma->vm_ops = obj->funcs->vm_ops;
		else if (dev->driver->gem_vm_ops)
			vma->vm_ops = dev->driver->gem_vm_ops;
		else {
			drm_gem_object_put(obj);
			return -EINVAL;
		}

		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
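
/*
 * Example (illustrative sketch only): a &drm_driver.gem_prime_mmap
 * implementation can be as small as delegating to this helper, since
 * possession of the dma-buf fd already grants access to the buffer:
 *
 *	static int mydrv_gem_prime_mmap(struct drm_gem_object *obj,
 *					struct vm_area_struct *vma)
 *	{
 *		return drm_gem_mmap_obj(obj, obj->size, vma);
 *	}
 */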

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	if (node->readonly) {
		if (vma->vm_flags & VM_WRITE) {
			drm_gem_object_put(obj);
			return -EINVAL;
		}

		vma->vm_flags &= ~VM_MAYWRITE;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  obj->import_attach ? "yes" : "no");

	if (obj->funcs && obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->pin)
		return obj->funcs->pin(obj);
	else if (obj->dev->driver->gem_prime_pin)
		return obj->dev->driver->gem_prime_pin(obj);
	else
		return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->unpin)
		obj->funcs->unpin(obj);
	else if (obj->dev->driver->gem_prime_unpin)
		obj->dev->driver->gem_prime_unpin(obj);
}

void *drm_gem_vmap(struct drm_gem_object *obj)
{
	void *vaddr;

	if (obj->funcs && obj->funcs->vmap)
		vaddr = obj->funcs->vmap(obj);
	else if (obj->dev->driver->gem_prime_vmap)
		vaddr = obj->dev->driver->gem_prime_vmap(obj);
	else
		vaddr = ERR_PTR(-EOPNOTSUPP);

	if (!vaddr)
		vaddr = ERR_PTR(-ENOMEM);

	return vaddr;
}

void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	if (!vaddr)
		return;

	if (obj->funcs && obj->funcs->vunmap)
		obj->funcs->vunmap(obj, vaddr);
	else if (obj->dev->driver->gem_prime_vunmap)
		obj->dev->driver->gem_prime_vunmap(obj, vaddr);
}

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
						       acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
						  acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);
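
/*
 * Example (illustrative sketch only): typical submit-path usage, locking
 * every buffer of a job, publishing the job's finished fence and unlocking
 * again; "fence" stands for the hypothetical fence signalled on job
 * completion:
 *
 *	ret = drm_gem_lock_reservations(objs, count, &acquire_ctx);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < count; i++)
 *		dma_resv_add_excl_fence(objs[i]->resv, fence);
 *
 *	drm_gem_unlock_reservations(objs, count, &acquire_ctx);
 */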

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);

/**
 * drm_gem_fence_array_add - Adds the fence to an array of fences to be
 * waited on, deduplicating fences from the same context.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_gem_fence_array_add(struct xarray *fence_array,
			    struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(fence_array, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(fence_array, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add);

/**
 * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
 * in the GEM object's reservation object to an array of dma_fences for use in
 * scheduling a rendering job.
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 */
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
				     struct drm_gem_object *obj,
				     bool write)
{
	int ret;
	struct dma_fence **fences;
	unsigned int i, fence_count;

	if (!write) {
		struct dma_fence *fence =
			dma_resv_get_excl_rcu(obj->resv);

		return drm_gem_fence_array_add(fence_array, fence);
	}

	ret = dma_resv_get_fences_rcu(obj->resv, NULL,
				      &fence_count, &fences);
	if (ret || !fence_count)
		return ret;

	for (i = 0; i < fence_count; i++) {
		ret = drm_gem_fence_array_add(fence_array, fences[i]);
		if (ret)
			break;
	}

	for (; i < fence_count; i++)
		dma_fence_put(fences[i]);
	kfree(fences);
	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
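
/*
 * Example (illustrative sketch only): collecting a job's implicit
 * dependencies into an xarray before scheduling it; the xarray must be
 * initialized with XA_FLAGS_ALLOC for the xa_alloc() call in
 * drm_gem_fence_array_add() to succeed:
 *
 *	struct xarray deps;
 *	unsigned long index;
 *	struct dma_fence *fence;
 *
 *	xa_init_flags(&deps, XA_FLAGS_ALLOC);
 *	for (i = 0; i < count; i++) {
 *		ret = drm_gem_fence_array_add_implicit(&deps, objs[i],
 *						       bo_is_written[i]);
 *		if (ret)
 *			break;
 *	}
 *	...the job must wait for every fence in &deps before running...
 *	xa_for_each(&deps, index, fence)
 *		dma_fence_put(fence);
 *	xa_destroy(&deps);
 */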