// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 */
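
/*
 * A minimal wiring sketch (hypothetical "foo" driver, not part of this
 * helper library): a driver typically only needs to hook up the shmem
 * fops plus the dumb-buffer and PRIME import callbacks provided below.
 *
 *	DEFINE_DRM_GEM_SHMEM_FOPS(foo_fops);
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features	   = DRIVER_GEM,
 *		.fops			   = &foo_fops,
 *		.dumb_create		   = drm_gem_shmem_dumb_create,
 *		.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
 *	};
 */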

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_free_object,
	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object)
		obj = dev->driver->gem_create_object(dev, size);
	else
		obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	shmem = to_drm_gem_shmem_obj(obj);
	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	/*
	 * Our buffers are kept pinned, so allocating them
	 * from the MOVABLE zone is a really bad idea, and
	 * conflicts with CMA. See comments above new_inode()
	 * why this is required _and_ expected if you're
	 * going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
			     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
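
/*
 * Usage sketch (hypothetical caller, size computed elsewhere): because an
 * ERR_PTR() is returned on failure, the result must be checked with
 * IS_ERR() rather than against NULL.
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 */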

/**
 * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
 * @obj: GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free_object(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		shmem->pages_use_count--;
		drm_prime_gem_destroy(obj, shmem->sgt);
		kvfree(shmem->pages);
	} else {
		if (shmem->sgt) {
			dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
				     shmem->sgt->nents, DMA_BIDIRECTIONAL);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	shmem->pages = pages;

	return 0;
}

/**
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/**
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);
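
/*
 * Pairing sketch (hypothetical caller): every successful
 * drm_gem_shmem_get_pages() call must eventually be balanced by a
 * drm_gem_shmem_put_pages() call, or the pages stay pinned forever.
 *
 *	ret = drm_gem_shmem_get_pages(shmem);
 *	if (ret)
 *		return ret;
 *	... use shmem->pages ...
 *	drm_gem_shmem_put_pages(shmem);
 */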

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (shmem->vmap_use_count++ > 0)
		return shmem->vaddr;

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		goto err_zero_use;

	if (obj->import_attach)
		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
	else
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, pgprot_writecombine(PAGE_KERNEL));

	if (!shmem->vaddr) {
		DRM_DEBUG_KMS("Failed to vmap pages\n");
		ret = -ENOMEM;
		goto err_put_pages;
	}

	return shmem->vaddr;

err_put_pages:
	drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure that a virtual address exists for the buffer backing
 * the shmem GEM object.
 *
 * Returns:
 * The kernel virtual address of the mapping on success or an ERR_PTR()-encoded
 * negative error code on failure.
 */
void *drm_gem_shmem_vmap(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	void *vaddr;
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ERR_PTR(ret);
	vaddr = drm_gem_shmem_vmap_locked(shmem);
	mutex_unlock(&shmem->vmap_lock);

	return vaddr;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);
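
/*
 * Usage sketch (hypothetical caller; src and len are assumptions): the
 * mapping is reference-counted, so the address must be released with
 * drm_gem_shmem_vunmap() once it is no longer needed.
 *
 *	vaddr = drm_gem_shmem_vmap(obj);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, src, len);
 *	drm_gem_shmem_vunmap(obj, vaddr);
 */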

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach)
		dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
	else
		vunmap(shmem->vaddr);

	shmem->vaddr = NULL;
	drm_gem_shmem_put_pages(shmem);
}

/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @obj: GEM object
 * @vaddr: Virtual address of the mapping
 *
 * This function removes the virtual address when the use count drops to zero.
 */
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);

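/**
 * drm_gem_shmem_create_with_handle - Allocate an object with the given size
 * and create a GEM handle to it
 * @file_priv: DRM file structure for the handle
 * @dev: DRM device
 * @size: Size of the object to allocate
 * @handle: Return location for the GEM handle
 *
 * This function creates a shmem GEM object and a handle for it in one go. The
 * handle holds the sole reference to the object, so the object is destroyed
 * once the handle is closed.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */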
struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an id in the idr table where the object is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put_unlocked(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}
EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);

/*
 * Update madvise status, returns true if the object has not been purged,
 * false otherwise.
 */
int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);
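
/*
 * Sketch of a driver madvise ioctl built on this helper (hypothetical "foo"
 * driver; args->madv, FOO_MADV_DONTNEED and foo->shrink_list are assumptions,
 * while madv_list is the real list head in struct drm_gem_shmem_object):
 *
 *	retained = drm_gem_shmem_madvise(obj, args->madv);
 *	if (retained && args->madv == FOO_MADV_DONTNEED)
 *		list_add_tail(&shmem->madv_list, &foo->shrink_list);
 */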

void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
		     shmem->sgt->nents, DMA_BIDIRECTIONAL);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much memory as possible back to the
	 * system, since we are called from OOM. To do this we must instruct
	 * the shmem filesystem to drop all of its backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
				 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

bool drm_gem_shmem_purge(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(obj);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
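
/*
 * Shrinker sketch (a hypothetical foo_shrinker_scan() walking the
 * driver-private foo->shrink_list assumed above): purgeable objects are
 * dropped first, and the trylock in drm_gem_shmem_purge() keeps the
 * shrinker free of lock inversions.
 *
 *	list_for_each_entry_safe(shmem, tmp, &foo->shrink_list, madv_list) {
 *		if (drm_gem_shmem_is_purgeable(shmem) &&
 *		    drm_gem_shmem_purge(&shmem->base)) {
 *			list_del(&shmem->madv_list);
 *			freed += shmem->base.size >> PAGE_SHIFT;
 *		}
 *	}
 */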

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer, rounding the row size
 * in bits up to a whole number of bytes. Drivers for hardware that doesn't
 * have any additional restrictions on the pitch can directly use this
 * function as their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = args->pitch * args->height;
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = args->pitch * args->height;
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
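
/*
 * Worked example of the pitch/size computation above: for a 100x100
 * XRGB8888 buffer (bpp = 32), min_pitch = DIV_ROUND_UP(100 * 32, 8) = 400
 * bytes, so size = 400 * 100 = 40000 bytes, which drm_gem_shmem_create()
 * then rounds up to a whole number of pages.
 */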

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	struct page *page;

	if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
		return VM_FAULT_SIGBUS;

	page = shmem->pages[vmf->pgoff];

	return vmf_insert_page(vma, vmf->address, page);
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	ret = drm_gem_shmem_get_pages(shmem);
	WARN_ON_ONCE(ret != 0);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @filp: File object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects. Drivers which employ the shmem helpers should
 * use this function as the &file_operations.mmap handler in the DRM device
 * file's file_operations structure.
 *
 * Instead of directly referencing this function, drivers should use the
 * DEFINE_DRM_GEM_SHMEM_FOPS() macro.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	shmem = to_drm_gem_shmem_obj(vma->vm_private_data);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret) {
		drm_gem_vm_close(vma);
		return ret;
	}

	/* VM_PFNMAP was set by drm_gem_mmap() */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	/* Remove the fake offset */
	vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @p: DRM printer
 * @indent: Tab indentation level
 * @obj: GEM object
 */
void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
			      const struct drm_gem_object *obj)
{
	const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 * pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an
 * ERR_PTR()-encoded negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 * scatter/gather table for a shmem GEM object.
 * @obj: GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an
 * ERR_PTR()-encoded negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
{
	int ret;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(&shmem->base);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);

	shmem->sgt = sgt;

	return sgt;

err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
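
/*
 * Usage sketch (hypothetical driver; foo_hw_map_page() is an assumption):
 * the table is cached in shmem->sgt, so repeated calls are cheap, and the
 * dma-mapped entries can be iterated to program hardware:
 *
 *	struct scatterlist *sg;
 *	int i;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(obj);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	for_each_sg(sgt->sgl, sg, sgt->nents, i)
 *		foo_hw_map_page(foo, sg_dma_address(sg), sg_dma_len(sg));
 */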

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 * another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	size_t npages = size >> PAGE_SHIFT;
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!shmem->pages) {
		ret = -ENOMEM;
		goto err_free_gem;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
	if (ret < 0)
		goto err_free_array;

	shmem->sgt = sgt;
	shmem->pages_use_count = 1; /* Permanently pinned from our point of view */

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;

err_free_array:
	kvfree(shmem->pages);
err_free_gem:
	drm_gem_object_put_unlocked(&shmem->base);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);