1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 #include <linux/module.h>
4
5 #include <drm/drm_debugfs.h>
6 #include <drm/drm_device.h>
7 #include <drm/drm_drv.h>
8 #include <drm/drm_file.h>
9 #include <drm/drm_framebuffer.h>
10 #include <drm/drm_gem_framebuffer_helper.h>
11 #include <drm/drm_gem_ttm_helper.h>
12 #include <drm/drm_gem_vram_helper.h>
13 #include <drm/drm_managed.h>
14 #include <drm/drm_mode.h>
15 #include <drm/drm_plane.h>
16 #include <drm/drm_prime.h>
17 #include <drm/drm_simple_kms_helper.h>
18 #include <drm/ttm/ttm_page_alloc.h>
19
20 static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
21
22 /**
23 * DOC: overview
24 *
25 * This library provides &struct drm_gem_vram_object (GEM VRAM), a GEM
26 * buffer object that is backed by video RAM (VRAM). It can be used for
27 * framebuffer devices with dedicated memory.
28 *
29 * The data structure &struct drm_vram_mm and its helpers implement a memory
30 * manager for simple framebuffer devices with dedicated video memory. GEM
31 * VRAM buffer objects are either placed in the video memory or remain evicted
32 * to system memory.
33 *
34 * With the GEM interface userspace applications create, manage and destroy
35 * graphics buffers, such as an on-screen framebuffer. GEM does not provide
36 * an implementation of these interfaces. It's up to the DRM driver to
37 * provide an implementation that suits the hardware. If the hardware device
38 * contains dedicated video memory, the DRM driver can use the VRAM helper
39 * library. Each active buffer object is stored in video RAM. Active
40 * buffers are used for drawing the current frame, typically something like
41 * the frame's scanout buffer or the cursor image. If there's no more space
42 * left in VRAM, inactive GEM objects can be moved to system memory.
43 *
44 * To initialize the VRAM helper library call drmm_vram_helper_init().
45 * The function allocates and initializes an instance of &struct drm_vram_mm
46 * in &struct drm_device.vram_mm . Use &DRM_GEM_VRAM_DRIVER to initialize
47 * &struct drm_driver and &DRM_VRAM_MM_FILE_OPERATIONS to initialize
48 * &struct file_operations; as illustrated below.
49 *
50 * .. code-block:: c
51 *
52 * struct file_operations fops = {
53 * .owner = THIS_MODULE,
54 * DRM_VRAM_MM_FILE_OPERATIONS
55 * };
56 * struct drm_driver drv = {
57 * .driver_features = DRM_ ... ,
58 * .fops = &fops,
59 * DRM_GEM_VRAM_DRIVER
60 * };
61 *
62 * int init_drm_driver()
63 * {
64 * struct drm_device *dev;
65 * uint64_t vram_base;
66 * unsigned long vram_size;
67 * int ret;
68 *
69 * // setup device, vram base and size
70 * // ...
71 *
72 * ret = drmm_vram_helper_init(dev, vram_base, vram_size);
73 * if (ret)
74 * return ret;
75 * return 0;
76 * }
77 *
78 * This creates an instance of &struct drm_vram_mm, exports DRM userspace
79 * interfaces for GEM buffer management and initializes file operations to
80 * allow for accessing created GEM buffers. With this setup, the DRM driver
81 * manages an area of video RAM with VRAM MM and provides GEM VRAM objects
82 * to userspace.
83 *
84 * You don't have to clean up the instance of VRAM MM.
85 * drmm_vram_helper_init() is a managed interface that installs a
86 * clean-up handler to run during the DRM device's release.
87 *
88 * For drawing or scanout operations, the respective buffer objects have to be pinned
89 * in video RAM. Call drm_gem_vram_pin() with &DRM_GEM_VRAM_PL_FLAG_VRAM or
90 * &DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a buffer object in video RAM or system
91 * memory. Call drm_gem_vram_unpin() to release the pinned object afterwards.
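 *
 * A minimal sketch of pinning a buffer object into video RAM and releasing
 * it again might look as follows; error handling and the surrounding driver
 * code are omitted, and 'gbo' stands in for the buffer object at hand.
 *
 * .. code-block:: c
 *
 *        ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
 *        if (ret)
 *                return ret;
 *
 *        // draw or scan out from the pinned buffer
 *
 *        drm_gem_vram_unpin(gbo);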
92 *
93 * A buffer object that is pinned in video RAM has a fixed address within that
94 * memory region. Call drm_gem_vram_offset() to retrieve this value. Typically
95 * it's used to program the hardware's scanout engine for framebuffers, set
96 * the cursor overlay's image for a mouse cursor, or use it as input to the
97 * hardware's drawing engine.
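 *
 * A short sketch of programming a scanout address; my_hw_set_scanout_base()
 * is a placeholder for the driver's own register write.
 *
 * .. code-block:: c
 *
 *        s64 offset = drm_gem_vram_offset(gbo);
 *
 *        if (offset < 0)
 *                return offset;
 *        my_hw_set_scanout_base(offset);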
98 *
99 * To access a buffer object's memory from the DRM driver, call
100 * drm_gem_vram_vmap(). It maps the buffer into kernel address
101 * space and returns the memory address. Use drm_gem_vram_vunmap() to
102 * release the mapping.
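 *
 * For example, updating a buffer's contents from the kernel might look like
 * the following sketch; the actual drawing is left out.
 *
 * .. code-block:: c
 *
 *        void *vaddr = drm_gem_vram_vmap(gbo);
 *
 *        if (IS_ERR(vaddr))
 *                return PTR_ERR(vaddr);
 *
 *        // draw into vaddr
 *
 *        drm_gem_vram_vunmap(gbo, vaddr);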
103 */
104
105 /*
106 * Buffer-objects helpers
107 */
108
109 static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
110 {
111 /* We got here via ttm_bo_put(), which means that the
112 * TTM buffer object in 'bo' has already been cleaned
113 * up; only release the GEM object.
114 */
115
116 WARN_ON(gbo->kmap_use_count);
117 WARN_ON(gbo->kmap.virtual);
118
119 drm_gem_object_release(&gbo->bo.base);
120 }
121
122 static void drm_gem_vram_destroy(struct drm_gem_vram_object *gbo)
123 {
124 drm_gem_vram_cleanup(gbo);
125 kfree(gbo);
126 }
127
128 static void ttm_buffer_object_destroy(struct ttm_buffer_object *bo)
129 {
130 struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo);
131
132 drm_gem_vram_destroy(gbo);
133 }
134
135 static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
136 unsigned long pl_flag)
137 {
138 u32 invariant_flags = 0;
139 unsigned int i;
140 unsigned int c = 0;
141
142 if (pl_flag & DRM_GEM_VRAM_PL_FLAG_TOPDOWN)
143 invariant_flags = TTM_PL_FLAG_TOPDOWN;
144
145 gbo->placement.placement = gbo->placements;
146 gbo->placement.busy_placement = gbo->placements;
147
148 if (pl_flag & DRM_GEM_VRAM_PL_FLAG_VRAM) {
149 gbo->placements[c].mem_type = TTM_PL_VRAM;
150 gbo->placements[c++].flags = TTM_PL_FLAG_WC |
151 TTM_PL_FLAG_UNCACHED |
152 invariant_flags;
153 }
154
155 if (pl_flag & DRM_GEM_VRAM_PL_FLAG_SYSTEM || !c) {
156 gbo->placements[c].mem_type = TTM_PL_SYSTEM;
157 gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
158 invariant_flags;
159 }
160
161 gbo->placement.num_placement = c;
162 gbo->placement.num_busy_placement = c;
163
164 for (i = 0; i < c; ++i) {
165 gbo->placements[i].fpfn = 0;
166 gbo->placements[i].lpfn = 0;
167 }
168 }
169
170 /*
171 * Note that on error, drm_gem_vram_init will free the buffer object.
172 */
173
174 static int drm_gem_vram_init(struct drm_device *dev,
175 struct drm_gem_vram_object *gbo,
176 size_t size, unsigned long pg_align)
177 {
178 struct drm_vram_mm *vmm = dev->vram_mm;
179 struct ttm_bo_device *bdev;
180 int ret;
181 size_t acc_size;
182
183 if (WARN_ONCE(!vmm, "VRAM MM not initialized")) {
184 kfree(gbo);
185 return -EINVAL;
186 }
187 bdev = &vmm->bdev;
188
189 gbo->bo.base.funcs = &drm_gem_vram_object_funcs;
190
191 ret = drm_gem_object_init(dev, &gbo->bo.base, size);
192 if (ret) {
193 kfree(gbo);
194 return ret;
195 }
196
197 acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));
198
199 gbo->bo.bdev = bdev;
200 drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
201 DRM_GEM_VRAM_PL_FLAG_SYSTEM);
202
203 ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
204 &gbo->placement, pg_align, false, acc_size,
205 NULL, NULL, ttm_buffer_object_destroy);
206 if (ret)
207 /*
208 * A failing ttm_bo_init will call ttm_buffer_object_destroy
209 * to release gbo->bo.base and kfree gbo.
210 */
211 return ret;
212
213 return 0;
214 }
215
216 /**
217 * drm_gem_vram_create() - Creates a VRAM-backed GEM object
218 * @dev: the DRM device
219 * @size: the buffer size in bytes
220 * @pg_align: the buffer's alignment in multiples of the page size
221 *
222 * Returns:
223 * A new instance of &struct drm_gem_vram_object on success, or
224 * an ERR_PTR()-encoded error code otherwise.
225 */
226 struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
227 size_t size,
228 unsigned long pg_align)
229 {
230 struct drm_gem_vram_object *gbo;
231 int ret;
232
233 if (dev->driver->gem_create_object) {
234 struct drm_gem_object *gem =
235 dev->driver->gem_create_object(dev, size);
236 if (!gem)
237 return ERR_PTR(-ENOMEM);
238 gbo = drm_gem_vram_of_gem(gem);
239 } else {
240 gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
241 if (!gbo)
242 return ERR_PTR(-ENOMEM);
243 }
244
245 ret = drm_gem_vram_init(dev, gbo, size, pg_align);
246 if (ret < 0)
247 return ERR_PTR(ret);
248
249 return gbo;
250 }
251 EXPORT_SYMBOL(drm_gem_vram_create);
252
253 /**
254 * drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object
255 * @gbo: the GEM VRAM object
256 *
257 * See ttm_bo_put() for more information.
258 */
259 void drm_gem_vram_put(struct drm_gem_vram_object *gbo)
260 {
261 ttm_bo_put(&gbo->bo);
262 }
263 EXPORT_SYMBOL(drm_gem_vram_put);
264
265 /**
266 * drm_gem_vram_mmap_offset() - Returns a GEM VRAM object's mmap offset
267 * @gbo: the GEM VRAM object
268 *
269 * See drm_vma_node_offset_addr() for more information.
270 *
271 * Returns:
272 * The buffer object's offset for userspace mappings on success, or
273 * 0 if no offset is allocated.
274 */
275 u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo)
276 {
277 return drm_vma_node_offset_addr(&gbo->bo.base.vma_node);
278 }
279 EXPORT_SYMBOL(drm_gem_vram_mmap_offset);
280
281 static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo)
282 {
283 /* Keep TTM behavior for now, remove when drivers are audited */
284 if (WARN_ON_ONCE(!gbo->bo.mem.mm_node))
285 return 0;
286
287 return gbo->bo.mem.start;
288 }
289
290 /**
291 * drm_gem_vram_offset() - \
292 Returns a GEM VRAM object's offset in video memory
293 * @gbo: the GEM VRAM object
294 *
295 * This function returns the buffer object's offset in the device's video
296 * memory. The buffer object has to be pinned to %TTM_PL_VRAM.
297 *
298 * Returns:
299 * The buffer object's offset in video memory on success, or
300 * a negative errno code otherwise.
301 */
302 s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
303 {
304 if (WARN_ON_ONCE(!gbo->pin_count))
305 return (s64)-ENODEV;
306 return drm_gem_vram_pg_offset(gbo) << PAGE_SHIFT;
307 }
308 EXPORT_SYMBOL(drm_gem_vram_offset);
309
310 static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo,
311 unsigned long pl_flag)
312 {
313 int i, ret;
314 struct ttm_operation_ctx ctx = { false, false };
315
316 if (gbo->pin_count)
317 goto out;
318
319 if (pl_flag)
320 drm_gem_vram_placement(gbo, pl_flag);
321
322 for (i = 0; i < gbo->placement.num_placement; ++i)
323 gbo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
324
325 ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
326 if (ret < 0)
327 return ret;
328
329 out:
330 ++gbo->pin_count;
331
332 return 0;
333 }
334
335 /**
336 * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
337 * @gbo: the GEM VRAM object
338 * @pl_flag: a bitmask of possible memory regions
339 *
340 * Pinning a buffer object ensures that it is not evicted from
341 * a memory region. A pinned buffer object has to be unpinned before
342 * it can be pinned to another region. If the pl_flag argument is 0,
343 * the buffer is pinned at its current location (video RAM or system
344 * memory).
345 *
346 * Small buffer objects, such as cursor images, can lead to memory
347 * fragmentation if they are pinned in the middle of video RAM. This
348 * is especially a problem on devices with only a small amount of
349 * video RAM. Fragmentation can prevent the primary framebuffer from
350 * fitting in, even though there's enough memory overall. The modifier
351 * DRM_GEM_VRAM_PL_FLAG_TOPDOWN marks the buffer object to be pinned
352 * at the high end of the memory region to avoid fragmentation.
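 *
 * A cursor buffer, for instance, might be pinned like this; 'cursor_gbo'
 * is a placeholder for the driver's cursor object.
 *
 * .. code-block:: c
 *
 *        ret = drm_gem_vram_pin(cursor_gbo,
 *                               DRM_GEM_VRAM_PL_FLAG_VRAM |
 *                               DRM_GEM_VRAM_PL_FLAG_TOPDOWN);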
353 *
354 * Returns:
355 * 0 on success, or
356 * a negative error code otherwise.
357 */
358 int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
359 {
360 int ret;
361
362 ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
363 if (ret)
364 return ret;
365 ret = drm_gem_vram_pin_locked(gbo, pl_flag);
366 ttm_bo_unreserve(&gbo->bo);
367
368 return ret;
369 }
370 EXPORT_SYMBOL(drm_gem_vram_pin);
371
372 static int drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
373 {
374 int i, ret;
375 struct ttm_operation_ctx ctx = { false, false };
376
377 if (WARN_ON_ONCE(!gbo->pin_count))
378 return 0;
379
380 --gbo->pin_count;
381 if (gbo->pin_count)
382 return 0;
383
384 for (i = 0; i < gbo->placement.num_placement ; ++i)
385 gbo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
386
387 ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
388 if (ret < 0)
389 return ret;
390
391 return 0;
392 }
393
394 /**
395 * drm_gem_vram_unpin() - Unpins a GEM VRAM object
396 * @gbo: the GEM VRAM object
397 *
398 * Returns:
399 * 0 on success, or
400 * a negative error code otherwise.
401 */
402 int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
403 {
404 int ret;
405
406 ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
407 if (ret)
408 return ret;
409 ret = drm_gem_vram_unpin_locked(gbo);
410 ttm_bo_unreserve(&gbo->bo);
411
412 return ret;
413 }
414 EXPORT_SYMBOL(drm_gem_vram_unpin);
415
416 static void *drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
417 bool map, bool *is_iomem)
418 {
419 int ret;
420 struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
421
422 if (gbo->kmap_use_count > 0)
423 goto out;
424
425 if (kmap->virtual || !map)
426 goto out;
427
428 ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);
429 if (ret)
430 return ERR_PTR(ret);
431
432 out:
433 if (!kmap->virtual) {
434 if (is_iomem)
435 *is_iomem = false;
436 return NULL; /* not mapped; don't increment ref */
437 }
438 ++gbo->kmap_use_count;
439 if (is_iomem)
440 return ttm_kmap_obj_virtual(kmap, is_iomem);
441 return kmap->virtual;
442 }
443
444 static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo)
445 {
446 if (WARN_ON_ONCE(!gbo->kmap_use_count))
447 return;
448 if (--gbo->kmap_use_count > 0)
449 return;
450
451 /*
452 * Permanently mapping and unmapping buffers adds overhead from
453 * updating the page tables and creates debugging output. Therefore,
454 * we delay the actual unmap operation until the BO gets evicted
455 * from memory. See drm_gem_vram_bo_driver_move_notify().
456 */
457 }
458
459 /**
460 * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
461 * space
462 * @gbo: The GEM VRAM object to map
463 *
464 * The vmap function pins a GEM VRAM object to its current location, either
465 * system or video memory, and maps its buffer into kernel address space.
466 * As pinned objects cannot be relocated, you should avoid pinning objects
467 * permanently. Call drm_gem_vram_vunmap() with the returned address to
468 * unmap and unpin the GEM VRAM object.
469 *
470 * Returns:
471 * The buffer's virtual address on success, or
472 * an ERR_PTR()-encoded error code otherwise.
473 */
474 void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo)
475 {
476 int ret;
477 void *base;
478
479 ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
480 if (ret)
481 return ERR_PTR(ret);
482
483 ret = drm_gem_vram_pin_locked(gbo, 0);
484 if (ret)
485 goto err_ttm_bo_unreserve;
486 base = drm_gem_vram_kmap_locked(gbo, true, NULL);
487 if (IS_ERR(base)) {
488 ret = PTR_ERR(base);
489 goto err_drm_gem_vram_unpin_locked;
490 }
491
492 ttm_bo_unreserve(&gbo->bo);
493
494 return base;
495
496 err_drm_gem_vram_unpin_locked:
497 drm_gem_vram_unpin_locked(gbo);
498 err_ttm_bo_unreserve:
499 ttm_bo_unreserve(&gbo->bo);
500 return ERR_PTR(ret);
501 }
502 EXPORT_SYMBOL(drm_gem_vram_vmap);
503
504 /**
505 * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object
506 * @gbo: The GEM VRAM object to unmap
507 * @vaddr: The mapping's base address as returned by drm_gem_vram_vmap()
508 *
509 * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See
510 * the documentation for drm_gem_vram_vmap() for more information.
511 */
512 void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr)
513 {
514 int ret;
515
516 ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
517 if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
518 return;
519
520 drm_gem_vram_kunmap_locked(gbo);
521 drm_gem_vram_unpin_locked(gbo);
522
523 ttm_bo_unreserve(&gbo->bo);
524 }
525 EXPORT_SYMBOL(drm_gem_vram_vunmap);
526
527 /**
528 * drm_gem_vram_fill_create_dumb() - \
529 Helper for implementing &struct drm_driver.dumb_create
530 * @file: the DRM file
531 * @dev: the DRM device
532 * @pg_align: the buffer's alignment in multiples of the page size
533 * @pitch_align: the scanline's alignment in powers of 2
534 * @args: the arguments as provided to \
535 &struct drm_driver.dumb_create
536 *
537 * This helper function fills &struct drm_mode_create_dumb, which is used
538 * by &struct drm_driver.dumb_create. Implementations of this interface
539 * should forward their arguments to this helper, plus the driver-specific
540 * parameters.
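 *
 * As a sketch, a driver that aligns scanlines to 16 bytes might forward to
 * this helper as follows; my_dumb_create() and the alignment value are
 * placeholders.
 *
 * .. code-block:: c
 *
 *        static int my_dumb_create(struct drm_file *file, struct drm_device *dev,
 *                                  struct drm_mode_create_dumb *args)
 *        {
 *                return drm_gem_vram_fill_create_dumb(file, dev, 0, 16, args);
 *        }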
541 *
542 * Returns:
543 * 0 on success, or
544 * a negative error code otherwise.
545 */
546 int drm_gem_vram_fill_create_dumb(struct drm_file *file,
547 struct drm_device *dev,
548 unsigned long pg_align,
549 unsigned long pitch_align,
550 struct drm_mode_create_dumb *args)
551 {
552 size_t pitch, size;
553 struct drm_gem_vram_object *gbo;
554 int ret;
555 u32 handle;
556
557 pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
558 if (pitch_align) {
559 if (WARN_ON_ONCE(!is_power_of_2(pitch_align)))
560 return -EINVAL;
561 pitch = ALIGN(pitch, pitch_align);
562 }
563 size = pitch * args->height;
564
565 size = roundup(size, PAGE_SIZE);
566 if (!size)
567 return -EINVAL;
568
569 gbo = drm_gem_vram_create(dev, size, pg_align);
570 if (IS_ERR(gbo))
571 return PTR_ERR(gbo);
572
573 ret = drm_gem_handle_create(file, &gbo->bo.base, &handle);
574 if (ret)
575 goto err_drm_gem_object_put;
576
577 drm_gem_object_put(&gbo->bo.base);
578
579 args->pitch = pitch;
580 args->size = size;
581 args->handle = handle;
582
583 return 0;
584
585 err_drm_gem_object_put:
586 drm_gem_object_put(&gbo->bo.base);
587 return ret;
588 }
589 EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb);
590
591 /*
592 * Helpers for struct ttm_bo_driver
593 */
594
595 static bool drm_is_gem_vram(struct ttm_buffer_object *bo)
596 {
597 return (bo->destroy == ttm_buffer_object_destroy);
598 }
599
600 static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo,
601 struct ttm_placement *pl)
602 {
603 drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);
604 *pl = gbo->placement;
605 }
606
607 static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
608 bool evict,
609 struct ttm_resource *new_mem)
610 {
611 struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
612
613 if (WARN_ON_ONCE(gbo->kmap_use_count))
614 return;
615
616 if (!kmap->virtual)
617 return;
618 ttm_bo_kunmap(kmap);
619 kmap->virtual = NULL;
620 }
621
622 /*
623 * Helpers for struct drm_gem_object_funcs
624 */
625
626 /**
627 * drm_gem_vram_object_free() - \
628 Implements &struct drm_gem_object_funcs.free
629 * @gem: GEM object. Refers to &struct drm_gem_vram_object.gem
630 */
631 static void drm_gem_vram_object_free(struct drm_gem_object *gem)
632 {
633 struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
634
635 drm_gem_vram_put(gbo);
636 }
637
638 /*
639 * Helpers for dumb buffers
640 */
641
642 /**
643 * drm_gem_vram_driver_create_dumb() - \
644 Implements &struct drm_driver.dumb_create
645 * @file: the DRM file
646 * @dev: the DRM device
647 * @args: the arguments as provided to \
648 &struct drm_driver.dumb_create
649 *
650 * This function requires the driver to use &struct drm_device.vram_mm for its
651 * instance of VRAM MM.
652 *
653 * Returns:
654 * 0 on success, or
655 * a negative error code otherwise.
656 */
657 int drm_gem_vram_driver_dumb_create(struct drm_file *file,
658 struct drm_device *dev,
659 struct drm_mode_create_dumb *args)
660 {
661 if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
662 return -EINVAL;
663
664 return drm_gem_vram_fill_create_dumb(file, dev, 0, 0, args);
665 }
666 EXPORT_SYMBOL(drm_gem_vram_driver_dumb_create);
667
668 /**
669 * drm_gem_vram_driver_dumb_mmap_offset() - \
670 Implements &struct drm_driver.dumb_mmap_offset
671 * @file: DRM file pointer.
672 * @dev: DRM device.
673 * @handle: GEM handle
674 * @offset: Returns the mapping's memory offset on success
675 *
676 * Returns:
677 * 0 on success, or
678 * a negative errno code otherwise.
679 */
680 int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
681 struct drm_device *dev,
682 uint32_t handle, uint64_t *offset)
683 {
684 struct drm_gem_object *gem;
685 struct drm_gem_vram_object *gbo;
686
687 gem = drm_gem_object_lookup(file, handle);
688 if (!gem)
689 return -ENOENT;
690
691 gbo = drm_gem_vram_of_gem(gem);
692 *offset = drm_gem_vram_mmap_offset(gbo);
693
694 drm_gem_object_put(gem);
695
696 return 0;
697 }
698 EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset);
699
700 /*
701 * Helpers for struct drm_plane_helper_funcs
702 */
703
704 /**
705 * drm_gem_vram_plane_helper_prepare_fb() - \
706 * Implements &struct drm_plane_helper_funcs.prepare_fb
707 * @plane: a DRM plane
708 * @new_state: the plane's new state
709 *
710 * During plane updates, this function sets the plane's fence and
711 * pins the GEM VRAM objects of the plane's new framebuffer to VRAM.
712 * Call drm_gem_vram_plane_helper_cleanup_fb() to unpin them.
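 *
 * A driver typically wires this helper and its cleanup_fb counterpart
 * directly into its plane helpers, roughly as sketched below; the atomic
 * callbacks are placeholders.
 *
 * .. code-block:: c
 *
 *        static const struct drm_plane_helper_funcs my_plane_helper_funcs = {
 *                .atomic_check = my_plane_atomic_check,
 *                .atomic_update = my_plane_atomic_update,
 *                .prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
 *                .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
 *        };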
713 *
714 * Returns:
715 * 0 on success, or
716 * a negative errno code otherwise.
717 */
718 int
719 drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
720 struct drm_plane_state *new_state)
721 {
722 size_t i;
723 struct drm_gem_vram_object *gbo;
724 int ret;
725
726 if (!new_state->fb)
727 return 0;
728
729 for (i = 0; i < ARRAY_SIZE(new_state->fb->obj); ++i) {
730 if (!new_state->fb->obj[i])
731 continue;
732 gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
733 ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
734 if (ret)
735 goto err_drm_gem_vram_unpin;
736 }
737
738 ret = drm_gem_fb_prepare_fb(plane, new_state);
739 if (ret)
740 goto err_drm_gem_vram_unpin;
741
742 return 0;
743
744 err_drm_gem_vram_unpin:
745 while (i) {
746 --i;
747 gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
748 drm_gem_vram_unpin(gbo);
749 }
750 return ret;
751 }
752 EXPORT_SYMBOL(drm_gem_vram_plane_helper_prepare_fb);
753
754 /**
755 * drm_gem_vram_plane_helper_cleanup_fb() - \
756 * Implements &struct drm_plane_helper_funcs.cleanup_fb
757 * @plane: a DRM plane
758 * @old_state: the plane's old state
759 *
760 * During plane updates, this function unpins the GEM VRAM
761 * objects of the plane's old framebuffer from VRAM. Complements
762 * drm_gem_vram_plane_helper_prepare_fb().
763 */
764 void
765 drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
766 struct drm_plane_state *old_state)
767 {
768 size_t i;
769 struct drm_gem_vram_object *gbo;
770
771 if (!old_state->fb)
772 return;
773
774 for (i = 0; i < ARRAY_SIZE(old_state->fb->obj); ++i) {
775 if (!old_state->fb->obj[i])
776 continue;
777 gbo = drm_gem_vram_of_gem(old_state->fb->obj[i]);
778 drm_gem_vram_unpin(gbo);
779 }
780 }
781 EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb);
782
783 /*
784 * Helpers for struct drm_simple_display_pipe_funcs
785 */
786
787 /**
788 * drm_gem_vram_simple_display_pipe_prepare_fb() - \
789 * Implements &struct drm_simple_display_pipe_funcs.prepare_fb
790 * @pipe: a simple display pipe
791 * @new_state: the plane's new state
792 *
793 * During plane updates, this function pins the GEM VRAM
794 * objects of the plane's new framebuffer to VRAM. Call
795 * drm_gem_vram_simple_display_pipe_cleanup_fb() to unpin them.
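 *
 * For drivers based on &struct drm_simple_display_pipe, hooking up these
 * helpers might look as sketched below; the other callbacks are
 * placeholders.
 *
 * .. code-block:: c
 *
 *        static const struct drm_simple_display_pipe_funcs my_pipe_funcs = {
 *                .enable = my_pipe_enable,
 *                .disable = my_pipe_disable,
 *                .update = my_pipe_update,
 *                .prepare_fb = drm_gem_vram_simple_display_pipe_prepare_fb,
 *                .cleanup_fb = drm_gem_vram_simple_display_pipe_cleanup_fb,
 *        };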
796 *
797 * Returns:
798 * 0 on success, or
799 * a negative errno code otherwise.
800 */
801 int drm_gem_vram_simple_display_pipe_prepare_fb(
802 struct drm_simple_display_pipe *pipe,
803 struct drm_plane_state *new_state)
804 {
805 return drm_gem_vram_plane_helper_prepare_fb(&pipe->plane, new_state);
806 }
807 EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_prepare_fb);
808
809 /**
810 * drm_gem_vram_simple_display_pipe_cleanup_fb() - \
811 * Implements &struct drm_simple_display_pipe_funcs.cleanup_fb
812 * @pipe: a simple display pipe
813 * @old_state: the plane's old state
814 *
815 * During plane updates, this function unpins the GEM VRAM
816 * objects of the plane's old framebuffer from VRAM. Complements
817 * drm_gem_vram_simple_display_pipe_prepare_fb().
818 */
819 void drm_gem_vram_simple_display_pipe_cleanup_fb(
820 struct drm_simple_display_pipe *pipe,
821 struct drm_plane_state *old_state)
822 {
823 drm_gem_vram_plane_helper_cleanup_fb(&pipe->plane, old_state);
824 }
825 EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_cleanup_fb);
826
827 /*
828 * PRIME helpers
829 */
830
831 /**
832 * drm_gem_vram_object_pin() - \
833 Implements &struct drm_gem_object_funcs.pin
834 * @gem: The GEM object to pin
835 *
836 * Returns:
837 * 0 on success, or
838 * a negative errno code otherwise.
839 */
840 static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
841 {
842 struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
843
844 /* Fbdev console emulation is the use case of these PRIME
845 * helpers. This may involve updating a hardware buffer from
846 * a shadow FB. We pin the buffer to its current location
847 * (either video RAM or system memory) to prevent it from
848 * being relocated during the update operation. If you require
849 * the buffer to be pinned to VRAM, implement a callback that
850 * sets the flags accordingly.
851 */
852 return drm_gem_vram_pin(gbo, 0);
853 }
854
855 /**
856 * drm_gem_vram_object_unpin() - \
857 Implements &struct drm_gem_object_funcs.unpin
858 * @gem: The GEM object to unpin
859 */
860 static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
861 {
862 struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
863
864 drm_gem_vram_unpin(gbo);
865 }
866
867 /**
868 * drm_gem_vram_object_vmap() - \
869 Implements &struct drm_gem_object_funcs.vmap
870 * @gem: The GEM object to map
871 *
872 * Returns:
873 * The buffer's virtual address on success, or
874 * NULL otherwise.
875 */
876 static void *drm_gem_vram_object_vmap(struct drm_gem_object *gem)
877 {
878 struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
879 void *base;
880
881 base = drm_gem_vram_vmap(gbo);
882 if (IS_ERR(base))
883 return NULL;
884 return base;
885 }
886
887 /**
888 * drm_gem_vram_object_vunmap() - \
889 Implements &struct drm_gem_object_funcs.vunmap
890 * @gem: The GEM object to unmap
891 * @vaddr: The mapping's base address
892 */
893 static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem,
894 void *vaddr)
895 {
896 struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
897
898 drm_gem_vram_vunmap(gbo, vaddr);
899 }
900
901 /*
902 * GEM object funcs
903 */
904
905 static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
906 .free = drm_gem_vram_object_free,
907 .pin = drm_gem_vram_object_pin,
908 .unpin = drm_gem_vram_object_unpin,
909 .vmap = drm_gem_vram_object_vmap,
910 .vunmap = drm_gem_vram_object_vunmap,
911 .mmap = drm_gem_ttm_mmap,
912 .print_info = drm_gem_ttm_print_info,
913 };
914
915 /*
916 * VRAM memory manager
917 */
918
919 /*
920 * TTM TT
921 */
922
923 static void bo_driver_ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *tt)
924 {
925 ttm_tt_destroy_common(bdev, tt);
926 ttm_tt_fini(tt);
927 kfree(tt);
928 }
929
930 /*
931 * TTM BO device
932 */
933
934 static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
935 uint32_t page_flags)
936 {
937 struct ttm_tt *tt;
938 int ret;
939
940 tt = kzalloc(sizeof(*tt), GFP_KERNEL);
941 if (!tt)
942 return NULL;
943
944 ret = ttm_tt_init(tt, bo, page_flags);
945 if (ret < 0)
946 goto err_ttm_tt_init;
947
948 return tt;
949
950 err_ttm_tt_init:
951 kfree(tt);
952 return NULL;
953 }
954
955 static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
956 struct ttm_placement *placement)
957 {
958 struct drm_gem_vram_object *gbo;
959
960 /* TTM may pass BOs that are not GEM VRAM BOs. */
961 if (!drm_is_gem_vram(bo))
962 return;
963
964 gbo = drm_gem_vram_of_bo(bo);
965
966 drm_gem_vram_bo_driver_evict_flags(gbo, placement);
967 }
968
969 static void bo_driver_move_notify(struct ttm_buffer_object *bo,
970 bool evict,
971 struct ttm_resource *new_mem)
972 {
973 struct drm_gem_vram_object *gbo;
974
975 /* TTM may pass BOs that are not GEM VRAM BOs. */
976 if (!drm_is_gem_vram(bo))
977 return;
978
979 gbo = drm_gem_vram_of_bo(bo);
980
981 drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem);
982 }
983
984 static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
985 struct ttm_resource *mem)
986 {
987 struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);
988
989 switch (mem->mem_type) {
990 case TTM_PL_SYSTEM: /* nothing to do */
991 break;
992 case TTM_PL_VRAM:
993 mem->bus.offset = (mem->start << PAGE_SHIFT) + vmm->vram_base;
994 mem->bus.is_iomem = true;
995 break;
996 default:
997 return -EINVAL;
998 }
999
1000 return 0;
1001 }
1002
1003 static struct ttm_bo_driver bo_driver = {
1004 .ttm_tt_create = bo_driver_ttm_tt_create,
1005 .ttm_tt_destroy = bo_driver_ttm_tt_destroy,
1006 .eviction_valuable = ttm_bo_eviction_valuable,
1007 .evict_flags = bo_driver_evict_flags,
1008 .move_notify = bo_driver_move_notify,
1009 .io_mem_reserve = bo_driver_io_mem_reserve,
1010 };
1011
1012 /*
1013 * struct drm_vram_mm
1014 */
1015
1016 static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
1017 {
1018 struct drm_info_node *node = (struct drm_info_node *) m->private;
1019 struct drm_vram_mm *vmm = node->minor->dev->vram_mm;
1020 struct ttm_resource_manager *man = ttm_manager_type(&vmm->bdev, TTM_PL_VRAM);
1021 struct drm_printer p = drm_seq_file_printer(m);
1022
1023 ttm_resource_manager_debug(man, &p);
1024 return 0;
1025 }
1026
1027 static const struct drm_info_list drm_vram_mm_debugfs_list[] = {
1028 { "vram-mm", drm_vram_mm_debugfs, 0, NULL },
1029 };
1030
1031 /**
1032 * drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file.
1033 *
1034 * @minor: drm minor device.
1035 *
1036 */
1037 void drm_vram_mm_debugfs_init(struct drm_minor *minor)
1038 {
1039 drm_debugfs_create_files(drm_vram_mm_debugfs_list,
1040 ARRAY_SIZE(drm_vram_mm_debugfs_list),
1041 minor->debugfs_root, minor);
1042 }
1043 EXPORT_SYMBOL(drm_vram_mm_debugfs_init);
1044
1045 static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
1046 uint64_t vram_base, size_t vram_size)
1047 {
1048 int ret;
1049
1050 vmm->vram_base = vram_base;
1051 vmm->vram_size = vram_size;
1052
1053 ret = ttm_bo_device_init(&vmm->bdev, &bo_driver,
1054 dev->anon_inode->i_mapping,
1055 dev->vma_offset_manager,
1056 true);
1057 if (ret)
1058 return ret;
1059
1060 ret = ttm_range_man_init(&vmm->bdev, TTM_PL_VRAM,
1061 false, vram_size >> PAGE_SHIFT);
1062 if (ret)
1063 return ret;
1064
1065 return 0;
1066 }
1067
1068 static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
1069 {
1070 ttm_range_man_fini(&vmm->bdev, TTM_PL_VRAM);
1071 ttm_bo_device_release(&vmm->bdev);
1072 }
1073
1074 /*
1075 * Helpers for integration with struct drm_device
1076 */
1077
1078 /* deprecated; use drmm_vram_helper_init() */
1079 struct drm_vram_mm *drm_vram_helper_alloc_mm(
1080 struct drm_device *dev, uint64_t vram_base, size_t vram_size)
1081 {
1082 int ret;
1083
1084 if (WARN_ON(dev->vram_mm))
1085 return dev->vram_mm;
1086
1087 dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL);
1088 if (!dev->vram_mm)
1089 return ERR_PTR(-ENOMEM);
1090
1091 ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size);
1092 if (ret)
1093 goto err_kfree;
1094
1095 return dev->vram_mm;
1096
1097 err_kfree:
1098 kfree(dev->vram_mm);
1099 dev->vram_mm = NULL;
1100 return ERR_PTR(ret);
1101 }
1102 EXPORT_SYMBOL(drm_vram_helper_alloc_mm);
1103
1104 void drm_vram_helper_release_mm(struct drm_device *dev)
1105 {
1106 if (!dev->vram_mm)
1107 return;
1108
1109 drm_vram_mm_cleanup(dev->vram_mm);
1110 kfree(dev->vram_mm);
1111 dev->vram_mm = NULL;
1112 }
1113 EXPORT_SYMBOL(drm_vram_helper_release_mm);
1114
1115 static void drm_vram_mm_release(struct drm_device *dev, void *ptr)
1116 {
1117 drm_vram_helper_release_mm(dev);
1118 }
1119
1120 /**
1121 * drmm_vram_helper_init - Initializes a device's instance of
1122 * &struct drm_vram_mm
1123 * @dev: the DRM device
1124 * @vram_base: the base address of the video memory
1125 * @vram_size: the size of the video memory in bytes
1126 *
1127 * Creates a new instance of &struct drm_vram_mm and stores it in
1128 * &struct drm_device.vram_mm. The instance is auto-managed and cleaned
1129 * up as part of device cleanup. Calling this function multiple times
1130 * will generate an error message.
1131 *
1132 * Returns:
1133 * 0 on success, or a negative errno code otherwise.
1134 */
1135 int drmm_vram_helper_init(struct drm_device *dev, uint64_t vram_base,
1136 size_t vram_size)
1137 {
1138 struct drm_vram_mm *vram_mm;
1139
1140 if (drm_WARN_ON_ONCE(dev, dev->vram_mm))
1141 return 0;
1142
1143 vram_mm = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
1144 if (IS_ERR(vram_mm))
1145 return PTR_ERR(vram_mm);
1146 return drmm_add_action_or_reset(dev, drm_vram_mm_release, NULL);
1147 }
1148 EXPORT_SYMBOL(drmm_vram_helper_init);
1149
1150 /*
1151 * Mode-config helpers
1152 */
1153
1154 static enum drm_mode_status
1155 drm_vram_helper_mode_valid_internal(struct drm_device *dev,
1156 const struct drm_display_mode *mode,
1157 unsigned long max_bpp)
1158 {
1159 struct drm_vram_mm *vmm = dev->vram_mm;
1160 unsigned long fbsize, fbpages, max_fbpages;
1161
1162 if (WARN_ON(!dev->vram_mm))
1163 return MODE_BAD;
1164
1165 max_fbpages = (vmm->vram_size / 2) >> PAGE_SHIFT;
1166
1167 fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
1168 fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
1169
1170 if (fbpages > max_fbpages)
1171 return MODE_MEM;
1172
1173 return MODE_OK;
1174 }
1175
1176 /**
1177 * drm_vram_helper_mode_valid - Tests if a display mode's
1178 * framebuffer fits into the available video memory.
1179 * @dev: the DRM device
1180 * @mode: the mode to test
1181 *
1182 * This function tests if enough video memory is available for using the
1183 * specified display mode. Atomic modesetting requires importing the
1184 * designated framebuffer into video memory before evicting the active
1185 * one. Hence, any framebuffer may consume at most half of the available
1186 * VRAM. Display modes that require a larger framebuffer cannot be used,
1187 * even if the CRTC does support them. Each framebuffer is assumed to
1188 * have 32-bit color depth.
1189 *
1190 * Note:
1191 * The function can only test if the display mode is supported in
1192 * general. If there are too many framebuffers pinned to video memory,
1193 * a display mode may still not be usable in practice. The color depth of
1194 * 32-bit fits all current use cases. A more flexible test can be added
1195 * when necessary.
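 *
 * As a sketch, a driver might install this helper as its
 * &struct drm_mode_config_funcs.mode_valid callback; the other callbacks
 * shown are the usual atomic and GEM framebuffer helpers.
 *
 * .. code-block:: c
 *
 *        static const struct drm_mode_config_funcs my_mode_config_funcs = {
 *                .fb_create = drm_gem_fb_create,
 *                .mode_valid = drm_vram_helper_mode_valid,
 *                .atomic_check = drm_atomic_helper_check,
 *                .atomic_commit = drm_atomic_helper_commit,
 *        };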
1196 *
1197 * Returns:
1198 * MODE_OK if the display mode is supported, or an error code of type
1199 * enum drm_mode_status otherwise.
1200 */
1201 enum drm_mode_status
1202 drm_vram_helper_mode_valid(struct drm_device *dev,
1203 const struct drm_display_mode *mode)
1204 {
1205 static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */
1206
1207 return drm_vram_helper_mode_valid_internal(dev, mode, max_bpp);
1208 }
1209 EXPORT_SYMBOL(drm_vram_helper_mode_valid);
1210
1211 MODULE_DESCRIPTION("DRM VRAM memory-management helpers");
1212 MODULE_LICENSE("GPL");
1213