/*
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/export.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <linux/mem_encrypt.h>

#include <asm/pgtable.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>

#include "drm_internal.h"
#include "drm_legacy.h"

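/*
 * Bookkeeping entry for one userspace mapping of a legacy map: the VMA
 * itself plus the pid that created it, linked into drm_device::vmalist.
 */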
struct drm_vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
	pid_t pid;
};

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

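/*
 * Pick the page protection for an I/O map (registers or framebuffer):
 * start from the VMA's default protection, force the mapping to be
 * unencrypted, and then apply the architecture-specific caching policy
 * (write-combining where safe, uncached otherwise).
 */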
static pgprot_t drm_io_prot(struct drm_local_map *map,
			    struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

	/* We don't want graphics memory to be mapped encrypted */
	tmp = pgprot_decrypted(tmp);

#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
    defined(__mips__)
	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
		tmp = pgprot_noncached(tmp);
	else
		tmp = pgprot_writecombine(tmp);
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
			    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

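/*
 * Page protection for DMA maps: the VMA default everywhere except on
 * non-cache-coherent PowerPC, where the pages must be mapped uncached.
 */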
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp = pgprot_noncached_wc(tmp);
#endif
	return tmp;
}

/**
 * \c fault method for AGP virtual memory.
 *
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if IS_ENABLED(CONFIG_AGP)
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!dev->agp)
		goto vm_fault_error;

	if (!dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = vmf->address - vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
			  (unsigned long long)baddr,
			  agpmem->memory->pages[offset],
			  (unsigned long long)offset,
			  page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif

/**
 * \c fault method for shared virtual memory.
 *
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist, if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				arch_phys_wc_del(map->mtrr);
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_legacy_pci_free(dev, &dmah);
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c fault method for DMA virtual memory.
 *
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
					/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT;	/* page_nr could just be vmf->pgoff */
	page = virt_to_page((void *)dma->pagelist[page_nr]);

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}

/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

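/*
 * Record a new userspace mapping: allocate a drm_vma_entry for this VMA and
 * the current pid, and add it to drm_device::vmalist. The caller must hold
 * dev->struct_mutex; drm_vm_open() below is the unlocked wrapper used as the
 * VMA open callback.
 */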
static void drm_vm_open_locked(struct drm_device *dev,
			       struct vm_area_struct *vma)
{
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

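/*
 * Drop the bookkeeping for a mapping that is going away: find the
 * drm_vma_entry for this VMA on drm_device::vmalist, unlink it and free it.
 * The caller must hold dev->struct_mutex.
 */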
static void drm_vm_close_locked(struct drm_device *dev,
				struct vm_area_struct *vma)
{
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Checks that the length matches the DMA buffer area, drops write permission
 * for read-only PCI DMA buffers, sets the virtual memory operations to
 * drm_vm_dma_ops and registers the mapping with drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

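/*
 * Base offset to add to a map's bus address before remapping register or
 * framebuffer ranges. Only Alpha needs a non-zero offset (the dense
 * memory-space base of its PCI hose); everywhere else this is 0.
 */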
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}

/**
 * mmap a DRM map.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls drm_mmap_dma(). Otherwise searches the map in
 * drm_device::maplist, checks that the restricted flag is not set, sets the
 * virtual memory operations according to the mapping type and remaps the
 * pages. Finally registers the mapping with drm_vm_open_locked().
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if IS_ENABLED(CONFIG_AGP)
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (dev->agp && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in fault()
			 */
#if defined(__powerpc__)
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
#endif
		/* fall through - to _DRM_FRAME_BUFFER... */
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_page_prot = drm_io_prot(map, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		/* fall through - to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

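/**
 * mmap entry point for legacy (non-GEM) maps.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Rejects mappings on unplugged devices, then calls drm_mmap_locked() under
 * dev->struct_mutex. Legacy drivers typically wire this up directly in their
 * file_operations; a minimal sketch (illustrative only, not a real driver):
 *
 *     static const struct file_operations foo_legacy_fops = {
 *             .owner = THIS_MODULE,
 *             .open = drm_open,
 *             .release = drm_release,
 *             .unlocked_ioctl = drm_ioctl,
 *             .mmap = drm_legacy_mmap,
 *             .poll = drm_poll,
 *             .read = drm_read,
 *     };
 */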
int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_legacy_mmap);

#if IS_ENABLED(CONFIG_DRM_LEGACY)
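/*
 * Flush the VMA bookkeeping list: unlink and free every remaining
 * drm_vma_entry on drm_device::vmalist. Only legacy drivers populate this
 * list, so this is only needed (and only built) for CONFIG_DRM_LEGACY.
 */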
void drm_legacy_vma_flush(struct drm_device *dev)
{
	struct drm_vma_entry *vma, *vma_temp;

	/* Clear vma list (only needed for legacy drivers) */
	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
		list_del(&vma->head);
		kfree(vma);
	}
}
#endif