Lines matching refs: vma (legacy DRM mapping code, drivers/gpu/drm/drm_vm.c)
62 struct vm_area_struct *vma; member
66 static void drm_vm_open(struct vm_area_struct *vma);
67 static void drm_vm_close(struct vm_area_struct *vma);
70 struct vm_area_struct *vma) in drm_io_prot() argument
72 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); in drm_io_prot()
84 if (efi_range_is_wc(vma->vm_start, vma->vm_end - in drm_io_prot()
85 vma->vm_start)) in drm_io_prot()
95 static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma) in drm_dma_prot() argument
97 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); in drm_dma_prot()
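
The two protection helpers above, drm_io_prot() and drm_dma_prot(), start from the protection implied by the VMA's flags via vm_get_page_prot() and then adjust it for the map type: uncached for register/framebuffer apertures, write-combined where the platform supports it (the efi_range_is_wc() branch). A minimal sketch of the same idea outside the DRM code; the helper name my_io_prot() is hypothetical:

        #include <linux/mm.h>

        /*
         * Sketch: derive a pgprot_t for an I/O mapping the way the DRM
         * helpers do -- start from the VMA's flags, then strip caching.
         * my_io_prot() is a made-up name, not part of any DRM API.
         */
        static pgprot_t my_io_prot(struct vm_area_struct *vma)
        {
                pgprot_t prot = vm_get_page_prot(vma->vm_flags);

                /* Register/framebuffer apertures must not be cached. */
                return pgprot_noncached(prot);
        }

On platforms where the range can be write-combined, pgprot_writecombine() would be the analogue of the efi_range_is_wc() branch above.
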
118 struct vm_area_struct *vma = vmf->vma; in drm_vm_fault() local
119 struct drm_file *priv = vma->vm_file->private_data; in drm_vm_fault()
134 if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) in drm_vm_fault()
145 resource_size_t offset = vmf->address - vma->vm_start; in drm_vm_fault()
207 struct vm_area_struct *vma = vmf->vma; in drm_vm_shm_fault() local
208 struct drm_local_map *map = vma->vm_private_data; in drm_vm_shm_fault()
216 offset = vmf->address - vma->vm_start; in drm_vm_shm_fault()
236 static void drm_vm_shm_close(struct vm_area_struct *vma) in drm_vm_shm_close() argument
238 struct drm_file *priv = vma->vm_file->private_data; in drm_vm_shm_close()
246 vma->vm_start, vma->vm_end - vma->vm_start); in drm_vm_shm_close()
248 map = vma->vm_private_data; in drm_vm_shm_close()
252 if (pt->vma->vm_private_data == map) in drm_vm_shm_close()
254 if (pt->vma == vma) { in drm_vm_shm_close()
309 struct vm_area_struct *vma = vmf->vma; in drm_vm_dma_fault() local
310 struct drm_file *priv = vma->vm_file->private_data; in drm_vm_dma_fault()
322 offset = vmf->address - vma->vm_start; in drm_vm_dma_fault()
344 struct vm_area_struct *vma = vmf->vma; in drm_vm_sg_fault() local
345 struct drm_local_map *map = vma->vm_private_data; in drm_vm_sg_fault()
346 struct drm_file *priv = vma->vm_file->private_data; in drm_vm_sg_fault()
359 offset = vmf->address - vma->vm_start; in drm_vm_sg_fault()
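
All four fault handlers follow the same pattern visible in the lines above: recover the VMA from vmf->vma, turn the faulting address into an offset inside the mapping (vmf->address - vma->vm_start), look up the backing page for that offset, and hand it back through vmf->page. A minimal sketch of that pattern for a vmalloc()-backed buffer kept in vm_private_data; the backing-store layout is an assumption, and on kernels older than 4.17 the handler returns int rather than vm_fault_t:

        #include <linux/mm.h>
        #include <linux/vmalloc.h>

        /*
         * Sketch of the fault pattern used by drm_vm_fault() and friends:
         * compute the page offset from vmf->address, find the backing page,
         * take a reference, and return it via vmf->page.  The backing store
         * is assumed to be a vmalloc() buffer stashed in vm_private_data;
         * a real handler would also bounds-check offset against its size.
         */
        static vm_fault_t my_buf_fault(struct vm_fault *vmf)
        {
                struct vm_area_struct *vma = vmf->vma;
                void *buf = vma->vm_private_data;
                unsigned long offset = vmf->address - vma->vm_start;
                struct page *page;

                if (!buf)
                        return VM_FAULT_SIGBUS;

                page = vmalloc_to_page(buf + offset);
                if (!page)
                        return VM_FAULT_SIGBUS;

                get_page(page);         /* released when the PTE is torn down */
                vmf->page = page;
                return 0;
        }
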
398 struct vm_area_struct *vma) in drm_vm_open_locked() argument
403 vma->vm_start, vma->vm_end - vma->vm_start); in drm_vm_open_locked()
407 vma_entry->vma = vma; in drm_vm_open_locked()
413 static void drm_vm_open(struct vm_area_struct *vma) in drm_vm_open() argument
415 struct drm_file *priv = vma->vm_file->private_data; in drm_vm_open()
419 drm_vm_open_locked(dev, vma); in drm_vm_open()
424 struct vm_area_struct *vma) in drm_vm_close_locked() argument
429 vma->vm_start, vma->vm_end - vma->vm_start); in drm_vm_close_locked()
432 if (pt->vma == vma) { in drm_vm_close_locked()
448 static void drm_vm_close(struct vm_area_struct *vma) in drm_vm_close() argument
450 struct drm_file *priv = vma->vm_file->private_data; in drm_vm_close()
454 drm_vm_close_locked(dev, vma); in drm_vm_close()
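
drm_vm_open_locked() and drm_vm_close_locked() keep a per-device list of live mappings: open allocates a drm_vma_entry, records the VMA and links it into dev->vmalist; close (and drm_vm_shm_close()) walks the list and frees the matching entry. A stripped-down sketch of that bookkeeping, with hypothetical my_dev/my_vma_entry types standing in for the DRM structures and the locking collapsed into the helpers:

        #include <linux/list.h>
        #include <linux/mm_types.h>
        #include <linux/mutex.h>
        #include <linux/slab.h>

        /* Hypothetical stand-ins for drm_device / drm_vma_entry. */
        struct my_vma_entry {
                struct list_head head;
                struct vm_area_struct *vma;
        };

        struct my_dev {
                struct mutex struct_mutex;
                struct list_head vmalist;
        };

        /* Mirrors drm_vm_open_locked(): remember every VMA mapping the device. */
        static void my_vm_open(struct my_dev *dev, struct vm_area_struct *vma)
        {
                struct my_vma_entry *entry = kmalloc(sizeof(*entry), GFP_KERNEL);

                if (!entry)
                        return;         /* tracking is best-effort, as in the DRM code */

                entry->vma = vma;
                mutex_lock(&dev->struct_mutex);
                list_add(&entry->head, &dev->vmalist);
                mutex_unlock(&dev->struct_mutex);
        }

        /* Mirrors drm_vm_close_locked(): drop the entry for a dying VMA. */
        static void my_vm_close(struct my_dev *dev, struct vm_area_struct *vma)
        {
                struct my_vma_entry *pt, *tmp;

                mutex_lock(&dev->struct_mutex);
                list_for_each_entry_safe(pt, tmp, &dev->vmalist, head) {
                        if (pt->vma == vma) {
                                list_del(&pt->head);
                                kfree(pt);
                                break;
                        }
                }
                mutex_unlock(&dev->struct_mutex);
        }
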
468 static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) in drm_mmap_dma() argument
473 unsigned long length = vma->vm_end - vma->vm_start; in drm_mmap_dma()
478 vma->vm_start, vma->vm_end, vma->vm_pgoff); in drm_mmap_dma()
487 vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE); in drm_mmap_dma()
489 pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW; in drm_mmap_dma()
494 vma->vm_page_prot = in drm_mmap_dma()
497 (__pte(pgprot_val(vma->vm_page_prot))))); in drm_mmap_dma()
501 vma->vm_ops = &drm_vm_dma_ops; in drm_mmap_dma()
503 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; in drm_mmap_dma()
505 drm_vm_open_locked(dev, vma); in drm_mmap_dma()
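
drm_mmap_dma() shows the general shape of a legacy mmap handler: validate the requested length, strip write permission when the mapping must be read-only, install the vm_operations table, and pin the VMA with VM_DONTEXPAND | VM_DONTDUMP before registering it. A condensed sketch of that shape; my_vm_ops, MY_MAX_MAP_SIZE and the read-only condition are placeholders, and the direct vm_flags writes match the kernel generation of this listing (kernels from 6.3 on use vm_flags_set()/vm_flags_clear()):

        #include <linux/errno.h>
        #include <linux/fs.h>
        #include <linux/mm.h>

        #define MY_MAX_MAP_SIZE (1UL << 20)     /* placeholder upper bound */

        static const struct vm_operations_struct my_vm_ops; /* .fault/.open/.close set elsewhere */

        /*
         * Sketch of the drm_mmap_dma() shape: check the mapping size, make
         * read-only mappings really read-only, wire up vm_ops, and keep the
         * VMA from growing or landing in core dumps.
         */
        static int my_mmap(struct file *filp, struct vm_area_struct *vma)
        {
                unsigned long length = vma->vm_end - vma->vm_start;
                bool readonly = true;           /* placeholder for driver policy */

                if (length > MY_MAX_MAP_SIZE)
                        return -EINVAL;

                if (readonly)
                        vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);

                vma->vm_ops = &my_vm_ops;
                vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

                return 0;
        }
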
531 static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) in drm_mmap_locked() argument
540 vma->vm_start, vma->vm_end, vma->vm_pgoff); in drm_mmap_locked()
549 if (!vma->vm_pgoff in drm_mmap_locked()
555 return drm_mmap_dma(filp, vma); in drm_mmap_locked()
557 if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) { in drm_mmap_locked()
567 if (map->size < vma->vm_end - vma->vm_start) in drm_mmap_locked()
571 vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE); in drm_mmap_locked()
573 pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW; in drm_mmap_locked()
578 vma->vm_page_prot = in drm_mmap_locked()
581 (__pte(pgprot_val(vma->vm_page_prot))))); in drm_mmap_locked()
595 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in drm_mmap_locked()
597 vma->vm_ops = &drm_vm_ops; in drm_mmap_locked()
605 vma->vm_page_prot = drm_io_prot(map, vma); in drm_mmap_locked()
606 if (io_remap_pfn_range(vma, vma->vm_start, in drm_mmap_locked()
608 vma->vm_end - vma->vm_start, in drm_mmap_locked()
609 vma->vm_page_prot)) in drm_mmap_locked()
614 vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset)); in drm_mmap_locked()
616 vma->vm_ops = &drm_vm_ops; in drm_mmap_locked()
621 if (remap_pfn_range(vma, vma->vm_start, in drm_mmap_locked()
623 vma->vm_end - vma->vm_start, vma->vm_page_prot)) in drm_mmap_locked()
625 vma->vm_page_prot = drm_dma_prot(map->type, vma); in drm_mmap_locked()
628 vma->vm_ops = &drm_vm_shm_ops; in drm_mmap_locked()
629 vma->vm_private_data = (void *)map; in drm_mmap_locked()
632 vma->vm_ops = &drm_vm_sg_ops; in drm_mmap_locked()
633 vma->vm_private_data = (void *)map; in drm_mmap_locked()
634 vma->vm_page_prot = drm_dma_prot(map->type, vma); in drm_mmap_locked()
639 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; in drm_mmap_locked()
641 drm_vm_open_locked(dev, vma); in drm_mmap_locked()
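
For register and framebuffer maps, drm_mmap_locked() rejects requests larger than the map, builds the page protection with drm_io_prot(), and inserts the whole physical range up front with io_remap_pfn_range(); shared-memory and scatter/gather maps instead get vm_ops whose fault handlers populate pages on demand. A minimal sketch of the up-front I/O path, with a hypothetical my_map structure supplying the physical base and size:

        #include <linux/errno.h>
        #include <linux/mm.h>

        /* Hypothetical description of a register/framebuffer aperture. */
        struct my_map {
                resource_size_t offset;         /* physical base address */
                unsigned long size;             /* length in bytes */
        };

        /*
         * Sketch of the register/framebuffer branch of drm_mmap_locked():
         * size-check the request, mark the pages uncached, and map the
         * whole physical range in one go.
         */
        static int my_mmap_io(struct my_map *map, struct vm_area_struct *vma)
        {
                unsigned long size = vma->vm_end - vma->vm_start;

                /* Refuse to expose more than the aperture actually covers. */
                if (map->size < size)
                        return -EINVAL;

                vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));

                if (io_remap_pfn_range(vma, vma->vm_start,
                                       map->offset >> PAGE_SHIFT,
                                       size, vma->vm_page_prot))
                        return -EAGAIN;

                return 0;
        }
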
645 int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma) in drm_legacy_mmap() argument
655 ret = drm_mmap_locked(filp, vma); in drm_legacy_mmap()
665 struct drm_vma_entry *vma, *vma_temp; in drm_legacy_vma_flush() local
668 list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) { in drm_legacy_vma_flush()
669 list_del(&vma->head); in drm_legacy_vma_flush()
670 kfree(vma); in drm_legacy_vma_flush()
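
drm_legacy_vma_flush() finally tears the whole vmalist down with list_for_each_entry_safe(), whose lookahead pointer lets entries be unlinked and freed during the walk. The same idiom in isolation, reusing the hypothetical my_dev/my_vma_entry shapes from the tracking sketch above:

        #include <linux/list.h>
        #include <linux/slab.h>

        /*
         * Sketch of the drm_legacy_vma_flush() loop: the _safe iterator keeps
         * a pointer to the next entry, so the current one can be deleted and
         * freed without breaking the traversal.
         */
        static void my_vma_flush(struct my_dev *dev)
        {
                struct my_vma_entry *vma, *vma_temp;

                /* Called at teardown, once no mapping can race with the walk. */
                list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
                        list_del(&vma->head);
                        kfree(vma);
                }
        }
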