// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <drm/drm_prime.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

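/*
 * Separate lockdep classes for shmem-backed and userptr object locks; the
 * userptr path may take mmap_lock while holding the object lock (see
 * etnaviv_gem_userptr_get_pages() below), so sharing one class would
 * likely trigger false-positive lockdep reports.
 */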
static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area. With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data. If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

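/*
 * Lazily populate the page array and scatter/gather table for an object.
 * Must be called with etnaviv_obj->lock held; returns the page array or
 * an ERR_PTR() on failure.
 */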
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
					    etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

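/*
 * Set up the user mapping according to the BO's cache mode: write-combined
 * and uncached objects get the matching page protection, while cached
 * objects are redirected to the shmem file so faults and
 * unmap_mapping_range() operate on the object's own address_space.
 */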
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(etnaviv_obj->base.filp);
		vma->vm_pgoff = 0;
		vma->vm_file = etnaviv_obj->base.filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return obj->ops->mmap(obj, vma);
}

vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int err;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet. Note that vmf_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	err = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (err)
		return VM_FAULT_NOPAGE;
	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		return vmf_error(err);
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	return vmf_insert_page(vma, vmf->address, page);
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu_context *context)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->context == context)
			return mapping;
	}

	return NULL;
}

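/*
 * Drop one use reference on a mapping obtained from
 * etnaviv_gem_mapping_get() and release the GEM object reference that
 * was taken on the mapping's behalf.
 */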
void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put(&etnaviv_obj->base);
}

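/*
 * Look up or create the GPU mapping of a buffer object in the given MMU
 * context: an existing mapping is reused and its use count bumped, a
 * reaped mapping (context == NULL) is recycled, otherwise a new one is
 * allocated and mapped through etnaviv_iommu_map_gem(). On success a
 * reference on the GEM object is held on behalf of the mapping.
 *
 * A rough (illustrative, not taken from a real caller) usage pattern:
 *
 *	mapping = etnaviv_gem_mapping_get(obj, context, va);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *	... use the mapping for GPU access ...
 *	etnaviv_gem_mapping_unreference(mapping);
 */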
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
	u64 va)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us. If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&mmu_context->lock);
			if (mapping->context == mmu_context)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&mmu_context->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	etnaviv_iommu_context_get(mmu_context);
	mapping->context = mmu_context;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
				    mmu_context->global->memory_base,
				    mapping, va);
	if (ret < 0) {
		etnaviv_iommu_context_put(mmu_context);
		kfree(mapping);
	} else {
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
	}

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}

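/*
 * Return a kernel virtual mapping of the object, creating it on first use.
 * The mapping is cached in etnaviv_obj->vaddr and stays around until the
 * object is released (see etnaviv_gem_shmem_release()).
 */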
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
		    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

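/*
 * Translate a cpu_prep/cpu_fini op mask into a DMA direction: a CPU read
 * needs the cache invalidated (DMA_FROM_DEVICE), a CPU write needs it
 * cleaned back out (DMA_TO_DEVICE), anything else gets both.
 */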
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

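/*
 * Prepare a BO for CPU access: make sure backing pages exist, wait for
 * (or, with ETNA_PREP_NOSYNC, just test) outstanding GPU work via the
 * reservation object, and for cached BOs sync the SG table for the CPU.
 * The op is recorded so etnaviv_gem_cpu_fini() can sync back for the
 * device with the matching direction.
 */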
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		void *ret;

		mutex_lock(&etnaviv_obj->lock);
		ret = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!dma_resv_test_signaled_rcu(obj->resv,
						write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = dma_resv_wait_timeout_rcu(obj->resv,
						write, true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
					 etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %llu\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, " ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

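/*
 * Final teardown of a BO: remove it from the driver's object list, unmap
 * and free any remaining per-context VRAM mappings, then release the
 * backing storage through the object's ops and free the object itself.
 */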
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu_context *context = mapping->context;

		WARN_ON(mapping->use);

		if (context) {
			etnaviv_iommu_unmap_gem(context, mapping);
			etnaviv_iommu_context_put(context);
		}

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);
}

static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;

	/*
	 * Our buffers are kept pinned, so allocating them from the MOVABLE
	 * zone is a really bad idea, and conflicts with CMA. See comments
	 * above new_inode() why this is required _and_ expected if you're
	 * going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);

	etnaviv_gem_obj_add(dev, obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
fail:
	drm_gem_object_put(obj);

	return ret;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

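/*
 * Pin the userspace pages backing a userptr BO with pin_user_pages_fast(),
 * looping until the whole range is pinned. Only the mm that created the
 * BO may populate it; already-pinned pages are released on error.
 */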
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	might_lock_read(&current->mm->mmap_lock);

	if (userptr->mm != current->mm)
		return -EPERM;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	do {
		unsigned num_pages = npages - pinned;
		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
		struct page **pages = pvec + pinned;

		ret = pin_user_pages_fast(ptr, num_pages,
					  !userptr->ro ? FOLL_WRITE : 0, pages);
		if (ret < 0) {
			unpin_user_pages(pvec, pinned);
			kvfree(pvec);
			return ret;
		}

		pinned += ret;

	} while (pinned < npages);

	etnaviv_obj->pages = pvec;

	return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(etnaviv_obj->pages, npages);
		kvfree(etnaviv_obj->pages);
	}
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.mm = current->mm;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&etnaviv_obj->base);
	return ret;
}