// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/component.h>
#include <linux/of_platform.h>
#include <drm/drm_of.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"

/*
 * DRM operations:
 */

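/*
 * Bring up every GPU core that was bound to the master device.  A core that
 * fails etnaviv_gpu_init() is dropped from the pipe array so that it is never
 * exposed to userspace.
 */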
static void load_gpu(struct drm_device *dev)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	unsigned int i;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *g = priv->gpu[i];

		if (g) {
			int ret;

			ret = etnaviv_gpu_init(g);
			if (ret)
				priv->gpu[i] = NULL;
		}
	}
}

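/*
 * Per-open state: allocate a file private context and create one GPU
 * scheduler entity at normal priority for each present pipe; submissions
 * from this file are queued through these entities.
 */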
static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_file_private *ctx;
	int i;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *gpu = priv->gpu[i];
		struct drm_sched_rq *rq;

		if (gpu) {
			rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
			drm_sched_entity_init(&ctx->sched_entity[i],
					      &rq, 1, NULL);
		}
	}

	file->driver_priv = ctx;

	return 0;
}

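/*
 * Undo etnaviv_open(): clear each GPU's last-context pointer if it still
 * refers to this context, destroy the scheduler entities and free the file
 * private data.
 */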
static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_file_private *ctx = file->driver_priv;
	unsigned int i;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *gpu = priv->gpu[i];

		if (gpu) {
			mutex_lock(&gpu->lock);
			if (gpu->lastctx == ctx)
				gpu->lastctx = NULL;
			mutex_unlock(&gpu->lock);

			drm_sched_entity_destroy(&ctx->sched_entity[i]);
		}
	}

	kfree(ctx);
}

/*
 * DRM debugfs:
 */

#ifdef CONFIG_DEBUG_FS
static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
{
	struct etnaviv_drm_private *priv = dev->dev_private;

	etnaviv_gem_describe_objects(priv, m);

	return 0;
}

static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
{
	struct drm_printer p = drm_seq_file_printer(m);

	read_lock(&dev->vma_offset_manager->vm_lock);
	drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
	read_unlock(&dev->vma_offset_manager->vm_lock);

	return 0;
}

static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));

	mutex_lock(&gpu->mmu->lock);
	drm_mm_print(&gpu->mmu->mm, &p);
	mutex_unlock(&gpu->mmu->lock);

	return 0;
}

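/*
 * Hexdump the kernel ring buffer, four 32-bit words per line.  The caller is
 * expected to hold gpu->lock so the buffer contents are stable while they
 * are printed.
 */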
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct etnaviv_cmdbuf *buf = &gpu->buffer;
	u32 size = buf->size;
	u32 *ptr = buf->vaddr;
	u32 i;

	seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
			buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf),
			size - buf->user_size);

	for (i = 0; i < size / 4; i++) {
		if (i && !(i % 4))
			seq_puts(m, "\n");
		if (i % 4 == 0)
			seq_printf(m, "\t0x%p: ", ptr + i);
		seq_printf(m, "%08x ", *(ptr + i));
	}
	seq_puts(m, "\n");
}

static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));

	mutex_lock(&gpu->lock);
	etnaviv_buffer_dump(gpu, m);
	mutex_unlock(&gpu->lock);

	return 0;
}

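/*
 * debugfs dispatch helpers: show_unlocked() forwards to a per-device show
 * callback stored in the drm_info_list entry, while show_each_gpu() invokes
 * a per-GPU callback once for every present pipe and stops on the first
 * error.
 */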
static int show_unlocked(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	int (*show)(struct drm_device *dev, struct seq_file *m) =
			node->info_ent->data;

	return show(dev, m);
}

static int show_each_gpu(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gpu *gpu;
	int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
			node->info_ent->data;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		gpu = priv->gpu[i];
		if (!gpu)
			continue;

		ret = show(gpu, m);
		if (ret < 0)
			break;
	}

	return ret;
}

static struct drm_info_list etnaviv_debugfs_list[] = {
	{"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
	{"gem", show_unlocked, 0, etnaviv_gem_show},
	{"mm", show_unlocked, 0, etnaviv_mm_show},
	{"mmu", show_each_gpu, 0, etnaviv_mmu_show},
	{"ring", show_each_gpu, 0, etnaviv_ring_show},
};

static int etnaviv_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	int ret;

	ret = drm_debugfs_create_files(etnaviv_debugfs_list,
				       ARRAY_SIZE(etnaviv_debugfs_list),
				       minor->debugfs_root, minor);

	if (ret) {
		dev_err(dev->dev, "could not install etnaviv_debugfs_list\n");
		return ret;
	}

	return ret;
}
#endif

/*
 * DRM ioctls:
 */

static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_param *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_gpu_get_param(gpu, args->param, &args->value);
}

static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_new *args = data;

	if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
			    ETNA_BO_FORCE_MMU))
		return -EINVAL;

	return etnaviv_gem_new_handle(dev, file, args->size,
				      args->flags, &args->handle);
}

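/*
 * Build a kernel struct timespec from the timeout members embedded in the
 * ioctl argument structs, so that a pointer to it can be passed to the wait
 * helpers (or replaced with NULL for non-blocking waits).
 */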
#define TS(t) ((struct timespec){ \
	.tv_sec = (t).tv_sec, \
	.tv_nsec = (t).tv_nsec \
})

static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));

	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->flags)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_cpu_fini(obj);

	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_mmap_offset(obj, &args->offset);
	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_wait_fence *args = data;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct timespec *timeout = &TS(args->timeout);
	struct etnaviv_gpu *gpu;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
						    timeout);
}

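/*
 * Import anonymous userspace memory as a GEM object.  The pointer and size
 * must be page aligned, must not be truncated by the internal types, and the
 * calling process must actually be allowed to access the range.
 */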
static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_userptr *args = data;
	int access;

	if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
	    args->flags == 0)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size) ||
	    (uintptr_t)args->user_ptr != args->user_ptr ||
	    (u32)args->user_size != args->user_size ||
	    args->user_ptr & ~PAGE_MASK)
		return -EINVAL;

	if (args->flags & ETNA_USERPTR_WRITE)
		access = VERIFY_WRITE;
	else
		access = VERIFY_READ;

	if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr,
		       args->user_size))
		return -EFAULT;

	return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
				       args->user_size, args->flags,
				       &args->handle);
}

static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_wait *args = data;
	struct timespec *timeout = &TS(args->timeout);
	struct drm_gem_object *obj;
	struct etnaviv_gpu *gpu;
	int ret;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	ret = etnaviv_gem_wait_bo(gpu, obj, timeout);

	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int etnaviv_ioctl_pm_query_dom(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_pm_domain *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_pm_query_dom(gpu, args);
}

static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_pm_signal *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_pm_query_sig(gpu, args);
}

static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
	ETNA_IOCTL(GET_PARAM,    get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_NEW,      gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_INFO,     gem_info,     DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_SUBMIT,   gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(WAIT_FENCE,   wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_USERPTR,  gem_userptr,  DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_WAIT,     gem_wait,     DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_AUTH|DRM_RENDER_ALLOW),
};

static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = etnaviv_gem_mmap,
};

static struct drm_driver etnaviv_drm_driver = {
	.driver_features    = DRIVER_GEM |
			      DRIVER_PRIME |
			      DRIVER_RENDER,
	.open               = etnaviv_open,
	.postclose          = etnaviv_postclose,
	.gem_free_object_unlocked = etnaviv_gem_free_object,
	.gem_vm_ops         = &vm_ops,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export   = drm_gem_prime_export,
	.gem_prime_import   = drm_gem_prime_import,
	.gem_prime_res_obj  = etnaviv_gem_prime_res_obj,
	.gem_prime_pin      = etnaviv_gem_prime_pin,
	.gem_prime_unpin    = etnaviv_gem_prime_unpin,
	.gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
	.gem_prime_vmap     = etnaviv_gem_prime_vmap,
	.gem_prime_vunmap   = etnaviv_gem_prime_vunmap,
	.gem_prime_mmap     = etnaviv_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = etnaviv_debugfs_init,
#endif
	.ioctls             = etnaviv_ioctls,
	.num_ioctls         = DRM_ETNAVIV_NUM_IOCTLS,
	.fops               = &fops,
	.name               = "etnaviv",
	.desc               = "etnaviv DRM",
	.date               = "20151214",
	.major              = 1,
	.minor              = 2,
};

/*
 * Platform driver:
 */
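/*
 * Component master bind callback: allocate the DRM device and its driver
 * private data, bind all matched GPU cores, initialize them and register the
 * DRM device with userspace.
 */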
static int etnaviv_bind(struct device *dev)
{
	struct etnaviv_drm_private *priv;
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev, "failed to allocate private data\n");
		ret = -ENOMEM;
		goto out_unref;
	}
	drm->dev_private = priv;

	mutex_init(&priv->gem_lock);
	INIT_LIST_HEAD(&priv->gem_list);
	priv->num_gpus = 0;

	dev_set_drvdata(dev, drm);

	ret = component_bind_all(dev, drm);
	if (ret < 0)
		goto out_bind;

	load_gpu(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto out_register;

	return 0;

out_register:
	component_unbind_all(dev, drm);
out_bind:
	kfree(priv);
out_unref:
	drm_dev_unref(drm);

	return ret;
}

static void etnaviv_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct etnaviv_drm_private *priv = drm->dev_private;

	drm_dev_unregister(drm);

	component_unbind_all(dev, drm);

	drm->dev_private = NULL;
	kfree(priv);

	drm_dev_unref(drm);
}

static const struct component_master_ops etnaviv_master_ops = {
	.bind = etnaviv_bind,
	.unbind = etnaviv_unbind,
};

static int compare_of(struct device *dev, void *data)
{
	struct device_node *np = data;

	return dev->of_node == np;
}

static int compare_str(struct device *dev, void *data)
{
	return !strcmp(dev_name(dev), data);
}

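/*
 * Build the component match list for the virtual master device, either from
 * all available "vivante,gc" nodes in the device tree or from the list of
 * device names passed in as platform data.
 */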
static int etnaviv_pdev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct component_match *match = NULL;

	if (!dev->platform_data) {
		struct device_node *core_node;

		for_each_compatible_node(core_node, NULL, "vivante,gc") {
			if (!of_device_is_available(core_node))
				continue;

			drm_of_component_match_add(&pdev->dev, &match,
						   compare_of, core_node);
		}
	} else {
		char **names = dev->platform_data;
		unsigned i;

		for (i = 0; names[i]; i++)
			component_match_add(dev, &match, compare_str, names[i]);
	}

	return component_master_add_with_match(dev, &etnaviv_master_ops, match);
}

static int etnaviv_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &etnaviv_master_ops);

	return 0;
}

static struct platform_driver etnaviv_platform_driver = {
	.probe = etnaviv_pdev_probe,
	.remove = etnaviv_pdev_remove,
	.driver = {
		.name = "etnaviv",
	},
};

static struct platform_device *etnaviv_drm;

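/*
 * Module init: register the GPU core driver and the virtual master driver,
 * then instantiate the "etnaviv" platform device if the device tree contains
 * at least one usable GPU node.
 */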
static int __init etnaviv_init(void)
{
	struct platform_device *pdev;
	int ret;
	struct device_node *np;

	etnaviv_validate_init();

	ret = platform_driver_register(&etnaviv_gpu_driver);
	if (ret != 0)
		return ret;

	ret = platform_driver_register(&etnaviv_platform_driver);
	if (ret != 0)
		goto unregister_gpu_driver;

	/*
	 * If the DT contains at least one available GPU device, instantiate
	 * the DRM platform device.
	 */
	for_each_compatible_node(np, NULL, "vivante,gc") {
		if (!of_device_is_available(np))
			continue;

		pdev = platform_device_alloc("etnaviv", -1);
		if (!pdev) {
			ret = -ENOMEM;
			of_node_put(np);
			goto unregister_platform_driver;
		}
		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

		/*
		 * Apply the same DMA configuration to the virtual etnaviv
		 * device as the GPU we found. This assumes that all Vivante
		 * GPUs in the system share the same DMA constraints.
		 */
		of_dma_configure(&pdev->dev, np, true);

		ret = platform_device_add(pdev);
		if (ret) {
			platform_device_put(pdev);
			of_node_put(np);
			goto unregister_platform_driver;
		}

		etnaviv_drm = pdev;
		of_node_put(np);
		break;
	}

	return 0;

unregister_platform_driver:
	platform_driver_unregister(&etnaviv_platform_driver);
unregister_gpu_driver:
	platform_driver_unregister(&etnaviv_gpu_driver);
	return ret;
}
module_init(etnaviv_init);

static void __exit etnaviv_exit(void)
{
	platform_device_unregister(etnaviv_drm);
	platform_driver_unregister(&etnaviv_platform_driver);
	platform_driver_unregister(&etnaviv_gpu_driver);
}
module_exit(etnaviv_exit);

MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
MODULE_DESCRIPTION("etnaviv DRM Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:etnaviv");