// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x driver
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
#undef CREATE_TRACE_POINTS

#include "bus.h"
#include "channel.h"
#include "debug.h"
#include "dev.h"
#include "intr.h"

#include "hw/host1x01.h"
#include "hw/host1x02.h"
#include "hw/host1x04.h"
#include "hw/host1x05.h"
#include "hw/host1x06.h"
#include "hw/host1x07.h"

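/*
 * MMIO accessors for the hypervisor, syncpoint and channel register
 * apertures. On SoCs with hypervisor support the sync registers sit at
 * the start of the VM aperture (sync_offset is 0), while older SoCs
 * place them at a fixed offset within the single register region.
 */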
void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
{
	writel(v, host1x->hv_regs + r);
}

u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r)
{
	return readl(host1x->hv_regs + r);
}

void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	writel(v, sync_regs + r);
}

u32 host1x_sync_readl(struct host1x *host1x, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	return readl(sync_regs + r);
}

void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
{
	writel(v, ch->regs + r);
}

u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
{
	return readl(ch->regs + r);
}

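/*
 * Per-SoC hardware description: number of channels, syncpoints, mlocks
 * and wait bases, the offset of the syncpoint registers, the addressable
 * DMA range and the generation-specific initialization routine.
 */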
static const struct host1x_info host1x01_info = {
	.nb_channels = 8,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 8,
	.init = host1x01_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = true,
};

static const struct host1x_info host1x02_info = {
	.nb_channels = 9,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 12,
	.init = host1x02_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = true,
};

static const struct host1x_info host1x04_info = {
	.nb_channels = 12,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x04_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = false,
};

static const struct host1x_info host1x05_info = {
	.nb_channels = 14,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x05_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = false,
};

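/*
 * Tables of memory client stream ID registers: host1x_setup_sid_table()
 * programs each entry's offset and limit values at the given base within
 * the hypervisor register aperture.
 */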
static const struct host1x_sid_entry tegra186_sid_table[] = {
	{
		/* VIC */
		.base = 0x1af0,
		.offset = 0x30,
		.limit = 0x34
	},
};

static const struct host1x_info host1x06_info = {
	.nb_channels = 63,
	.nb_pts = 576,
	.nb_mlocks = 24,
	.nb_bases = 16,
	.init = host1x06_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
	.sid_table = tegra186_sid_table,
	.reserve_vblank_syncpts = false,
};

static const struct host1x_sid_entry tegra194_sid_table[] = {
	{
		/* VIC */
		.base = 0x1af0,
		.offset = 0x30,
		.limit = 0x34
	},
};

static const struct host1x_info host1x07_info = {
	.nb_channels = 63,
	.nb_pts = 704,
	.nb_mlocks = 32,
	.nb_bases = 0,
	.init = host1x07_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
	.sid_table = tegra194_sid_table,
	.reserve_vblank_syncpts = false,
};

static const struct of_device_id host1x_of_match[] = {
	{ .compatible = "nvidia,tegra194-host1x", .data = &host1x07_info, },
	{ .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
	{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
	{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
	{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
	{ .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
	{ .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
	{ },
};
MODULE_DEVICE_TABLE(of, host1x_of_match);

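/*
 * Write the stream ID table entries through the hypervisor register
 * aperture so that memory transactions from host1x clients are tagged
 * with the right stream ID.
 */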
static void host1x_setup_sid_table(struct host1x *host)
{
	const struct host1x_info *info = host->info;
	unsigned int i;

	for (i = 0; i < info->num_sid_entries; i++) {
		const struct host1x_sid_entry *entry = &info->sid_table[i];

		host1x_hypervisor_writel(host, entry->offset, entry->base);
		host1x_hypervisor_writel(host, entry->limit, entry->base + 4);
	}
}

static bool host1x_wants_iommu(struct host1x *host1x)
{
	/*
	 * If we support addressing a maximum of 32 bits of physical memory
	 * and if the host1x firewall is enabled, there's no need to enable
	 * IOMMU support. This can happen for example on Tegra20, Tegra30
	 * and Tegra114.
	 *
	 * Tegra124 and later can address up to 34 bits of physical memory and
	 * many platforms come equipped with more than 2 GiB of system memory,
	 * which requires crossing the 4 GiB boundary. But there's a catch: on
	 * SoCs before Tegra186 (i.e. Tegra124 and Tegra210), the host1x can
	 * only address up to 32 bits of memory in GATHER opcodes, which means
	 * that command buffers need to either be in the first 2 GiB of system
	 * memory (which could quickly lead to memory exhaustion), or command
	 * buffers need to be treated differently from other buffers (which is
	 * not possible with the current ABI).
	 *
	 * A third option is to use the IOMMU in these cases to make sure all
	 * buffers will be mapped into a 32-bit IOVA space that host1x can
	 * address. This allows all of the system memory to be used and works
	 * within the limitations of the host1x on these SoCs.
	 *
	 * In summary, default to enable IOMMU on Tegra124 and later. For any
	 * of the earlier SoCs, only use the IOMMU for additional safety when
	 * the host1x firewall is disabled.
	 */
	if (host1x->info->dma_mask <= DMA_BIT_MASK(32)) {
		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
			return false;
	}

	return true;
}

static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
	int err;

	/*
	 * We may not always want to enable IOMMU support (for example if the
	 * host1x firewall is already enabled and we don't support addressing
	 * more than 32 bits of physical memory), so check for that first.
	 *
	 * Similarly, if host1x is already attached to an IOMMU (via the DMA
	 * API), don't try to attach again.
	 */
	if (!host1x_wants_iommu(host) || domain)
		return domain;

	host->group = iommu_group_get(host->dev);
	if (host->group) {
		struct iommu_domain_geometry *geometry;
		dma_addr_t start, end;
		unsigned long order;

		err = iova_cache_get();
		if (err < 0)
			goto put_group;

		host->domain = iommu_domain_alloc(&platform_bus_type);
		if (!host->domain) {
			err = -ENOMEM;
			goto put_cache;
		}

		err = iommu_attach_group(host->domain, host->group);
		if (err) {
			if (err == -ENODEV)
				err = 0;

			goto free_domain;
		}

		geometry = &host->domain->geometry;
		start = geometry->aperture_start & host->info->dma_mask;
		end = geometry->aperture_end & host->info->dma_mask;

		order = __ffs(host->domain->pgsize_bitmap);
		init_iova_domain(&host->iova, 1UL << order, start >> order);
		host->iova_end = end;

		domain = host->domain;
	}

	return domain;

free_domain:
	iommu_domain_free(host->domain);
	host->domain = NULL;
put_cache:
	iova_cache_put();
put_group:
	iommu_group_put(host->group);
	host->group = NULL;

	return ERR_PTR(err);
}

static int host1x_iommu_init(struct host1x *host)
{
	u64 mask = host->info->dma_mask;
	struct iommu_domain *domain;
	int err;

	domain = host1x_iommu_attach(host);
	if (IS_ERR(domain)) {
		err = PTR_ERR(domain);
		dev_err(host->dev, "failed to attach to IOMMU: %d\n", err);
		return err;
	}

	/*
	 * If we're not behind an IOMMU make sure we don't get push buffers
	 * that are allocated outside of the range addressable by the GATHER
	 * opcode.
	 *
	 * Newer generations of Tegra (Tegra186 and later) support a wide
	 * variant of the GATHER opcode that allows addressing more bits.
	 */
	if (!domain && !host->info->has_wide_gather)
		mask = DMA_BIT_MASK(32);

	err = dma_coerce_mask_and_coherent(host->dev, mask);
	if (err < 0) {
		dev_err(host->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	return 0;
}

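/*
 * Tear down the explicitly allocated IOMMU domain, if any, in the
 * reverse order of host1x_iommu_attach().
 */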
static void host1x_iommu_exit(struct host1x *host)
{
	if (host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_group(host->domain, host->group);

		iommu_domain_free(host->domain);
		host->domain = NULL;

		iova_cache_put();

		iommu_group_put(host->group);
		host->group = NULL;
	}
}

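/*
 * Bring-up order: map the register apertures, look up the syncpoint
 * interrupt, clock and reset, set up the IOMMU, then initialize
 * channels, syncpoints, interrupts and debugfs before registering the
 * device on the host1x bus and populating child devices.
 */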
static int host1x_probe(struct platform_device *pdev)
{
	struct host1x *host;
	struct resource *regs, *hv_regs = NULL;
	int syncpt_irq;
	int err;

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->info = of_device_get_match_data(&pdev->dev);

	if (host->info->has_hypervisor) {
		regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vm");
		if (!regs) {
			dev_err(&pdev->dev, "failed to get vm registers\n");
			return -ENXIO;
		}

		hv_regs = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						       "hypervisor");
		if (!hv_regs) {
			dev_err(&pdev->dev,
				"failed to get hypervisor registers\n");
			return -ENXIO;
		}
	} else {
		regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!regs) {
			dev_err(&pdev->dev, "failed to get registers\n");
			return -ENXIO;
		}
	}

	syncpt_irq = platform_get_irq(pdev, 0);
	if (syncpt_irq < 0)
		return syncpt_irq;

	mutex_init(&host->devices_lock);
	INIT_LIST_HEAD(&host->devices);
	INIT_LIST_HEAD(&host->list);
	host->dev = &pdev->dev;

	/* set common host1x device data */
	platform_set_drvdata(pdev, host);

	host->regs = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(host->regs))
		return PTR_ERR(host->regs);

	if (host->info->has_hypervisor) {
		host->hv_regs = devm_ioremap_resource(&pdev->dev, hv_regs);
		if (IS_ERR(host->hv_regs))
			return PTR_ERR(host->hv_regs);
	}

	host->dev->dma_parms = &host->dma_parms;
	dma_set_max_seg_size(host->dev, UINT_MAX);

	if (host->info->init) {
		err = host->info->init(host);
		if (err)
			return err;
	}

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		err = PTR_ERR(host->clk);

		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get clock: %d\n", err);

		return err;
	}

	host->rst = devm_reset_control_get(&pdev->dev, "host1x");
	if (IS_ERR(host->rst)) {
		err = PTR_ERR(host->rst);
		dev_err(&pdev->dev, "failed to get reset: %d\n", err);
		return err;
	}

	err = host1x_iommu_init(host);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to setup IOMMU: %d\n", err);
		return err;
	}

	err = host1x_channel_list_init(&host->channel_list,
				       host->info->nb_channels);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize channel list\n");
		goto iommu_exit;
	}

	err = clk_prepare_enable(host->clk);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to enable clock\n");
		goto free_channels;
	}

	err = reset_control_deassert(host->rst);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
		goto unprepare_disable;
	}

	err = host1x_syncpt_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize syncpts\n");
		goto reset_assert;
	}

	err = host1x_intr_init(host, syncpt_irq);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize interrupts\n");
		goto deinit_syncpt;
	}

	host1x_debug_init(host);

	if (host->info->has_hypervisor)
		host1x_setup_sid_table(host);

	err = host1x_register(host);
	if (err < 0)
		goto deinit_debugfs;

	err = devm_of_platform_populate(&pdev->dev);
	if (err < 0)
		goto unregister;

	return 0;

unregister:
	host1x_unregister(host);
deinit_debugfs:
	host1x_debug_deinit(host);
	host1x_intr_deinit(host);
deinit_syncpt:
	host1x_syncpt_deinit(host);
reset_assert:
	reset_control_assert(host->rst);
unprepare_disable:
	clk_disable_unprepare(host->clk);
free_channels:
	host1x_channel_list_free(&host->channel_list);
iommu_exit:
	host1x_iommu_exit(host);

	return err;
}

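/* Undo everything host1x_probe() set up, in reverse order. */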
static int host1x_remove(struct platform_device *pdev)
{
	struct host1x *host = platform_get_drvdata(pdev);

	host1x_unregister(host);
	host1x_debug_deinit(host);
	host1x_intr_deinit(host);
	host1x_syncpt_deinit(host);
	reset_control_assert(host->rst);
	clk_disable_unprepare(host->clk);
	host1x_iommu_exit(host);

	return 0;
}

static struct platform_driver tegra_host1x_driver = {
	.driver = {
		.name = "tegra-host1x",
		.of_match_table = host1x_of_match,
	},
	.probe = host1x_probe,
	.remove = host1x_remove,
};

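/*
 * Both the host1x controller driver and the Tegra MIPI driver are
 * registered from this module, so they share a single init/exit path
 * alongside the host1x bus type.
 */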
static struct platform_driver * const drivers[] = {
	&tegra_host1x_driver,
	&tegra_mipi_driver,
};

static int __init tegra_host1x_init(void)
{
	int err;

	err = bus_register(&host1x_bus_type);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		bus_unregister(&host1x_bus_type);

	return err;
}
module_init(tegra_host1x_init);

static void __exit tegra_host1x_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	bus_unregister(&host1x_bus_type);
}
module_exit(tegra_host1x_exit);

/**
 * host1x_get_dma_mask() - query the supported DMA mask for host1x
 * @host1x: host1x instance
 *
 * Note that this returns the supported DMA mask for host1x, which can be
 * different from the applicable DMA mask under certain circumstances.
 */
u64 host1x_get_dma_mask(struct host1x *host1x)
{
	return host1x->info->dma_mask;
}
EXPORT_SYMBOL(host1x_get_dma_mask);
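
/*
 * A minimal usage sketch (hypothetical client code, not part of this
 * driver): a host1x client that performs its own DMA could clamp its
 * DMA mask to what the host1x instance supports, e.g.:
 *
 *	err = dma_set_mask_and_coherent(dev, host1x_get_dma_mask(host1x));
 */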

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
MODULE_DESCRIPTION("Host1x driver for Tegra products");
MODULE_LICENSE("GPL");