/*
 * Tegra host1x driver
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
#undef CREATE_TRACE_POINTS

#include "bus.h"
#include "channel.h"
#include "debug.h"
#include "dev.h"
#include "intr.h"

#include "hw/host1x01.h"
#include "hw/host1x02.h"
#include "hw/host1x04.h"
#include "hw/host1x05.h"
#include "hw/host1x06.h"

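/* Accessors for the hypervisor register aperture (present on Tegra186). */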
void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
{
	writel(v, host1x->hv_regs + r);
}

u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r)
{
	return readl(host1x->hv_regs + r);
}

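/* Accessors for the sync register aperture, located at info->sync_offset. */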
void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	writel(v, sync_regs + r);
}

u32 host1x_sync_readl(struct host1x *host1x, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	return readl(sync_regs + r);
}

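/* Accessors for per-channel registers. */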
void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
{
	writel(v, ch->regs + r);
}

u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
{
	return readl(ch->regs + r);
}

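/*
 * Per-SoC host1x configuration: number of channels, syncpoints, mlocks and
 * wait bases, the hardware-specific init function, the offset of the sync
 * register aperture and the supported DMA mask.
 */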
static const struct host1x_info host1x01_info = {
	.nb_channels = 8,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 8,
	.init = host1x01_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
};

static const struct host1x_info host1x02_info = {
	.nb_channels = 9,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 12,
	.init = host1x02_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
};

static const struct host1x_info host1x04_info = {
	.nb_channels = 12,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x04_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
};

static const struct host1x_info host1x05_info = {
	.nb_channels = 14,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x05_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
};

static const struct host1x_info host1x06_info = {
	.nb_channels = 63,
	.nb_pts = 576,
	.nb_mlocks = 24,
	.nb_bases = 16,
	.init = host1x06_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(34),
	.has_hypervisor = true,
};

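/* Device tree match table, newest SoC generation first. */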
static const struct of_device_id host1x_of_match[] = {
	{ .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
	{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
	{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
	{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
	{ .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
	{ .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
	{ },
};
MODULE_DEVICE_TABLE(of, host1x_of_match);

static int host1x_probe(struct platform_device *pdev)
{
	struct host1x *host;
	struct resource *regs, *hv_regs = NULL;
	int syncpt_irq;
	int err;

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->info = of_device_get_match_data(&pdev->dev);

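	/*
	 * On SoCs with a hypervisor register aperture (Tegra186), the
	 * register space is split into a per-VM "vm" aperture and a
	 * "hypervisor" aperture; look up both resources.
	 */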
	if (host->info->has_hypervisor) {
		regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vm");
		if (!regs) {
			dev_err(&pdev->dev, "failed to get vm registers\n");
			return -ENXIO;
		}

		hv_regs = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						       "hypervisor");
		if (!hv_regs) {
			dev_err(&pdev->dev,
				"failed to get hypervisor registers\n");
			return -ENXIO;
		}
	} else {
		regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!regs) {
			dev_err(&pdev->dev, "failed to get registers\n");
			return -ENXIO;
		}
	}

	syncpt_irq = platform_get_irq(pdev, 0);
	if (syncpt_irq < 0) {
		dev_err(&pdev->dev, "failed to get IRQ: %d\n", syncpt_irq);
		return syncpt_irq;
	}

	mutex_init(&host->devices_lock);
	INIT_LIST_HEAD(&host->devices);
	INIT_LIST_HEAD(&host->list);
	host->dev = &pdev->dev;

	/* set common host1x device data */
	platform_set_drvdata(pdev, host);

	host->regs = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(host->regs))
		return PTR_ERR(host->regs);

	if (host->info->has_hypervisor) {
		host->hv_regs = devm_ioremap_resource(&pdev->dev, hv_regs);
		if (IS_ERR(host->hv_regs))
			return PTR_ERR(host->hv_regs);
	}

	dma_set_mask_and_coherent(host->dev, host->info->dma_mask);

	if (host->info->init) {
		err = host->info->init(host);
		if (err)
			return err;
	}

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		err = PTR_ERR(host->clk);
		return err;
	}

	host->rst = devm_reset_control_get(&pdev->dev, "host1x");
	if (IS_ERR(host->rst)) {
		err = PTR_ERR(host->rst);
		dev_err(&pdev->dev, "failed to get reset: %d\n", err);
		return err;
	}

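	/*
	 * With the command stream firewall enabled, jobs are validated on the
	 * CPU, so the IOMMU setup below is skipped.
	 */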
	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
		goto skip_iommu;

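	/*
	 * If the device is attached to an IOMMU group, allocate a domain,
	 * attach the group to it and set up an IOVA allocator covering the
	 * domain's aperture.
	 */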
	host->group = iommu_group_get(&pdev->dev);
	if (host->group) {
		struct iommu_domain_geometry *geometry;
		unsigned long order;

		err = iova_cache_get();
		if (err < 0)
			goto put_group;

		host->domain = iommu_domain_alloc(&platform_bus_type);
		if (!host->domain) {
			err = -ENOMEM;
			goto put_cache;
		}

		err = iommu_attach_group(host->domain, host->group);
		if (err) {
			if (err == -ENODEV) {
				iommu_domain_free(host->domain);
				host->domain = NULL;
				iova_cache_put();
				iommu_group_put(host->group);
				host->group = NULL;
				goto skip_iommu;
			}

			goto fail_free_domain;
		}

		geometry = &host->domain->geometry;

		order = __ffs(host->domain->pgsize_bitmap);
		init_iova_domain(&host->iova, 1UL << order,
				 geometry->aperture_start >> order);
		host->iova_end = geometry->aperture_end;
	}

skip_iommu:
	err = host1x_channel_list_init(&host->channel_list,
				       host->info->nb_channels);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize channel list\n");
		goto fail_detach_device;
	}

	err = clk_prepare_enable(host->clk);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to enable clock\n");
		goto fail_free_channels;
	}

	err = reset_control_deassert(host->rst);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
		goto fail_unprepare_disable;
	}

	err = host1x_syncpt_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize syncpts\n");
		goto fail_reset_assert;
	}

	err = host1x_intr_init(host, syncpt_irq);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize interrupts\n");
		goto fail_deinit_syncpt;
	}

	host1x_debug_init(host);

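	/* Register with the host1x bus so that client devices can attach. */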
	err = host1x_register(host);
	if (err < 0)
		goto fail_deinit_intr;

	return 0;

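/* Error unwind: undo the initialization steps in reverse order. */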
fail_deinit_intr:
	host1x_intr_deinit(host);
fail_deinit_syncpt:
	host1x_syncpt_deinit(host);
fail_reset_assert:
	reset_control_assert(host->rst);
fail_unprepare_disable:
	clk_disable_unprepare(host->clk);
fail_free_channels:
	host1x_channel_list_free(&host->channel_list);
fail_detach_device:
	if (host->group && host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_group(host->domain, host->group);
	}
fail_free_domain:
	if (host->domain)
		iommu_domain_free(host->domain);
put_cache:
	if (host->group)
		iova_cache_put();
put_group:
	iommu_group_put(host->group);

	return err;
}

static int host1x_remove(struct platform_device *pdev)
{
	struct host1x *host = platform_get_drvdata(pdev);

	host1x_unregister(host);
	host1x_intr_deinit(host);
	host1x_syncpt_deinit(host);
	reset_control_assert(host->rst);
	clk_disable_unprepare(host->clk);

	if (host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_group(host->domain, host->group);
		iommu_domain_free(host->domain);
		iova_cache_put();
		iommu_group_put(host->group);
	}

	return 0;
}

static struct platform_driver tegra_host1x_driver = {
	.driver = {
		.name = "tegra-host1x",
		.of_match_table = host1x_of_match,
	},
	.probe = host1x_probe,
	.remove = host1x_remove,
};

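/* Platform drivers registered and unregistered together at module init/exit. */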
static struct platform_driver * const drivers[] = {
	&tegra_host1x_driver,
	&tegra_mipi_driver,
};

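/*
 * Register the host1x bus type before the platform drivers; if driver
 * registration fails, unregister the bus again.
 */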
static int __init tegra_host1x_init(void)
{
	int err;

	err = bus_register(&host1x_bus_type);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		bus_unregister(&host1x_bus_type);

	return err;
}
module_init(tegra_host1x_init);

static void __exit tegra_host1x_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	bus_unregister(&host1x_bus_type);
}
module_exit(tegra_host1x_exit);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
MODULE_DESCRIPTION("Host1x driver for Tegra products");
MODULE_LICENSE("GPL");