// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * Author: Stepan Moskovchenko <stepanm@codeaurora.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of_iommu.h>

#include <asm/cacheflush.h>
#include <linux/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"

#define MRC(reg, processor, op1, crn, crm, op2)                          \
__asm__ __volatile__ (                                                   \
"   mrc   " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n"      \
: "=r" (reg))

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES       (SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;

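/*
 * Per-domain private data: the IOMMU instances the domain is attached to,
 * the io-pgtable configuration and ops backing its ARMv7 short-descriptor
 * pagetable, and a lock serialising map/unmap against that pagetable.
 */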
struct msm_priv {
        struct list_head list_attached;
        struct iommu_domain domain;
        struct io_pgtable_cfg cfg;
        struct io_pgtable_ops *iop;
        struct device *dev;
        spinlock_t pgtlock; /* pagetable lock */
};

static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
        return container_of(dom, struct msm_priv, domain);
}

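/*
 * Clock handling for register access: the pagetable/AHB clock (pclk) must
 * be enabled before the optional core clock, and anything enabled here is
 * rolled back if a later step fails.
 */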
static int __enable_clocks(struct msm_iommu_dev *iommu)
{
        int ret;

        ret = clk_enable(iommu->pclk);
        if (ret)
                goto fail;

        if (iommu->clk) {
                ret = clk_enable(iommu->clk);
                if (ret)
                        clk_disable(iommu->pclk);
        }
fail:
        return ret;
}

static void __disable_clocks(struct msm_iommu_dev *iommu)
{
        if (iommu->clk)
                clk_disable(iommu->clk);
        clk_disable(iommu->pclk);
}

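/*
 * Return the global registers and every one of the ncb context banks to a
 * known reset state.
 */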
static void msm_iommu_reset(void __iomem *base, int ncb)
{
        int ctx;

        SET_RPUE(base, 0);
        SET_RPUEIE(base, 0);
        SET_ESRRESTORE(base, 0);
        SET_TBE(base, 0);
        SET_CR(base, 0);
        SET_SPDMBE(base, 0);
        SET_TESTBUSCR(base, 0);
        SET_TLBRSW(base, 0);
        SET_GLOBAL_TLBIALL(base, 0);
        SET_RPU_ACR(base, 0);
        SET_TLBLKCRWE(base, 1);

        for (ctx = 0; ctx < ncb; ctx++) {
                SET_BPRCOSH(base, ctx, 0);
                SET_BPRCISH(base, ctx, 0);
                SET_BPRCNSH(base, ctx, 0);
                SET_BPSHCFG(base, ctx, 0);
                SET_BPMTCFG(base, ctx, 0);
                SET_ACTLR(base, ctx, 0);
                SET_SCTLR(base, ctx, 0);
                SET_FSRRESTORE(base, ctx, 0);
                SET_TTBR0(base, ctx, 0);
                SET_TTBR1(base, ctx, 0);
                SET_TTBCR(base, ctx, 0);
                SET_BFBCR(base, ctx, 0);
                SET_PAR(base, ctx, 0);
                SET_FAR(base, ctx, 0);
                SET_CTX_TLBIALL(base, ctx, 0);
                SET_TLBFLPTER(base, ctx, 0);
                SET_TLBSLPTER(base, ctx, 0);
                SET_TLBLKCR(base, ctx, 0);
                SET_CONTEXTIDR(base, ctx, 0);
        }
}

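/*
 * io-pgtable tlb_flush_all callback: invalidate the entire TLB of every
 * context bank on each IOMMU instance this domain is attached to.
 */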
static void __flush_iotlb(void *cookie)
{
        struct msm_priv *priv = cookie;
        struct msm_iommu_dev *iommu = NULL;
        struct msm_iommu_ctx_dev *master;
        int ret = 0;

        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list)
                        SET_CTX_TLBIALL(iommu->base, master->num, 0);

                __disable_clocks(iommu);
        }
fail:
        return;
}

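/*
 * Invalidate the TLB by virtual address, one granule at a time, tagging
 * each IOVA with the ASID programmed into the master's context bank.
 */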
static void __flush_iotlb_range(unsigned long iova, size_t size,
                                size_t granule, bool leaf, void *cookie)
{
        struct msm_priv *priv = cookie;
        struct msm_iommu_dev *iommu = NULL;
        struct msm_iommu_ctx_dev *master;
        int ret = 0;
        int temp_size;

        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list) {
                        temp_size = size;
                        do {
                                iova &= TLBIVA_VA;
                                iova |= GET_CONTEXTIDR_ASID(iommu->base,
                                                            master->num);
                                SET_TLBIVA(iommu->base, master->num, iova);
                                iova += granule;
                        } while (temp_size -= granule);
                }

                __disable_clocks(iommu);
        }

fail:
        return;
}

static void __flush_iotlb_walk(unsigned long iova, size_t size,
                               size_t granule, void *cookie)
{
        __flush_iotlb_range(iova, size, granule, false, cookie);
}

static void __flush_iotlb_leaf(unsigned long iova, size_t size,
                               size_t granule, void *cookie)
{
        __flush_iotlb_range(iova, size, granule, true, cookie);
}

static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
                               unsigned long iova, size_t granule, void *cookie)
{
        __flush_iotlb_range(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops msm_iommu_flush_ops = {
        .tlb_flush_all = __flush_iotlb,
        .tlb_flush_walk = __flush_iotlb_walk,
        .tlb_flush_leaf = __flush_iotlb_leaf,
        .tlb_add_page = __flush_iotlb_page,
};

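/* Claim a free context bank number from the allocation bitmap. */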
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
        int idx;

        do {
                idx = find_next_zero_bit(map, end, start);
                if (idx == end)
                        return -ENOSPC;
        } while (test_and_set_bit(idx, map));

        return idx;
}

static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
        clear_bit(idx, map);
}

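/*
 * Route every machine ID (MID) of this master to its context bank and
 * mark the MID's transactions as non-secure.
 */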
static void config_mids(struct msm_iommu_dev *iommu,
                        struct msm_iommu_ctx_dev *master)
{
        int mid, ctx, i;

        for (i = 0; i < master->num_mids; i++) {
                mid = master->mids[i];
                ctx = master->num;

                SET_M2VCBR_N(iommu->base, mid, 0);
                SET_CBACR_N(iommu->base, ctx, 0);

                /* Set VMID = 0 */
                SET_VMID(iommu->base, mid, 0);

                /* Set the context number for that MID to this context */
                SET_CBNDX(iommu->base, mid, ctx);

                /* Set MID associated with this context bank to 0 */
                SET_CBVMID(iommu->base, ctx, 0);

                /* Set the ASID for TLB tagging for this context */
                SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

                /* Set security bit override to be Non-secure */
                SET_NSCFG(iommu->base, mid, 3);
        }
}

static void __reset_context(void __iomem *base, int ctx)
{
        SET_BPRCOSH(base, ctx, 0);
        SET_BPRCISH(base, ctx, 0);
        SET_BPRCNSH(base, ctx, 0);
        SET_BPSHCFG(base, ctx, 0);
        SET_BPMTCFG(base, ctx, 0);
        SET_ACTLR(base, ctx, 0);
        SET_SCTLR(base, ctx, 0);
        SET_FSRRESTORE(base, ctx, 0);
        SET_TTBR0(base, ctx, 0);
        SET_TTBR1(base, ctx, 0);
        SET_TTBCR(base, ctx, 0);
        SET_BFBCR(base, ctx, 0);
        SET_PAR(base, ctx, 0);
        SET_FAR(base, ctx, 0);
        SET_CTX_TLBIALL(base, ctx, 0);
        SET_TLBFLPTER(base, ctx, 0);
        SET_TLBSLPTER(base, ctx, 0);
        SET_TLBLKCR(base, ctx, 0);
}

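/*
 * Program a context bank for the domain: install the ARMv7
 * short-descriptor pagetable and attributes produced by io-pgtable,
 * invalidate the context TLB, route faults to the fault interrupt and
 * finally enable the MMU.
 */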
static void __program_context(void __iomem *base, int ctx,
                              struct msm_priv *priv)
{
        __reset_context(base, ctx);

        /* Turn on TEX Remap */
        SET_TRE(base, ctx, 1);
        SET_AFE(base, ctx, 1);

        /* Set up HTW mode */
        /* TLB miss configuration: perform HTW on miss */
        SET_TLBMCFG(base, ctx, 0x3);

        /* V2P configuration: HTW for access */
        SET_V2PCFG(base, ctx, 0x3);

        SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
        SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr);
        SET_TTBR1(base, ctx, 0);

        /* Set prrr and nmrr */
        SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
        SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

        /* Invalidate the TLB for this context */
        SET_CTX_TLBIALL(base, ctx, 0);

        /* Set interrupt number to "secure" interrupt */
        SET_IRPTNDX(base, ctx, 0);

        /* Enable context fault interrupt */
        SET_CFEIE(base, ctx, 1);

        /* Stall access on a context fault and let the handler deal with it */
        SET_CFCFG(base, ctx, 1);

        /* Redirect all cacheable requests to L2 slave port. */
        SET_RCISH(base, ctx, 1);
        SET_RCOSH(base, ctx, 1);
        SET_RCNSH(base, ctx, 1);

        /* Turn on BFB prefetch */
        SET_BFBDFE(base, ctx, 1);

        /* Enable the MMU */
        SET_M(base, ctx, 1);
}

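/*
 * Only unmanaged domains are supported; the usable aperture is the full
 * 32-bit IOVA space.
 */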
static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
{
        struct msm_priv *priv;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                goto fail_nomem;

        INIT_LIST_HEAD(&priv->list_attached);

        priv->domain.geometry.aperture_start = 0;
        priv->domain.geometry.aperture_end = (1ULL << 32) - 1;
        priv->domain.geometry.force_aperture = true;

        return &priv->domain;

fail_nomem:
        kfree(priv);
        return NULL;
}

static void msm_iommu_domain_free(struct iommu_domain *domain)
{
        struct msm_priv *priv;
        unsigned long flags;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        priv = to_msm_priv(domain);
        kfree(priv);
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

static int msm_iommu_domain_config(struct msm_priv *priv)
{
        spin_lock_init(&priv->pgtlock);

        priv->cfg = (struct io_pgtable_cfg) {
                .quirks = IO_PGTABLE_QUIRK_TLBI_ON_MAP,
                .pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
                .ias = 32,
                .oas = 32,
                .tlb = &msm_iommu_flush_ops,
                .iommu_dev = priv->dev,
        };

        priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
        if (!priv->iop) {
                dev_err(priv->dev, "Failed to allocate pgtable\n");
                return -EINVAL;
        }

        msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

        return 0;
}

/* Must be called under msm_iommu_lock */
static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
{
        struct msm_iommu_dev *iommu, *ret = NULL;
        struct msm_iommu_ctx_dev *master;

        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
                master = list_first_entry(&iommu->ctx_list,
                                          struct msm_iommu_ctx_dev,
                                          list);
                if (master->of_node == dev->of_node) {
                        ret = iommu;
                        break;
                }
        }

        return ret;
}

static struct iommu_device *msm_iommu_probe_device(struct device *dev)
{
        struct msm_iommu_dev *iommu;
        unsigned long flags;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        iommu = find_iommu_for_dev(dev);
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        if (!iommu)
                return ERR_PTR(-ENODEV);

        return &iommu->iommu;
}

static void msm_iommu_release_device(struct device *dev)
{
}

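/*
 * Attach a domain to a device: allocate the domain's pagetable, then claim
 * and program a context bank for every master on each IOMMU instance whose
 * first master matches the device's OF node.
 */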
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
        int ret = 0;
        unsigned long flags;
        struct msm_iommu_dev *iommu;
        struct msm_priv *priv = to_msm_priv(domain);
        struct msm_iommu_ctx_dev *master;

        priv->dev = dev;
        ret = msm_iommu_domain_config(priv);
        if (ret)
                return ret;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
                master = list_first_entry(&iommu->ctx_list,
                                          struct msm_iommu_ctx_dev,
                                          list);
                if (master->of_node == dev->of_node) {
                        ret = __enable_clocks(iommu);
                        if (ret)
                                goto fail;

                        list_for_each_entry(master, &iommu->ctx_list, list) {
                                if (master->num) {
                                        dev_err(dev, "domain already attached");
                                        ret = -EEXIST;
                                        goto fail;
                                }
                                master->num =
                                        msm_iommu_alloc_ctx(iommu->context_map,
                                                            0, iommu->ncb);
                                if (IS_ERR_VALUE(master->num)) {
                                        ret = -ENODEV;
                                        goto fail;
                                }
                                config_mids(iommu, master);
                                __program_context(iommu->base, master->num,
                                                  priv);
                        }
                        __disable_clocks(iommu);
                        list_add(&iommu->dom_node, &priv->list_attached);
                }
        }

fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;
        struct msm_iommu_dev *iommu;
        struct msm_iommu_ctx_dev *master;
        int ret;

        free_io_pgtable_ops(priv->iop);

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list) {
                        msm_iommu_free_ctx(iommu->context_map, master->num);
                        __reset_context(iommu->base, master->num);
                }
                __disable_clocks(iommu);
        }
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

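/*
 * map/unmap are thin wrappers around the io-pgtable operations; the
 * pagetable spinlock makes them safe to call from atomic context.
 */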
static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t pa, size_t len, int prot, gfp_t gfp)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&priv->pgtlock, flags);
        ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC);
        spin_unlock_irqrestore(&priv->pgtlock, flags);

        return ret;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                              size_t len, struct iommu_iotlb_gather *gather)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;

        spin_lock_irqsave(&priv->pgtlock, flags);
        len = priv->iop->unmap(priv->iop, iova, len, gather);
        spin_unlock_irqrestore(&priv->pgtlock, flags);

        return len;
}

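/*
 * Resolve an IOVA with the hardware V2P (VA-to-PA) translation engine of
 * the first attached context bank rather than by walking the pagetable.
 */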
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
                                          dma_addr_t va)
{
        struct msm_priv *priv;
        struct msm_iommu_dev *iommu;
        struct msm_iommu_ctx_dev *master;
        unsigned int par;
        unsigned long flags;
        phys_addr_t ret = 0;

        spin_lock_irqsave(&msm_iommu_lock, flags);

        priv = to_msm_priv(domain);
        iommu = list_first_entry(&priv->list_attached,
                                 struct msm_iommu_dev, dom_node);

        if (list_empty(&iommu->ctx_list))
                goto fail;

        master = list_first_entry(&iommu->ctx_list,
                                  struct msm_iommu_ctx_dev, list);
        if (!master)
                goto fail;

        ret = __enable_clocks(iommu);
        if (ret)
                goto fail;

        /* Invalidate context TLB */
        SET_CTX_TLBIALL(iommu->base, master->num, 0);
        SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

        par = GET_PAR(iommu->base, master->num);

        /* We are dealing with a supersection */
        if (GET_NOFAULT_SS(iommu->base, master->num))
                ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
        else    /* Upper 20 bits from PAR, lower 12 from VA */
                ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

        if (GET_FAULT(iommu->base, master->num))
                ret = 0;

        __disable_clocks(iommu);
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
        return ret;
}

static bool msm_iommu_capable(enum iommu_cap cap)
{
        return false;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
        unsigned int fsr = GET_FSR(base, ctx);

        pr_err("FAR    = %08x    PAR    = %08x\n",
               GET_FAR(base, ctx), GET_PAR(base, ctx));
        pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
               (fsr & 0x02) ? "TF " : "",
               (fsr & 0x04) ? "AFF " : "",
               (fsr & 0x08) ? "APF " : "",
               (fsr & 0x10) ? "TLBMF " : "",
               (fsr & 0x20) ? "HTWDEEF " : "",
               (fsr & 0x40) ? "HTWSEEF " : "",
               (fsr & 0x80) ? "MHF " : "",
               (fsr & 0x10000) ? "SL " : "",
               (fsr & 0x40000000) ? "SS " : "",
               (fsr & 0x80000000) ? "MULTI " : "");

        pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
               GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
        pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
               GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
        pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
               GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}

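/*
 * Record a stream ID (MID) for a master device, allocating its per-device
 * context descriptor on first use. Called under msm_iommu_lock.
 */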
static void insert_iommu_master(struct device *dev,
                                struct msm_iommu_dev **iommu,
                                struct of_phandle_args *spec)
{
        struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev);
        int sid;

        if (list_empty(&(*iommu)->ctx_list)) {
                master = kzalloc(sizeof(*master), GFP_ATOMIC);
                if (!master)
                        return;
                master->of_node = dev->of_node;
                list_add(&master->list, &(*iommu)->ctx_list);
                dev_iommu_priv_set(dev, master);
        }

        for (sid = 0; sid < master->num_mids; sid++)
                if (master->mids[sid] == spec->args[0]) {
                        dev_warn(dev, "Stream ID 0x%x repeated; ignoring\n",
                                 spec->args[0]);
                        return;
                }

        master->mids[master->num_mids++] = spec->args[0];
}

static int qcom_iommu_of_xlate(struct device *dev,
                               struct of_phandle_args *spec)
{
        struct msm_iommu_dev *iommu;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node)
                if (iommu->dev->of_node == spec->np)
                        break;

        if (!iommu || iommu->dev->of_node != spec->np) {
                ret = -ENODEV;
                goto fail;
        }

        insert_iommu_master(dev, &iommu, spec);
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        return ret;
}

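/*
 * Context fault handler: report the fault and dump the registers of every
 * faulting context bank, then clear its fault status so the hardware can
 * raise further faults.
 */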
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
        struct msm_iommu_dev *iommu = dev_id;
        unsigned int fsr;
        int i, ret;

        spin_lock(&msm_iommu_lock);

        if (!iommu) {
                pr_err("Invalid device ID in context interrupt handler\n");
                goto fail;
        }

        pr_err("Unexpected IOMMU page fault!\n");
        pr_err("base = %08x\n", (unsigned int)iommu->base);

        ret = __enable_clocks(iommu);
        if (ret)
                goto fail;

        for (i = 0; i < iommu->ncb; i++) {
                fsr = GET_FSR(iommu->base, i);
                if (fsr) {
                        pr_err("Fault occurred in context %d.\n", i);
                        pr_err("Interesting registers:\n");
                        print_ctx_regs(iommu->base, i);
                        SET_FSR(iommu->base, i, 0x4000000F);
                }
        }
        __disable_clocks(iommu);
fail:
        spin_unlock(&msm_iommu_lock);
        return 0;
}

static struct iommu_ops msm_iommu_ops = {
        .capable = msm_iommu_capable,
        .domain_alloc = msm_iommu_domain_alloc,
        .domain_free = msm_iommu_domain_free,
        .attach_dev = msm_iommu_attach_dev,
        .detach_dev = msm_iommu_detach_dev,
        .map = msm_iommu_map,
        .unmap = msm_iommu_unmap,
        /*
         * Nothing is needed here, the barrier to guarantee
         * completion of the TLB sync operation is implicitly
         * taken care of when the IOMMU client does a writel before
         * kick-starting the other master.
         */
        .iotlb_sync = NULL,
        .iova_to_phys = msm_iommu_iova_to_phys,
        .probe_device = msm_iommu_probe_device,
        .release_device = msm_iommu_release_device,
        .device_group = generic_device_group,
        .pgsize_bitmap = MSM_IOMMU_PGSIZES,
        .of_xlate = qcom_iommu_of_xlate,
};

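/*
 * Probe one IOMMU instance: acquire its clocks, map its registers,
 * sanity-check the hardware with a dummy V2P translation, hook up the
 * fault interrupt and register with the IOMMU core.
 */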
static int msm_iommu_probe(struct platform_device *pdev)
{
        struct resource *r;
        resource_size_t ioaddr;
        struct msm_iommu_dev *iommu;
        int ret, par, val;

        iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        iommu->dev = &pdev->dev;
        INIT_LIST_HEAD(&iommu->ctx_list);

        iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
        if (IS_ERR(iommu->pclk)) {
                dev_err(iommu->dev, "could not get smmu_pclk\n");
                return PTR_ERR(iommu->pclk);
        }

        ret = clk_prepare(iommu->pclk);
        if (ret) {
                dev_err(iommu->dev, "could not prepare smmu_pclk\n");
                return ret;
        }

        iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
        if (IS_ERR(iommu->clk)) {
                dev_err(iommu->dev, "could not get iommu_clk\n");
                clk_unprepare(iommu->pclk);
                return PTR_ERR(iommu->clk);
        }

        ret = clk_prepare(iommu->clk);
        if (ret) {
                dev_err(iommu->dev, "could not prepare iommu_clk\n");
                clk_unprepare(iommu->pclk);
                return ret;
        }

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        iommu->base = devm_ioremap_resource(iommu->dev, r);
        if (IS_ERR(iommu->base)) {
                dev_err(iommu->dev, "could not get iommu base\n");
                ret = PTR_ERR(iommu->base);
                goto fail;
        }
        ioaddr = r->start;

        iommu->irq = platform_get_irq(pdev, 0);
        if (iommu->irq < 0) {
                ret = -ENODEV;
                goto fail;
        }

        ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
        if (ret) {
                dev_err(iommu->dev, "could not get ncb\n");
                goto fail;
        }
        iommu->ncb = val;

        msm_iommu_reset(iommu->base, iommu->ncb);
        SET_M(iommu->base, 0, 1);
        SET_PAR(iommu->base, 0, 0);
        SET_V2PCFG(iommu->base, 0, 1);
        SET_V2PPR(iommu->base, 0, 0);
        par = GET_PAR(iommu->base, 0);
        SET_V2PCFG(iommu->base, 0, 0);
        SET_M(iommu->base, 0, 0);

        if (!par) {
                pr_err("Invalid PAR value detected\n");
                ret = -ENODEV;
                goto fail;
        }

        ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
                                        msm_iommu_fault_handler,
                                        IRQF_ONESHOT | IRQF_SHARED,
                                        "msm_iommu_secure_irpt_handler",
                                        iommu);
        if (ret) {
                pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
                goto fail;
        }

        /* msm_iommu_remove() relies on drvdata being set. */
        platform_set_drvdata(pdev, iommu);

        list_add(&iommu->dev_node, &qcom_iommu_devices);

        ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
                                     "msm-smmu.%pa", &ioaddr);
        if (ret) {
                pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
                goto fail;
        }

        iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
        iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);

        ret = iommu_device_register(&iommu->iommu);
        if (ret) {
                pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
                goto fail;
        }

        bus_set_iommu(&platform_bus_type, &msm_iommu_ops);

        pr_info("device mapped at %p, irq %d with %d ctx banks\n",
                iommu->base, iommu->irq, iommu->ncb);

        return ret;
fail:
        clk_unprepare(iommu->clk);
        clk_unprepare(iommu->pclk);
        return ret;
}

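/*
 * Illustrative (not board-accurate) device tree node consumed by this
 * driver; the register address, interrupt and clock specifiers below are
 * placeholders, and the clock names match the devm_clk_get() calls above:
 *
 *      iommu@7500000 {
 *              compatible = "qcom,apq8064-iommu";
 *              reg = <0x07500000 0x100000>;
 *              interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
 *              clocks = <&clk_smmu_ahb>, <&clk_iommu_core>;
 *              clock-names = "smmu_pclk", "iommu_clk";
 *              qcom,ncb = <2>;
 *              #iommu-cells = <1>;
 *      };
 */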
static const struct of_device_id msm_iommu_dt_match[] = {
        { .compatible = "qcom,apq8064-iommu" },
        {}
};

static int msm_iommu_remove(struct platform_device *pdev)
{
        struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

        clk_unprepare(iommu->clk);
        clk_unprepare(iommu->pclk);
        return 0;
}

static struct platform_driver msm_iommu_driver = {
        .driver = {
                .name = "msm_iommu",
                .of_match_table = msm_iommu_dt_match,
        },
        .probe = msm_iommu_probe,
        .remove = msm_iommu_remove,
};

static int __init msm_iommu_driver_init(void)
{
        int ret;

        ret = platform_driver_register(&msm_iommu_driver);
        if (ret != 0)
                pr_err("Failed to register IOMMU driver\n");

        return ret;
}
subsys_initcall(msm_iommu_driver_init);