// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/adreno-smmu-priv.h>
#include <linux/io-pgtable.h>
#include "msm_drv.h"
#include "msm_mmu.h"

struct msm_iommu {
	struct msm_mmu base;
	struct iommu_domain *domain;
	atomic_t pagetables;
};

#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)

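/*
 * A per-process GPU pagetable: an io-pgtable instance layered on top of
 * the parent msm_iommu (the TTBR1 domain holding the kernel-managed
 * mappings).  The TTBR and ASID are exposed via
 * msm_iommu_pagetable_params() so the GPU can switch pagetables.
 */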
struct msm_iommu_pagetable {
	struct msm_mmu base;
	struct msm_mmu *parent;
	struct io_pgtable_ops *pgtbl_ops;
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	phys_addr_t ttbr;
	u32 asid;
};

static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
{
	return container_of(mmu, struct msm_iommu_pagetable, base);
}

/* based on iommu_pgsize() in iommu.c: */
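/*
 * Illustrative walk-through, assuming a pgsize_bitmap of 4K | 2M | 1G:
 * for iova == paddr == 0x1ff000 and size == 0x202000, the low alignment
 * bits limit the first pick to a 4K page, and *count is clamped to 1 so
 * that the next iteration starts 2M-aligned and can map a single 2M page.
 */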
static size_t calc_pgsize(struct msm_iommu_pagetable *pagetable,
			  unsigned long iova, phys_addr_t paddr,
			  size_t size, size_t *count)
{
	unsigned int pgsize_idx, pgsize_idx_next;
	unsigned long pgsizes;
	size_t offset, pgsize, pgsize_next;
	unsigned long addr_merge = paddr | iova;

	/* Page sizes supported by the hardware and small enough for @size */
	pgsizes = pagetable->pgsize_bitmap & GENMASK(__fls(size), 0);

	/* Constrain the page sizes further based on the maximum alignment */
	if (likely(addr_merge))
		pgsizes &= GENMASK(__ffs(addr_merge), 0);

	/* Make sure we have at least one suitable page size */
	BUG_ON(!pgsizes);

	/* Pick the biggest page size remaining */
	pgsize_idx = __fls(pgsizes);
	pgsize = BIT(pgsize_idx);
	if (!count)
		return pgsize;

	/* Find the next biggest supported page size, if it exists */
	pgsizes = pagetable->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
	if (!pgsizes)
		goto out_set_count;

	pgsize_idx_next = __ffs(pgsizes);
	pgsize_next = BIT(pgsize_idx_next);

	/*
	 * There's no point trying a bigger page size unless the virtual
	 * and physical addresses are similarly offset within the larger page.
	 */
	if ((iova ^ paddr) & (pgsize_next - 1))
		goto out_set_count;

	/* Calculate the offset to the next page size alignment boundary */
	offset = pgsize_next - (addr_merge & (pgsize_next - 1));

	/*
	 * If size is big enough to accommodate the larger page, reduce
	 * the number of smaller pages.
	 */
	if (offset + pgsize_next <= size)
		size = offset;

out_set_count:
	*count = size >> pgsize_idx;
	return pgsize;
}

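/*
 * Unmap in the largest chunks calc_pgsize() allows.  Because the TTBR0
 * pagetable is created with no-op TLB callbacks (see null_tlb_ops below),
 * a single full TLB flush on the parent domain is done at the end instead
 * of per-page invalidations.
 */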
static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
		size_t size)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;

	while (size) {
		size_t unmapped, pgsize, count;

		pgsize = calc_pgsize(pagetable, iova, iova, size, &count);

		unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);
		if (!unmapped)
			break;

		iova += unmapped;
		size -= unmapped;
	}

	iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);

	return (size == 0) ? 0 : -EINVAL;
}

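/*
 * Map an sgtable in the largest chunks calc_pgsize() allows.  If
 * map_pages() fails partway through, everything mapped so far is torn
 * down before returning.
 */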
static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
		struct sg_table *sgt, size_t len, int prot)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
	struct scatterlist *sg;
	u64 addr = iova;
	unsigned int i;

	for_each_sgtable_sg(sgt, sg, i) {
		size_t size = sg->length;
		phys_addr_t phys = sg_phys(sg);

		while (size) {
			size_t pgsize, count, mapped = 0;
			int ret;

			pgsize = calc_pgsize(pagetable, addr, phys, size, &count);

			ret = ops->map_pages(ops, addr, phys, pgsize, count,
					     prot, GFP_KERNEL, &mapped);

			/* map_pages could fail after mapping some of the pages,
			 * so update the counters before error handling.
			 */
			phys += mapped;
			addr += mapped;
			size -= mapped;

			if (ret) {
				msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
	struct adreno_smmu_priv *adreno_smmu =
		dev_get_drvdata(pagetable->parent->dev);

	/*
	 * If this is the last attached pagetable for the parent,
	 * disable TTBR0 in the arm-smmu driver
	 */
	if (atomic_dec_return(&iommu->pagetables) == 0)
		adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);

	free_io_pgtable_ops(pagetable->pgtbl_ops);
	kfree(pagetable);
}

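/*
 * Report the TTBR0 value and ASID of a per-process pagetable, so the
 * GPU driver can program them when switching address spaces (e.g. a6xx
 * hands the TTBR to the CP for its pagetable-switch packet).
 */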
int msm_iommu_pagetable_params(struct msm_mmu *mmu,
		phys_addr_t *ttbr, int *asid)
{
	struct msm_iommu_pagetable *pagetable;

	if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
		return -EINVAL;

	pagetable = to_pagetable(mmu);

	if (ttbr)
		*ttbr = pagetable->ttbr;

	if (asid)
		*asid = pagetable->asid;

	return 0;
}

struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	return &iommu->domain->geometry;
}

static const struct msm_mmu_funcs pagetable_funcs = {
		.map = msm_iommu_pagetable_map,
		.unmap = msm_iommu_pagetable_unmap,
		.destroy = msm_iommu_pagetable_destroy,
};

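/*
 * TLB maintenance for the per-process pagetables is deliberately a
 * no-op: msm_iommu_pagetable_unmap() issues one iommu_flush_iotlb_all()
 * on the parent domain per unmap instead of per-page invalidations.
 */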
static void msm_iommu_tlb_flush_all(void *cookie)
{
}

static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
		size_t granule, void *cookie)
{
}

static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
		unsigned long iova, size_t granule, void *cookie)
{
}

static const struct iommu_flush_ops null_tlb_ops = {
	.tlb_flush_all = msm_iommu_tlb_flush_all,
	.tlb_flush_walk = msm_iommu_tlb_flush_walk,
	.tlb_add_page = msm_iommu_tlb_add_page,
};

static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *arg);

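/*
 * Create a per-process pagetable: clone the parent's TTBR1 io-pgtable
 * config (kernel-managed mappings stay in TTBR1), strip the TTBR1 quirk,
 * and allocate a fresh ARM_64_LPAE_S1 pagetable for TTBR0.  The first
 * pagetable created also triggers the arm-smmu driver to enable TTBR0.
 */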
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
	struct msm_iommu *iommu = to_msm_iommu(parent);
	struct msm_iommu_pagetable *pagetable;
	const struct io_pgtable_cfg *ttbr1_cfg = NULL;
	struct io_pgtable_cfg ttbr0_cfg;
	int ret;

	/* Get the pagetable configuration from the domain */
	if (adreno_smmu->cookie)
		ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);

	/*
	 * If you hit this WARN_ONCE() you are probably missing an entry in
	 * qcom_smmu_impl_of_match[] in arm-smmu-qcom.c
	 */
	if (WARN_ONCE(!ttbr1_cfg, "No per-process page tables"))
		return ERR_PTR(-ENODEV);

	pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
	if (!pagetable)
		return ERR_PTR(-ENOMEM);

	msm_mmu_init(&pagetable->base, parent->dev, &pagetable_funcs,
		MSM_MMU_IOMMU_PAGETABLE);

	/* Clone the TTBR1 cfg as starting point for TTBR0 cfg: */
	ttbr0_cfg = *ttbr1_cfg;

	/* The incoming cfg will have the TTBR1 quirk enabled */
	ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
	ttbr0_cfg.tlb = &null_tlb_ops;

	pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
		&ttbr0_cfg, iommu->domain);

	if (!pagetable->pgtbl_ops) {
		kfree(pagetable);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * If this is the first pagetable that we've allocated, send it back to
	 * the arm-smmu driver as a trigger to set up TTBR0
	 */
	if (atomic_inc_return(&iommu->pagetables) == 1) {
		ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
		if (ret) {
			free_io_pgtable_ops(pagetable->pgtbl_ops);
			kfree(pagetable);
			return ERR_PTR(ret);
		}
	}

	/* Needed later for TLB flush */
	pagetable->parent = parent;
	pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
	pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;

	/*
	 * TODO we would like each set of page tables to have a unique ASID
	 * to optimize TLB invalidation. But iommu_flush_iotlb_all() will
	 * end up flushing the ASID used for TTBR1 pagetables, which is not
	 * what we want. So for now just use the same ASID as TTBR1.
	 */
	pagetable->asid = 0;

	return &pagetable->base;
}

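/*
 * Fault handler installed on the GPU's domain: fetch any extra fault
 * state the adreno-smmu glue exposes, then hand the fault to the GPU
 * driver's handler if one is registered; otherwise just log it and
 * resume translation so the SMMU does not stay stalled.
 */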
static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	struct msm_iommu *iommu = arg;
	struct msm_mmu *mmu = &iommu->base;
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(iommu->base.dev);
	struct adreno_smmu_fault_info info, *ptr = NULL;

	if (adreno_smmu->get_fault_info) {
		adreno_smmu->get_fault_info(adreno_smmu->cookie, &info);
		ptr = &info;
	}

	if (iommu->base.handler)
		return iommu->base.handler(iommu->base.arg, iova, flags, ptr);

	pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags);

	if (mmu->funcs->resume_translation)
		mmu->funcs->resume_translation(mmu);

	return 0;
}

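/*
 * Un-stall the SMMU after a fault; stall-on-fault is enabled in
 * msm_iommu_gpu_new() so the GPU driver has a chance to inspect the
 * faulting state before translation resumes.
 */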
static void msm_iommu_resume_translation(struct msm_mmu *mmu)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(mmu->dev);

	if (adreno_smmu->resume_translation)
		adreno_smmu->resume_translation(adreno_smmu->cookie, true);
}

static void msm_iommu_detach(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	iommu_detach_device(iommu->domain, mmu->dev);
}

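/*
 * The SMMU configuration used here selects TTBR1 via the upper address
 * bits, so IOVAs with bit 48 set must be sign-extended before being
 * handed to the IOMMU API: e.g. 0x0001_0000_0000_0000 becomes
 * 0xffff_0000_0000_0000.
 */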
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt, size_t len, int prot)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	size_t ret;

	/* The arm-smmu driver expects the addresses to be sign extended */
	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);

	ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);
	WARN_ON(!ret);

	return (ret == len) ? 0 : -EINVAL;
}

static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);

	iommu_unmap(iommu->domain, iova, len);

	return 0;
}

static void msm_iommu_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	iommu_domain_free(iommu->domain);
	kfree(iommu);
}

static const struct msm_mmu_funcs funcs = {
		.detach = msm_iommu_detach,
		.map = msm_iommu_map,
		.unmap = msm_iommu_unmap,
		.destroy = msm_iommu_destroy,
		.resume_translation = msm_iommu_resume_translation,
};

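/*
 * Create an MMU wrapper for @dev.  Returns NULL when no IOMMU domain
 * can be allocated for the device, or an ERR_PTR on failure; callers
 * check the result with IS_ERR_OR_NULL().
 */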
struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
{
	struct iommu_domain *domain;
	struct msm_iommu *iommu;
	int ret;

	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return NULL;

	iommu_set_pgtable_quirks(domain, quirks);

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu) {
		iommu_domain_free(domain);
		return ERR_PTR(-ENOMEM);
	}

	iommu->domain = domain;
	msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);

	atomic_set(&iommu->pagetables, 0);

	ret = iommu_attach_device(iommu->domain, dev);
	if (ret) {
		iommu_domain_free(domain);
		kfree(iommu);
		return ERR_PTR(ret);
	}

	return &iommu->base;
}

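/*
 * GPU-specific variant: also installs msm_fault_handler() and enables
 * stall-on-fault.  Illustrative call from a (hypothetical) GPU probe
 * path:
 *
 *	struct msm_mmu *mmu = msm_iommu_gpu_new(&pdev->dev, gpu, 0);
 *	if (IS_ERR_OR_NULL(mmu))
 *		return mmu ? PTR_ERR(mmu) : -ENODEV;
 */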
struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);
	struct msm_iommu *iommu;
	struct msm_mmu *mmu;

	mmu = msm_iommu_new(dev, quirks);
	if (IS_ERR_OR_NULL(mmu))
		return mmu;

	iommu = to_msm_iommu(mmu);
	iommu_set_fault_handler(iommu->domain, msm_fault_handler, iommu);

	/* Enable stall on iommu fault: */
	if (adreno_smmu->set_stall)
		adreno_smmu->set_stall(adreno_smmu->cookie, true);

	return mmu;
}