// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup:
 *
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
 *
 * Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/ppc-pci.h>
#include <asm/udbg.h>
#include <asm/mmzone.h>
#include <asm/plpar_wrappers.h>

#include "pseries.h"

enum {
	DDW_QUERY_PE_DMA_WIN = 0,
	DDW_CREATE_PE_DMA_WIN = 1,
	DDW_REMOVE_PE_DMA_WIN = 2,

	DDW_APPLICABLE_SIZE
};

enum {
	DDW_EXT_SIZE = 0,
	DDW_EXT_RESET_DMA_WIN = 1,
	DDW_EXT_QUERY_OUT_SIZE = 2
};
static struct iommu_table *iommu_pseries_alloc_table(int node)
{
	struct iommu_table *tbl;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, node);
	if (!tbl)
		return NULL;

	INIT_LIST_HEAD_RCU(&tbl->it_group_list);
	kref_init(&tbl->it_kref);
	return tbl;
}

static struct iommu_table_group *iommu_pseries_alloc_group(int node)
{
	struct iommu_table_group *table_group;

	table_group = kzalloc_node(sizeof(*table_group), GFP_KERNEL, node);
	if (!table_group)
		return NULL;

#ifdef CONFIG_IOMMU_API
	table_group->ops = &spapr_tce_table_group_ops;
	table_group->pgsizes = SZ_4K;
#endif

	table_group->tables[0] = iommu_pseries_alloc_table(node);
	if (table_group->tables[0])
		return table_group;

	kfree(table_group);
	return NULL;
}

static void iommu_pseries_free_group(struct iommu_table_group *table_group,
				     const char *node_name)
{
	if (!table_group)
		return;

#ifdef CONFIG_IOMMU_API
	if (table_group->group) {
		iommu_group_put(table_group->group);
		BUG_ON(table_group->group);
	}
#endif

	/* The default DMA window table is at index 0, while DDW is at 1.
	 * SR-IOV adapters only have a table at index 1.
	 */
	if (table_group->tables[0])
		iommu_tce_table_put(table_group->tables[0]);

	if (table_group->tables[1])
		iommu_tce_table_put(table_group->tables[1]);

	kfree(table_group);
}

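/*
 * Non-LPAR (bare-metal SMP) path: build TCEs by writing directly into
 * the kernel-owned TCE table. Each entry maps one IOMMU page of uaddr;
 * write permission is added for any direction other than DMA_TO_DEVICE.
 */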
static int tce_build_pSeries(struct iommu_table *tbl, long index,
			     long npages, unsigned long uaddr,
			     enum dma_data_direction direction,
			     unsigned long attrs)
{
	u64 proto_tce;
	__be64 *tcep;
	u64 rpn;
	const unsigned long tceshift = tbl->it_page_shift;
	const unsigned long pagesize = IOMMU_PAGE_SIZE(tbl);

	proto_tce = TCE_PCI_READ; // Read allowed

	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	tcep = ((__be64 *)tbl->it_base) + index;

	while (npages--) {
		/* can't move this out since we might cross MEMBLOCK boundary */
		rpn = __pa(uaddr) >> tceshift;
		*tcep = cpu_to_be64(proto_tce | rpn << tceshift);

		uaddr += pagesize;
		tcep++;
	}
	return 0;
}

static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{
	__be64 *tcep;

	tcep = ((__be64 *)tbl->it_base) + index;

	while (npages--)
		*(tcep++) = 0;
}

static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
{
	__be64 *tcep;

	tcep = ((__be64 *)tbl->it_base) + index;

	return be64_to_cpu(*tcep);
}

static void tce_free_pSeriesLP(unsigned long liobn, long, long, long);
static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);

static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
			       long npages, unsigned long uaddr,
			       enum dma_data_direction direction,
			       unsigned long attrs)
{
	u64 rc = 0;
	u64 proto_tce, tce;
	u64 rpn;
	int ret = 0;
	long tcenum_start = tcenum, npages_start = npages;

	rpn = __pa(uaddr) >> tceshift;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	while (npages--) {
		tce = proto_tce | rpn << tceshift;
		rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, tce);

		if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
			ret = (int)rc;
			tce_free_pSeriesLP(liobn, tcenum_start, tceshift,
					   (npages_start - (npages + 1)));
			break;
		}

		if (rc && printk_ratelimit()) {
			printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex = 0x%llx\n", (u64)liobn);
			printk("\ttcenum = 0x%llx\n", (u64)tcenum);
			printk("\ttce val = 0x%llx\n", tce);
			dump_stack();
		}

		tcenum++;
		rpn++;
	}
	return ret;
}

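/*
 * Per-CPU page used to batch TCEs for H_PUT_TCE_INDIRECT: entries are
 * staged here one pageful at a time, with interrupts disabled while
 * the page is in use.
 */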
static DEFINE_PER_CPU(__be64 *, tce_page);

static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
				    long npages, unsigned long uaddr,
				    enum dma_data_direction direction,
				    unsigned long attrs)
{
	u64 rc = 0;
	u64 proto_tce;
	__be64 *tcep;
	u64 rpn;
	long l, limit;
	long tcenum_start = tcenum, npages_start = npages;
	int ret = 0;
	unsigned long flags;
	const unsigned long tceshift = tbl->it_page_shift;

	if ((npages == 1) || !firmware_has_feature(FW_FEATURE_PUT_TCE_IND)) {
		return tce_build_pSeriesLP(tbl->it_index, tcenum,
					   tceshift, npages, uaddr,
					   direction, attrs);
	}

	local_irq_save(flags);	/* to protect tcep and the page behind it */

	tcep = __this_cpu_read(tce_page);

	/* This is safe to do since interrupts are off when we're called
	 * from iommu_alloc{,_sg}()
	 */
	if (!tcep) {
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		/* If allocation fails, fall back to the loop implementation */
		if (!tcep) {
			local_irq_restore(flags);
			return tce_build_pSeriesLP(tbl->it_index, tcenum,
						   tceshift,
						   npages, uaddr, direction, attrs);
		}
		__this_cpu_write(tce_page, tcep);
	}

	rpn = __pa(uaddr) >> tceshift;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, npages, 4096 / TCE_ENTRY_SIZE);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | rpn << tceshift);
			rpn++;
		}

		rc = plpar_tce_put_indirect((u64)tbl->it_index,
					    (u64)tcenum << tceshift,
					    (u64)__pa(tcep),
					    limit);

		npages -= limit;
		tcenum += limit;
	} while (npages > 0 && !rc);

	local_irq_restore(flags);

	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
		ret = (int)rc;
		tce_freemulti_pSeriesLP(tbl, tcenum_start,
					(npages_start - (npages + limit)));
		return ret;
	}

	if (rc && printk_ratelimit()) {
		printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages = 0x%llx\n", (u64)npages);
		printk("\ttce[0] val = 0x%llx\n", tcep[0]);
		dump_stack();
	}
	return ret;
}

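/*
 * Free TCEs one entry at a time via H_PUT_TCE with an empty TCE value;
 * this is also the fallback used when H_STUFF_TCE is not available.
 */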
static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
			       long npages)
{
	u64 rc;

	while (npages--) {
		rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, 0);

		if (rc && printk_ratelimit()) {
			printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex = 0x%llx\n", (u64)liobn);
			printk("\ttcenum = 0x%llx\n", (u64)tcenum);
			dump_stack();
		}

		tcenum++;
	}
}

static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;
	long rpages = npages;
	unsigned long limit;

	if (!firmware_has_feature(FW_FEATURE_STUFF_TCE))
		return tce_free_pSeriesLP(tbl->it_index, tcenum,
					  tbl->it_page_shift, npages);

	do {
		limit = min_t(unsigned long, rpages, 512);

		rc = plpar_tce_stuff((u64)tbl->it_index,
				     (u64)tcenum << tbl->it_page_shift, 0, limit);

		rpages -= limit;
		tcenum += limit;
	} while (rpages > 0 && !rc);

	if (rc && printk_ratelimit()) {
		printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
		printk("\trc = %lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages = 0x%llx\n", (u64)npages);
		dump_stack();
	}
}

static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
{
	u64 rc;
	unsigned long tce_ret;

	rc = plpar_tce_get((u64)tbl->it_index,
			   (u64)tcenum << tbl->it_page_shift, &tce_ret);

	if (rc && printk_ratelimit()) {
		printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\ttcenum = 0x%llx\n", (u64)tcenum);
		dump_stack();
	}

	return tce_ret;
}

/* this is compatible with cells for the device tree property */
struct dynamic_dma_window_prop {
	__be32	liobn;		/* tce table number */
	__be64	dma_base;	/* address hi,lo */
	__be32	tce_shift;	/* ilog2(tce_page_size) */
	__be32	window_shift;	/* ilog2(tce_window_size) */
};

struct dma_win {
	struct device_node *device;
	const struct dynamic_dma_window_prop *prop;
	bool direct;
	struct list_head list;
};

/* Dynamic DMA Window support */
struct ddw_query_response {
	u32 windows_available;
	u64 largest_available_block;
	u32 page_size;
	u32 migration_capable;
};

struct ddw_create_response {
	u32 liobn;
	u32 addr_hi;
	u32 addr_lo;
};

static LIST_HEAD(dma_win_list);
/* prevents races between memory on/offline and window creation */
static DEFINE_SPINLOCK(dma_win_list_lock);
/* protects initializing window twice for same device */
static DEFINE_MUTEX(dma_win_init_mutex);

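/*
 * Clear all TCEs backing the given pfn range of a dynamic DMA window,
 * up to 512 entries per H_STUFF_TCE call.
 */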
static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn,
					  unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	int rc;
	u64 tce_size, num_tce, dma_offset, next;
	u32 tce_shift;
	long limit;

	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of TCEs */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 512);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn),
				     dma_offset,
				     0, limit);
		next += limit * tce_size;
		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	return rc;
}

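/*
 * Map the given pfn range 1:1 (RAM address -> DMA offset) into a
 * dynamic DMA window, batching TCEs through the per-CPU page and
 * H_PUT_TCE_INDIRECT when the firmware supports it.
 */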
static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
					unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	u64 tce_size, num_tce, dma_offset, next, proto_tce, liobn;
	__be64 *tcep;
	u32 tce_shift;
	u64 rc = 0;
	long l, limit;

	if (!firmware_has_feature(FW_FEATURE_PUT_TCE_IND)) {
		unsigned long tceshift = be32_to_cpu(maprange->tce_shift);
		unsigned long dmastart = (start_pfn << PAGE_SHIFT) +
				be64_to_cpu(maprange->dma_base);
		unsigned long tcenum = dmastart >> tceshift;
		unsigned long npages = num_pfn << PAGE_SHIFT >> tceshift;
		void *uaddr = __va(start_pfn << PAGE_SHIFT);

		return tce_build_pSeriesLP(be32_to_cpu(maprange->liobn),
				tcenum, tceshift, npages, (unsigned long) uaddr,
				DMA_BIDIRECTIONAL, 0);
	}

	local_irq_disable();	/* to protect tcep and the page behind it */
	tcep = __this_cpu_read(tce_page);

	if (!tcep) {
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		if (!tcep) {
			local_irq_enable();
			return -ENOMEM;
		}
		__this_cpu_write(tce_page, tcep);
	}

	proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;

	liobn = (u64)be32_to_cpu(maprange->liobn);
	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of TCEs */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 4096 / TCE_ENTRY_SIZE);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | next);
			next += tce_size;
		}

		rc = plpar_tce_put_indirect(liobn,
					    dma_offset,
					    (u64)__pa(tcep),
					    limit);

		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	/* error cleanup: caller will clear whole range */

	local_irq_enable();
	return rc;
}

static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn,
		unsigned long num_pfn, void *arg)
{
	return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg);
}

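/*
 * Fill in the iommu_table fields common to all pSeries variants:
 * window geometry, backing table address (NULL on LPAR, where the
 * hypervisor owns the table) and the ops vector.
 */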
static void iommu_table_setparms_common(struct iommu_table *tbl, unsigned long busno,
					unsigned long liobn, unsigned long win_addr,
					unsigned long window_size, unsigned long page_shift,
					void *base, struct iommu_table_ops *table_ops)
{
	tbl->it_busno = busno;
	tbl->it_index = liobn;
	tbl->it_offset = win_addr >> page_shift;
	tbl->it_size = window_size >> page_shift;
	tbl->it_page_shift = page_shift;
	tbl->it_base = (unsigned long)base;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;
	tbl->it_ops = table_ops;
}

struct iommu_table_ops iommu_table_pseries_ops;

static void iommu_table_setparms(struct pci_controller *phb,
				 struct device_node *dn,
				 struct iommu_table *tbl)
{
	struct device_node *node;
	const unsigned long *basep;
	const u32 *sizep;

	/* Test if we are going over 2GB of DMA space */
	if (phb->dma_window_base_cur + phb->dma_window_size > SZ_2G) {
		udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
		panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
	}

	node = phb->dn;
	basep = of_get_property(node, "linux,tce-base", NULL);
	sizep = of_get_property(node, "linux,tce-size", NULL);
	if (basep == NULL || sizep == NULL) {
		printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %pOF has "
		       "missing tce entries !\n", dn);
		return;
	}

	iommu_table_setparms_common(tbl, phb->bus->number, 0, phb->dma_window_base_cur,
				    phb->dma_window_size, IOMMU_PAGE_SHIFT_4K,
				    __va(*basep), &iommu_table_pseries_ops);

	if (!is_kdump_kernel())
		memset((void *)tbl->it_base, 0, *sizep);

	phb->dma_window_base_cur += phb->dma_window_size;
}

struct iommu_table_ops iommu_table_lpar_multi_ops;

/*
 * iommu_table_setparms_lpar
 *
 * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
 */
static void iommu_table_setparms_lpar(struct pci_controller *phb,
				      struct device_node *dn,
				      struct iommu_table *tbl,
				      struct iommu_table_group *table_group,
				      const __be32 *dma_window)
{
	unsigned long offset, size, liobn;

	of_parse_dma_window(dn, dma_window, &liobn, &offset, &size);

	iommu_table_setparms_common(tbl, phb->bus->number, liobn, offset, size,
				    IOMMU_PAGE_SHIFT_4K, NULL,
				    &iommu_table_lpar_multi_ops);

	table_group->tce32_start = offset;
	table_group->tce32_size = size;
}

struct iommu_table_ops iommu_table_pseries_ops = {
	.set = tce_build_pSeries,
	.clear = tce_free_pSeries,
	.get = tce_get_pseries
};

static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
{
	struct device_node *dn;
	struct iommu_table *tbl;
	struct device_node *isa_dn, *isa_dn_orig;
	struct device_node *tmp;
	struct pci_dn *pci;
	int children;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeries: setting up bus %pOF\n", dn);

	if (bus->self) {
		/* This is not a root bus, any setup will be done for the
		 * device-side of the bridge in iommu_dev_setup_pSeries().
		 */
		return;
	}
	pci = PCI_DN(dn);

	/* Check if the ISA bus on the system is under
	 * this PHB.
	 */
	isa_dn = isa_dn_orig = of_find_node_by_type(NULL, "isa");

	while (isa_dn && isa_dn != dn)
		isa_dn = isa_dn->parent;

	of_node_put(isa_dn_orig);

	/* Count number of direct PCI children of the PHB. */
	for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
		children++;

	pr_debug("Children: %d\n", children);

	/* Calculate amount of DMA window per slot. Each window must be
	 * a power of two (due to pci_alloc_consistent requirements).
	 *
	 * Keep 256MB aside for PHBs with ISA.
	 */

	if (!isa_dn) {
		/* No ISA/IDE - just set window size and return */
		pci->phb->dma_window_size = 0x80000000ul; /* To be divided */

		while (pci->phb->dma_window_size * children > 0x80000000ul)
			pci->phb->dma_window_size >>= 1;
		pr_debug("No ISA/IDE, window size is 0x%llx\n",
			 pci->phb->dma_window_size);
		pci->phb->dma_window_base_cur = 0;

		return;
	}

	/* If we have ISA, then we probably have an IDE
	 * controller too. Allocate a 128MB table but
	 * skip the first 128MB to avoid stepping on ISA
	 * space.
	 */
	pci->phb->dma_window_size = 0x8000000ul;
	pci->phb->dma_window_base_cur = 0x8000000ul;

	pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
	tbl = pci->table_group->tables[0];

	iommu_table_setparms(pci->phb, dn, tbl);

	if (!iommu_init_table(tbl, pci->phb->node, 0, 0))
		panic("Failed to initialize iommu table");

	/* Divide the rest (1.75GB) among the children */
	pci->phb->dma_window_size = 0x80000000ul;
	while (pci->phb->dma_window_size * children > 0x70000000ul)
		pci->phb->dma_window_size >>= 1;

	pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size);
}

#ifdef CONFIG_IOMMU_API
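/*
 * Atomically exchange a TCE under the large-pool lock: read the old
 * entry, install the new one, and return the previous mapping and its
 * direction to the caller (wired up below as the xchg_no_kill callback).
 */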
static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned
				long *tce, enum dma_data_direction *direction)
{
	long rc;
	unsigned long ioba = (unsigned long) index << tbl->it_page_shift;
	unsigned long flags, oldtce = 0;
	u64 proto_tce = iommu_direction_to_tce_perm(*direction);
	unsigned long newtce = *tce | proto_tce;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);

	rc = plpar_tce_get((u64)tbl->it_index, ioba, &oldtce);
	if (!rc)
		rc = plpar_tce_put((u64)tbl->it_index, ioba, newtce);

	if (!rc) {
		*direction = iommu_tce_direction(oldtce);
		*tce = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	}

	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);

	return rc;
}
#endif

struct iommu_table_ops iommu_table_lpar_multi_ops = {
	.set = tce_buildmulti_pSeriesLP,
#ifdef CONFIG_IOMMU_API
	.xchg_no_kill = tce_exchange_pseries,
#endif
	.clear = tce_freemulti_pSeriesLP,
	.get = tce_get_pSeriesLP
};

/*
 * Find nearest ibm,dma-window (default DMA window) or direct DMA window or
 * dynamic 64bit DMA window, walking up the device tree.
 */
static struct device_node *pci_dma_find(struct device_node *dn,
					const __be32 **dma_window)
{
	const __be32 *dw = NULL;

	for ( ; dn && PCI_DN(dn); dn = dn->parent) {
		dw = of_get_property(dn, "ibm,dma-window", NULL);
		if (dw) {
			if (dma_window)
				*dma_window = dw;
			return dn;
		}
		dw = of_get_property(dn, DIRECT64_PROPNAME, NULL);
		if (dw)
			return dn;
		dw = of_get_property(dn, DMA64_PROPNAME, NULL);
		if (dw)
			return dn;
	}

	return NULL;
}

static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
{
	struct iommu_table *tbl;
	struct device_node *dn, *pdn;
	struct pci_dn *ppci;
	const __be32 *dma_window = NULL;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %pOF\n",
		 dn);

	pdn = pci_dma_find(dn, &dma_window);

	if (dma_window == NULL)
		pr_debug(" no ibm,dma-window property !\n");

	ppci = PCI_DN(pdn);

	pr_debug(" parent is %pOF, iommu_table: 0x%p\n",
		 pdn, ppci->table_group);

	if (!ppci->table_group) {
		ppci->table_group = iommu_pseries_alloc_group(ppci->phb->node);
		tbl = ppci->table_group->tables[0];
		if (dma_window) {
			iommu_table_setparms_lpar(ppci->phb, pdn, tbl,
						  ppci->table_group, dma_window);

			if (!iommu_init_table(tbl, ppci->phb->node, 0, 0))
				panic("Failed to initialize iommu table");
		}
		iommu_register_group(ppci->table_group,
				     pci_domain_nr(bus), 0);
		pr_debug(" created table: %p\n", ppci->table_group);
	}
}

static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
{
	struct device_node *dn;
	struct iommu_table *tbl;

	pr_debug("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));

	dn = dev->dev.of_node;

	/* If we're the direct child of a root bus, then we need to allocate
	 * an iommu table ourselves. The bus setup code should have setup
	 * the window sizes already.
	 */
	if (!dev->bus->self) {
		struct pci_controller *phb = PCI_DN(dn)->phb;

		pr_debug(" --> first child, no bridge. Allocating iommu table.\n");
		PCI_DN(dn)->table_group = iommu_pseries_alloc_group(phb->node);
		tbl = PCI_DN(dn)->table_group->tables[0];
		iommu_table_setparms(phb, dn, tbl);

		if (!iommu_init_table(tbl, phb->node, 0, 0))
			panic("Failed to initialize iommu table");

		set_iommu_table_base(&dev->dev, tbl);
		return;
	}

	/* If this device is further down the bus tree, search upwards until
	 * an already allocated iommu table is found and use that.
	 */

	while (dn && PCI_DN(dn) && PCI_DN(dn)->table_group == NULL)
		dn = dn->parent;

	if (dn && PCI_DN(dn))
		set_iommu_table_base(&dev->dev,
				     PCI_DN(dn)->table_group->tables[0]);
	else
		printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
		       pci_name(dev));
}

static int __read_mostly disable_ddw;

static int __init disable_ddw_setup(char *str)
{
	disable_ddw = 1;
	printk(KERN_INFO "ppc iommu: disabling ddw.\n");

	return 0;
}

early_param("disable_ddw", disable_ddw_setup);

static void clean_dma_window(struct device_node *np, struct dynamic_dma_window_prop *dwp)
{
	int ret;

	ret = tce_clearrange_multi_pSeriesLP(0,
		1ULL << (be32_to_cpu(dwp->window_shift) - PAGE_SHIFT), dwp);
	if (ret)
		pr_warn("%pOF failed to clear tces in window.\n",
			np);
	else
		pr_debug("%pOF successfully cleared tces in window.\n",
			 np);
}

/*
 * Call only if DMA window is clean.
 */
static void __remove_dma_window(struct device_node *np, u32 *ddw_avail, u64 liobn)
{
	int ret;

	ret = rtas_call(ddw_avail[DDW_REMOVE_PE_DMA_WIN], 1, 1, NULL, liobn);
	if (ret)
		pr_warn("%pOF: failed to remove DMA window: rtas returned "
			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
			np, ret, ddw_avail[DDW_REMOVE_PE_DMA_WIN], liobn);
	else
		pr_debug("%pOF: successfully removed DMA window: rtas returned "
			 "%d to ibm,remove-pe-dma-window(%x) %llx\n",
			 np, ret, ddw_avail[DDW_REMOVE_PE_DMA_WIN], liobn);
}

static void remove_dma_window(struct device_node *np, u32 *ddw_avail,
			      struct property *win)
{
	struct dynamic_dma_window_prop *dwp;
	u64 liobn;

	dwp = win->value;
	liobn = (u64)be32_to_cpu(dwp->liobn);

	clean_dma_window(np, dwp);
	__remove_dma_window(np, ddw_avail, liobn);
}

static int remove_ddw(struct device_node *np, bool remove_prop, const char *win_name)
{
	struct property *win;
	u32 ddw_avail[DDW_APPLICABLE_SIZE];
	int ret = 0;

	win = of_find_property(np, win_name, NULL);
	if (!win)
		return -EINVAL;

	ret = of_property_read_u32_array(np, "ibm,ddw-applicable",
					 &ddw_avail[0], DDW_APPLICABLE_SIZE);
	if (ret)
		return 0;

	if (win->length >= sizeof(struct dynamic_dma_window_prop))
		remove_dma_window(np, ddw_avail, win);

	if (!remove_prop)
		return 0;

	ret = of_remove_property(np, win);
	if (ret)
		pr_warn("%pOF: failed to remove DMA window property: %d\n",
			np, ret);
	return 0;
}

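/*
 * Look up a previously-created DDW for this device node so its
 * configuration can be reused instead of creating a second window.
 */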
static bool find_existing_ddw(struct device_node *pdn, u64 *dma_addr, int *window_shift)
{
	struct dma_win *window;
	const struct dynamic_dma_window_prop *dma64;
	bool found = false;

	spin_lock(&dma_win_list_lock);
	/* check if we already created a window and dupe that config if so */
	list_for_each_entry(window, &dma_win_list, list) {
		if (window->device == pdn) {
			dma64 = window->prop;
			*dma_addr = be64_to_cpu(dma64->dma_base);
			*window_shift = be32_to_cpu(dma64->window_shift);
			found = true;
			break;
		}
	}
	spin_unlock(&dma_win_list_lock);

	return found;
}

static struct dma_win *ddw_list_new_entry(struct device_node *pdn,
					  const struct dynamic_dma_window_prop *dma64)
{
	struct dma_win *window;

	window = kzalloc(sizeof(*window), GFP_KERNEL);
	if (!window)
		return NULL;

	window->device = pdn;
	window->prop = dma64;
	window->direct = false;

	return window;
}

static void find_existing_ddw_windows_named(const char *name)
{
	int len;
	struct device_node *pdn;
	struct dma_win *window;
	const struct dynamic_dma_window_prop *dma64;

	for_each_node_with_property(pdn, name) {
		dma64 = of_get_property(pdn, name, &len);
		if (!dma64 || len < sizeof(*dma64)) {
			remove_ddw(pdn, true, name);
			continue;
		}

		window = ddw_list_new_entry(pdn, dma64);
		if (!window) {
			of_node_put(pdn);
			break;
		}

		spin_lock(&dma_win_list_lock);
		list_add(&window->list, &dma_win_list);
		spin_unlock(&dma_win_list_lock);
	}
}

static int find_existing_ddw_windows(void)
{
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	find_existing_ddw_windows_named(DIRECT64_PROPNAME);
	find_existing_ddw_windows_named(DMA64_PROPNAME);

	return 0;
}
machine_arch_initcall(pseries, find_existing_ddw_windows);

/**
 * ddw_read_ext - Get the value of a DDW extension
 * @np: device node from which the extension value is to be read.
 * @extnum: index number of the extension.
 * @value: pointer to return value, modified when extension is available.
 *
 * Checks if "ibm,ddw-extensions" exists for this node, and gets the value
 * at index 'extnum'.
 * It can also be used just to check if the property exists, by passing
 * value == NULL.
 *
 * Returns:
 *	0 if extension successfully read
 *	-EINVAL if the "ibm,ddw-extensions" does not exist,
 *	-ENODATA if "ibm,ddw-extensions" does not have a value, and
 *	-EOVERFLOW if "ibm,ddw-extensions" does not contain this extension.
 */
static inline int ddw_read_ext(const struct device_node *np, int extnum,
			       u32 *value)
{
	static const char propname[] = "ibm,ddw-extensions";
	u32 count;
	int ret;

	ret = of_property_read_u32_index(np, propname, DDW_EXT_SIZE, &count);
	if (ret)
		return ret;

	if (count < extnum)
		return -EOVERFLOW;

	if (!value)
		value = &count;

	return of_property_read_u32_index(np, propname, extnum, value);
}

static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
		     struct ddw_query_response *query,
		     struct device_node *parent)
{
	struct device_node *dn;
	struct pci_dn *pdn;
	u32 cfg_addr, ext_query, query_out[5];
	u64 buid;
	int ret, out_sz;

	/*
	 * From LoPAR level 2.8, "ibm,ddw-extensions" index 3 can rule how many
	 * output parameters ibm,query-pe-dma-windows will have, ranging from
	 * 5 to 6.
	 */
	ret = ddw_read_ext(parent, DDW_EXT_QUERY_OUT_SIZE, &ext_query);
	if (!ret && ext_query == 1)
		out_sz = 6;
	else
		out_sz = 5;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	dn = pci_device_to_OF_node(dev);
	pdn = PCI_DN(dn);
	buid = pdn->phb->buid;
	cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));

	ret = rtas_call(ddw_avail[DDW_QUERY_PE_DMA_WIN], 3, out_sz, query_out,
			cfg_addr, BUID_HI(buid), BUID_LO(buid));

	switch (out_sz) {
	case 5:
		query->windows_available = query_out[0];
		query->largest_available_block = query_out[1];
		query->page_size = query_out[2];
		query->migration_capable = query_out[3];
		break;
	case 6:
		query->windows_available = query_out[0];
		query->largest_available_block = ((u64)query_out[1] << 32) |
						 query_out[2];
		query->page_size = query_out[3];
		query->migration_capable = query_out[4];
		break;
	}

	dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x returned %d, lb=%llx ps=%x wn=%d\n",
		 ddw_avail[DDW_QUERY_PE_DMA_WIN], cfg_addr, BUID_HI(buid),
		 BUID_LO(buid), ret, query->largest_available_block,
		 query->page_size, query->windows_available);

	return ret;
}

static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
		      struct ddw_create_response *create, int page_shift,
		      int window_shift)
{
	struct device_node *dn;
	struct pci_dn *pdn;
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	dn = pci_device_to_OF_node(dev);
	pdn = PCI_DN(dn);
	buid = pdn->phb->buid;
	cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));

	do {
		/* extra outputs are LIOBN and dma-addr (hi, lo) */
		ret = rtas_call(ddw_avail[DDW_CREATE_PE_DMA_WIN], 5, 4,
				(u32 *)create, cfg_addr, BUID_HI(buid),
				BUID_LO(buid), page_shift, window_shift);
	} while (rtas_busy_delay(ret));
	dev_info(&dev->dev,
		 "ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
		 "(liobn = 0x%x starting addr = %x %x)\n",
		 ddw_avail[DDW_CREATE_PE_DMA_WIN], cfg_addr, BUID_HI(buid),
		 BUID_LO(buid), page_shift, window_shift, ret, create->liobn,
		 create->addr_hi, create->addr_lo);

	return ret;
}

struct failed_ddw_pdn {
	struct device_node *pdn;
	struct list_head list;
};

static LIST_HEAD(failed_ddw_pdn_list);

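/*
 * Highest address that may need to be mapped: the larger of the memory
 * hotplug ceiling and the end of any "memory" node already present in
 * the device tree.
 */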
static phys_addr_t ddw_memory_hotplug_max(void)
{
	resource_size_t max_addr = memory_hotplug_max();
	struct device_node *memory;

	for_each_node_by_type(memory, "memory") {
		struct resource res;

		if (of_address_to_resource(memory, 0, &res))
			continue;

		max_addr = max_t(resource_size_t, max_addr, res.end + 1);
	}

	return max_addr;
}

/*
 * Platforms supporting the DDW option starting with LoPAR level 2.7 implement
 * ibm,ddw-extensions, which carries the rtas token for
 * ibm,reset-pe-dma-windows.
 * That rtas-call can be used to restore the default DMA window for the device.
 */
static void reset_dma_window(struct pci_dev *dev, struct device_node *par_dn)
{
	int ret;
	u32 cfg_addr, reset_dma_win;
	u64 buid;
	struct device_node *dn;
	struct pci_dn *pdn;

	ret = ddw_read_ext(par_dn, DDW_EXT_RESET_DMA_WIN, &reset_dma_win);
	if (ret)
		return;

	dn = pci_device_to_OF_node(dev);
	pdn = PCI_DN(dn);
	buid = pdn->phb->buid;
	cfg_addr = (pdn->busno << 16) | (pdn->devfn << 8);

	ret = rtas_call(reset_dma_win, 3, 1, NULL, cfg_addr, BUID_HI(buid),
			BUID_LO(buid));
	if (ret)
		dev_info(&dev->dev,
			 "ibm,reset-pe-dma-windows(%x) %x %x %x returned %d ",
			 reset_dma_win, cfg_addr, BUID_HI(buid), BUID_LO(buid),
			 ret);
}

/* Return largest page shift based on "IO Page Sizes" output of ibm,query-pe-dma-window. */
static int iommu_get_page_shift(u32 query_page_size)
{
	/* Supported IO page-sizes according to LoPAR, note that 2M is out of order */
	const int shift[] = {
		__builtin_ctzll(SZ_4K),   __builtin_ctzll(SZ_64K), __builtin_ctzll(SZ_16M),
		__builtin_ctzll(SZ_32M),  __builtin_ctzll(SZ_64M), __builtin_ctzll(SZ_128M),
		__builtin_ctzll(SZ_256M), __builtin_ctzll(SZ_16G), __builtin_ctzll(SZ_2M)
	};

	int i = ARRAY_SIZE(shift) - 1;
	int ret = 0;

	/*
	 * On LoPAR, ibm,query-pe-dma-window outputs "IO Page Sizes" using a bit field:
	 * - bit 31 means 4k pages are supported,
	 * - bit 30 means 64k pages are supported, and so on.
	 * Larger pagesizes map more memory with the same amount of TCEs, so start probing them.
	 */
	for (; i >= 0 ; i--) {
		if (query_page_size & (1 << i))
			ret = max(ret, shift[i]);
	}

	return ret;
}

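/*
 * Allocate a device-tree property describing a dynamic DMA window
 * (liobn, DMA base, TCE shift, window shift), ready to be attached
 * to the PE node with of_add_property().
 */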
static struct property *ddw_property_create(const char *propname, u32 liobn, u64 dma_addr,
					    u32 page_shift, u32 window_shift)
{
	struct dynamic_dma_window_prop *ddwprop;
	struct property *win64;

	win64 = kzalloc(sizeof(*win64), GFP_KERNEL);
	if (!win64)
		return NULL;

	win64->name = kstrdup(propname, GFP_KERNEL);
	ddwprop = kzalloc(sizeof(*ddwprop), GFP_KERNEL);
	win64->value = ddwprop;
	win64->length = sizeof(*ddwprop);
	if (!win64->name || !win64->value) {
		kfree(win64->name);
		kfree(win64->value);
		kfree(win64);
		return NULL;
	}

	ddwprop->liobn = cpu_to_be32(liobn);
	ddwprop->dma_base = cpu_to_be64(dma_addr);
	ddwprop->tce_shift = cpu_to_be32(page_shift);
	ddwprop->window_shift = cpu_to_be32(window_shift);

	return win64;
}

/*
 * If the PE supports dynamic dma windows, and there is space for a table
 * that can map all pages in a linear offset, then setup such a table,
 * and record the dma-offset in the struct device.
 *
 * dev: the pci device we are checking
 * pdn: the parent pe node with the ibm,dma-window property
 * Future: also check if we can remap the base window for our base page size
 *
 * returns true if can map all pages (direct mapping), false otherwise.
 */
static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
{
	int len = 0, ret;
	int max_ram_len = order_base_2(ddw_memory_hotplug_max());
	struct ddw_query_response query;
	struct ddw_create_response create;
	int page_shift;
	u64 win_addr;
	const char *win_name;
	struct device_node *dn;
	u32 ddw_avail[DDW_APPLICABLE_SIZE];
	struct dma_win *window;
	struct property *win64;
	struct failed_ddw_pdn *fpdn;
	bool default_win_removed = false, direct_mapping = false;
	bool pmem_present;
	struct pci_dn *pci = PCI_DN(pdn);
	struct property *default_win = NULL;

	dn = of_find_node_by_type(NULL, "ibm,pmemory");
	pmem_present = dn != NULL;
	of_node_put(dn);

	mutex_lock(&dma_win_init_mutex);

	if (find_existing_ddw(pdn, &dev->dev.archdata.dma_offset, &len)) {
		direct_mapping = (len >= max_ram_len);
		goto out_unlock;
	}

	/*
	 * If we already went through this for a previous function of
	 * the same device and failed, we don't want to muck with the
	 * DMA window again, as it will race with in-flight operations
	 * and can lead to EEHs. The above mutex protects access to the
	 * list.
	 */
	list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) {
		if (fpdn->pdn == pdn)
			goto out_unlock;
	}

	/*
	 * the ibm,ddw-applicable property holds the tokens for:
	 * ibm,query-pe-dma-window
	 * ibm,create-pe-dma-window
	 * ibm,remove-pe-dma-window
	 * for the given node in that order.
	 * the property is actually in the parent, not the PE
	 */
	ret = of_property_read_u32_array(pdn, "ibm,ddw-applicable",
					 &ddw_avail[0], DDW_APPLICABLE_SIZE);
	if (ret)
		goto out_failed;

	/*
	 * Query if there is a second window of size to map the
	 * whole partition. Query returns number of windows, largest
	 * block assigned to PE (partition endpoint), and two bitmasks
	 * of page sizes: supported and supported for migrate-dma.
	 */
	dn = pci_device_to_OF_node(dev);
	ret = query_ddw(dev, ddw_avail, &query, pdn);
	if (ret != 0)
		goto out_failed;

	/*
	 * If there is no window available, remove the default DMA window,
	 * if it's present. This will make all the resources available to the
	 * new DDW window.
	 * If anything fails after this, we need to restore it, so also check
	 * for extensions presence.
	 */
	if (query.windows_available == 0) {
		int reset_win_ext;

		/* DDW + IOMMU on single window may fail if there is any allocation */
		if (iommu_table_in_use(pci->table_group->tables[0])) {
			dev_warn(&dev->dev, "current IOMMU table in use, can't be replaced.\n");
			goto out_failed;
		}

		default_win = of_find_property(pdn, "ibm,dma-window", NULL);
		if (!default_win)
			goto out_failed;

		reset_win_ext = ddw_read_ext(pdn, DDW_EXT_RESET_DMA_WIN, NULL);
		if (reset_win_ext)
			goto out_failed;

		remove_dma_window(pdn, ddw_avail, default_win);
		default_win_removed = true;

		/* Query again, to check if the window is available */
		ret = query_ddw(dev, ddw_avail, &query, pdn);
		if (ret != 0)
			goto out_failed;

		if (query.windows_available == 0) {
			/* no windows are available for this device. */
			dev_dbg(&dev->dev, "no free dynamic windows");
			goto out_failed;
		}
	}

	page_shift = iommu_get_page_shift(query.page_size);
	if (!page_shift) {
		dev_dbg(&dev->dev, "no supported page size in mask %x",
			query.page_size);
		goto out_failed;
	}

	/*
	 * The "ibm,pmemory" can appear anywhere in the address space.
	 * Assuming it is still backed by page structs, try MAX_PHYSMEM_BITS
	 * for the upper limit and fallback to max RAM otherwise but this
	 * disables device::dma_ops_bypass.
	 */
	len = max_ram_len;
	if (pmem_present) {
		if (query.largest_available_block >=
		    (1ULL << (MAX_PHYSMEM_BITS - page_shift)))
			len = MAX_PHYSMEM_BITS;
		else
			dev_info(&dev->dev, "Skipping ibm,pmemory");
	}

	/* check if the available block * number of ptes will map everything */
	if (query.largest_available_block < (1ULL << (len - page_shift))) {
		dev_dbg(&dev->dev,
			"can't map partition max 0x%llx with %llu %llu-sized pages\n",
			1ULL << len,
			query.largest_available_block,
			1ULL << page_shift);

		len = order_base_2(query.largest_available_block << page_shift);
		win_name = DMA64_PROPNAME;
	} else {
		direct_mapping = !default_win_removed ||
			(len == MAX_PHYSMEM_BITS) ||
			(!pmem_present && (len == max_ram_len));
		win_name = direct_mapping ? DIRECT64_PROPNAME : DMA64_PROPNAME;
	}

	ret = create_ddw(dev, ddw_avail, &create, page_shift, len);
	if (ret != 0)
		goto out_failed;

	dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %pOF\n",
		create.liobn, dn);

	win_addr = ((u64)create.addr_hi << 32) | create.addr_lo;
	win64 = ddw_property_create(win_name, create.liobn, win_addr, page_shift, len);

	if (!win64) {
		dev_info(&dev->dev,
			 "couldn't allocate property, property name, or value\n");
		goto out_remove_win;
	}

	ret = of_add_property(pdn, win64);
	if (ret) {
		dev_err(&dev->dev, "unable to add DMA window property for %pOF: %d",
			pdn, ret);
		goto out_free_prop;
	}

	window = ddw_list_new_entry(pdn, win64->value);
	if (!window)
		goto out_del_prop;

	if (direct_mapping) {
		window->direct = true;

		/* DDW maps the whole partition, so enable direct DMA mapping */
		ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
					    win64->value, tce_setrange_multi_pSeriesLP_walk);
		if (ret) {
			dev_info(&dev->dev, "failed to map DMA window for %pOF: %d\n",
				 dn, ret);

			/* Make sure to clean DDW if any TCE was set */
			clean_dma_window(pdn, win64->value);
			goto out_del_list;
		}
	} else {
		struct iommu_table *newtbl;
		int i;
		unsigned long start = 0, end = 0;

		window->direct = false;

		for (i = 0; i < ARRAY_SIZE(pci->phb->mem_resources); i++) {
			const unsigned long mask = IORESOURCE_MEM_64 | IORESOURCE_MEM;

			/* Look for MMIO32 */
			if ((pci->phb->mem_resources[i].flags & mask) == IORESOURCE_MEM) {
				start = pci->phb->mem_resources[i].start;
				end = pci->phb->mem_resources[i].end;
				break;
			}
		}

		/* New table for using DDW instead of the default DMA window */
		newtbl = iommu_pseries_alloc_table(pci->phb->node);
		if (!newtbl) {
			dev_dbg(&dev->dev, "couldn't create new IOMMU table\n");
			goto out_del_list;
		}

		iommu_table_setparms_common(newtbl, pci->phb->bus->number, create.liobn, win_addr,
					    1UL << len, page_shift, NULL, &iommu_table_lpar_multi_ops);
		iommu_init_table(newtbl, pci->phb->node, start, end);

		pci->table_group->tables[1] = newtbl;

		set_iommu_table_base(&dev->dev, newtbl);
	}

	if (default_win_removed) {
		iommu_tce_table_put(pci->table_group->tables[0]);
		pci->table_group->tables[0] = NULL;

		/* default_win is valid here because default_win_removed == true */
		of_remove_property(pdn, default_win);
		dev_info(&dev->dev, "Removed default DMA window for %pOF\n", pdn);
	}

	spin_lock(&dma_win_list_lock);
	list_add(&window->list, &dma_win_list);
	spin_unlock(&dma_win_list_lock);

	dev->dev.archdata.dma_offset = win_addr;
	goto out_unlock;

out_del_list:
	kfree(window);

out_del_prop:
	of_remove_property(pdn, win64);

out_free_prop:
	kfree(win64->name);
	kfree(win64->value);
	kfree(win64);

out_remove_win:
	/* DDW is clean, so it's ok to call this directly. */
	__remove_dma_window(pdn, ddw_avail, create.liobn);

out_failed:
	if (default_win_removed)
		reset_dma_window(dev, pdn);

	fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
	if (!fpdn)
		goto out_unlock;
	fpdn->pdn = pdn;
	list_add(&fpdn->list, &failed_ddw_pdn_list);

out_unlock:
	mutex_unlock(&dma_win_init_mutex);

	/*
	 * If we have persistent memory and the window size is only as big
	 * as RAM, then we failed to create a window to cover persistent
	 * memory and need to set the DMA limit.
	 */
	if (pmem_present && direct_mapping && len == max_ram_len)
		dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset + (1ULL << len);

	return direct_mapping;
}

static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
{
	struct device_node *pdn, *dn;
	struct iommu_table *tbl;
	const __be32 *dma_window = NULL;
	struct pci_dn *pci;

	pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));

	/* dev setup for LPAR is a little tricky, since the device tree might
	 * contain the dma-window properties per-device and not necessarily
	 * for the bus. So we need to search upwards in the tree until we
	 * either hit a dma-window property, OR find a parent with a table
	 * already allocated.
	 */
	dn = pci_device_to_OF_node(dev);
	pr_debug(" node is %pOF\n", dn);

	pdn = pci_dma_find(dn, &dma_window);
	if (!pdn || !PCI_DN(pdn)) {
		printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
		       "no DMA window found for pci dev=%s dn=%pOF\n",
		       pci_name(dev), dn);
		return;
	}
	pr_debug(" parent is %pOF\n", pdn);

	pci = PCI_DN(pdn);
	if (!pci->table_group) {
		pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
		tbl = pci->table_group->tables[0];
		iommu_table_setparms_lpar(pci->phb, pdn, tbl,
					  pci->table_group, dma_window);

		iommu_init_table(tbl, pci->phb->node, 0, 0);
		iommu_register_group(pci->table_group,
				     pci_domain_nr(pci->phb->bus), 0);
		pr_debug(" created table: %p\n", pci->table_group);
	} else {
		pr_debug(" found DMA window, table: %p\n", pci->table_group);
	}

	set_iommu_table_base(&dev->dev, pci->table_group->tables[0]);
	iommu_add_device(pci->table_group, &dev->dev);
}

static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
{
	struct device_node *dn = pci_device_to_OF_node(pdev), *pdn;

	/* only attempt to use a new window if 64-bit DMA is requested */
	if (dma_mask < DMA_BIT_MASK(64))
		return false;

	dev_dbg(&pdev->dev, "node is %pOF\n", dn);

	/*
	 * the device tree might contain the dma-window properties
	 * per-device and not necessarily for the bus. So we need to
	 * search upwards in the tree until we either hit a dma-window
	 * property, OR find a parent with a table already allocated.
	 */
	pdn = pci_dma_find(dn, NULL);
	if (pdn && PCI_DN(pdn))
		return enable_ddw(pdev, pdn);

	return false;
}

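/*
 * Memory hotplug notifier: keep direct-mapped windows in sync by
 * adding TCEs for memory going online and clearing them again when
 * the memory goes offline or the online operation is cancelled.
 */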
static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
		void *data)
{
	struct dma_win *window;
	struct memory_notify *arg = data;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		spin_lock(&dma_win_list_lock);
		list_for_each_entry(window, &dma_win_list, list) {
			if (window->direct) {
				ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
						arg->nr_pages, window->prop);
			}
			/* XXX log error */
		}
		spin_unlock(&dma_win_list_lock);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE:
		spin_lock(&dma_win_list_lock);
		list_for_each_entry(window, &dma_win_list, list) {
			if (window->direct) {
				ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
						arg->nr_pages, window->prop);
			}
			/* XXX log error */
		}
		spin_unlock(&dma_win_list_lock);
		break;
	default:
		break;
	}
	if (ret && action != MEM_CANCEL_ONLINE)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static struct notifier_block iommu_mem_nb = {
	.notifier_call = iommu_mem_notifier,
};

static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
	int err = NOTIFY_OK;
	struct of_reconfig_data *rd = data;
	struct device_node *np = rd->dn;
	struct pci_dn *pci = PCI_DN(np);
	struct dma_win *window;

	switch (action) {
	case OF_RECONFIG_DETACH_NODE:
		/*
		 * Removing the property will invoke the reconfig
		 * notifier again, which causes dead-lock on the
		 * read-write semaphore of the notifier chain. So
		 * we have to remove the property when releasing
		 * the device node.
		 */
		if (remove_ddw(np, false, DIRECT64_PROPNAME))
			remove_ddw(np, false, DMA64_PROPNAME);

		if (pci && pci->table_group)
			iommu_pseries_free_group(pci->table_group,
					np->full_name);

		spin_lock(&dma_win_list_lock);
		list_for_each_entry(window, &dma_win_list, list) {
			if (window->device == np) {
				list_del(&window->list);
				kfree(window);
				break;
			}
		}
		spin_unlock(&dma_win_list_lock);
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}

static struct notifier_block iommu_reconfig_nb = {
	.notifier_call = iommu_reconfig_notifier,
};

/* These are called very early. */
void __init iommu_init_early_pSeries(void)
{
	if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL))
		return;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
		pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
		if (!disable_ddw)
			pseries_pci_controller_ops.iommu_bypass_supported =
				iommu_bypass_supported_pSeriesLP;
	} else {
		pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeries;
		pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeries;
	}

	of_reconfig_notifier_register(&iommu_reconfig_nb);
	register_memory_notifier(&iommu_mem_nb);

	set_pci_dma_ops(&dma_iommu_ops);
}

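/*
 * "multitce=off" on the kernel command line disables the multi-TCE
 * firmware calls (H_PUT_TCE_INDIRECT/H_STUFF_TCE), forcing the
 * one-TCE-per-hcall paths above.
 */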
static int __init disable_multitce(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_LPAR) &&
	    (firmware_has_feature(FW_FEATURE_PUT_TCE_IND) ||
	     firmware_has_feature(FW_FEATURE_STUFF_TCE))) {
		printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
		powerpc_firmware_features &=
			~(FW_FEATURE_PUT_TCE_IND | FW_FEATURE_STUFF_TCE);
	}
	return 1;
}

__setup("multitce=", disable_multitce);

#ifdef CONFIG_SPAPR_TCE_IOMMU
struct iommu_group *pSeries_pci_device_group(struct pci_controller *hose,
					     struct pci_dev *pdev)
{
	struct device_node *pdn, *dn = pdev->dev.of_node;
	struct iommu_group *grp;
	struct pci_dn *pci;

	pdn = pci_dma_find(dn, NULL);
	if (!pdn || !PCI_DN(pdn))
		return ERR_PTR(-ENODEV);

	pci = PCI_DN(pdn);
	if (!pci->table_group)
		return ERR_PTR(-ENODEV);

	grp = pci->table_group->group;
	if (!grp)
		return ERR_PTR(-ENODEV);

	return iommu_group_ref_get(grp);
}
#endif