Lines matching "iommu" (full-identifier matches) in arch/powerpc/platforms/cell/iommu.c:

3  * IOMMU implementation for Cell Broadband Processor Architecture
22 #include <asm/iommu.h>
93 /* IOMMU sizing */

in struct iommu_window:
102 	struct cbe_iommu *iommu;
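
For orientation, a sketch of the two structures these matches revolve around. The field set is inferred from the lines in this listing; any field not visible in a match is an assumption.

struct cbe_iommu {
	int nid;			/* NUMA node id (iommu->nid below) */
	char name[32];			/* "iommu%d", handed to request_irq() */
	void __iomem *xlate_regs;	/* translation + exception registers */
	void __iomem *cmd_regs;		/* xlate_regs + IOC_IOCmd_Offset */
	unsigned long *stab;		/* segment table */
	unsigned long *ptab;		/* page table */
	void *pad_page;			/* zeroed page backing reserved entries */
	struct list_head windows;	/* list of struct iommu_window */
};

struct iommu_window {
	struct list_head list;		/* linked into iommu->windows */
	struct cbe_iommu *iommu;	/* the member matched on line 102 */
	unsigned long offset;		/* assumed: window placement, used */
	unsigned long size;		/*   by find_window() further down */
	unsigned int ioid;		/* assumed: I/O id for this window */
	struct iommu_table table;	/* it_base/it_index set at 481-482 */
};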

in invalidate_tce_cache():
129 static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
136 	reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;
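
The one register touched here, IOC_IOPT_CacheInvd, implies the usual kick-and-poll MMIO idiom: write an invalidate command covering the PTEs just modified, then spin until the busy bit clears. A minimal sketch, assuming the IOC_IOPT_CacheInvd_Busy bit name; the full command encoding is abridged:

static void invalidate_tce_cache_sketch(struct cbe_iommu *iommu,
					unsigned long *pte, long npages)
{
	unsigned long __iomem *reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;

	/* the command names the PTE run by physical address; how npages
	 * is encoded into the command word is elided here */
	out_be64(reg, __pa(pte) | IOC_IOPT_CacheInvd_Busy);
	while (in_be64(reg) & IOC_IOPT_CacheInvd_Busy)
		cpu_relax();	/* poll until the IOC drains the invalidate */
}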

in tce_build_cell():
193 	invalidate_tce_cache(window->iommu, io_pte, npages);

in tce_free_cell():
216 		__pa(window->iommu->pad_page) |
227 	invalidate_tce_cache(window->iommu, io_pte, npages);

in ioc_interrupt():
233 	struct cbe_iommu *iommu = data;
235 	stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
239 	printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat);
251 	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat);
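
Read together, lines 233-251 form a complete read-report-acknowledge handler: the exception status is read, the DMA fault logged, and the same value written back to clear it. A sketch of that flow, with the decoding of individual fault bits omitted:

static irqreturn_t ioc_interrupt_sketch(int irq, void *data)
{
	struct cbe_iommu *iommu = data;	/* dev_id passed at request_irq() */
	unsigned long stat;

	stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
	printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat);

	/* writing the status back acknowledges the exception (line 251) */
	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat);
	return IRQ_HANDLED;
}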

in cell_iommu_find_ioc():
268 	printk(KERN_ERR "iommu: can't get address for %pOF\n",

in cell_iommu_setup_stab():
296 static void cell_iommu_setup_stab(struct cbe_iommu *iommu,
305 	pr_debug("%s: iommu[%d]: segments: %lu\n",
306 		 __func__, iommu->nid, segments);
310 	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
312 	iommu->stab = page_address(page);
313 	memset(iommu->stab, 0, stab_size);
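
Lines 310-313 show the allocation idiom used throughout this file: the table is placed on the IOMMU's own NUMA node and zeroed. A self-contained sketch, assuming stab_size is segments * sizeof(unsigned long):

static unsigned long *alloc_stab_sketch(struct cbe_iommu *iommu,
					unsigned long segments)
{
	unsigned long stab_size = segments * sizeof(unsigned long);
	struct page *page;

	/* node-local allocation keeps table walks off the interconnect */
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
	BUG_ON(!page);
	iommu->stab = page_address(page);
	memset(iommu->stab, 0, stab_size);
	return iommu->stab;
}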

in cell_iommu_alloc_ptab():
316 static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
333 	pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __func__,
334 		 iommu->nid, ptab_size, get_order(ptab_size));
335 	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
344 	pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
345 		 __func__, iommu->nid, iommu->stab, ptab,
362 	pr_debug("Setting up IOMMU stab:\n");
368 		iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) *
370 		pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
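
Line 368 is truncated in the match, but the shape of the loop is clear: each segment-table entry receives the physical address of its slice of the page table, consecutive slices sitting n_pte_pages 4K pages apart, with reg supplying the format bits. A sketch of that loop; the start_seg/segments bounds are assumptions:

static void fill_stab_sketch(struct cbe_iommu *iommu, unsigned long *ptab,
			     unsigned long reg, unsigned long n_pte_pages,
			     int start_seg, int segments)
{
	int i;

	for (i = start_seg; i < start_seg + segments; i++) {
		/* slice i of the ptab, n_pte_pages 4K pages per slice */
		iommu->stab[i] = reg |
			(__pa(ptab) + (n_pte_pages << 12) * (i - start_seg));
		pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
	}
}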

in cell_iommu_enable_hardware():
376 static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
382 	if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
384 			__func__, iommu->nid);
386 	iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
387 	iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;
392 	/* setup interrupts for the iommu. */
393 	reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
394 	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat,
396 	out_be64(iommu->xlate_regs + IOC_IO_ExcpMask,
400 			IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
403 	ret = request_irq(virq, ioc_interrupt, 0, iommu->name, iommu);
406 	/* set the IOC segment table origin register (and turn on the iommu) */
407 	reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
408 	out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
409 	in_be64(iommu->xlate_regs + IOC_IOST_Origin);
412 	reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
413 	out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
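
The fragments above form a complete bring-up sequence: map the IOC registers, clear and unmask exceptions, install the handler from lines 233-251, point IOST_Origin at the segment table, and set the translate-enable bit last. A condensed sketch of that ordering; the exception-mask details are elided:

static void enable_sketch(struct cbe_iommu *iommu)
{
	unsigned int virq;
	u64 reg;

	/* 1. ack stale exception status by writing it back (lines 393-394) */
	reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, reg);

	/* 2. wire the per-node IOC exception interrupt (lines 400-403) */
	virq = irq_create_mapping(NULL,
			IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
	if (request_irq(virq, ioc_interrupt, 0, iommu->name, iommu))
		pr_err("iommu: failed to request exception irq\n");

	/* 3. publish the segment table; read back to flush (lines 407-409) */
	reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
	out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
	in_be64(iommu->xlate_regs + IOC_IOST_Origin);

	/* 4. only then flip the translate-enable bit (lines 412-413) */
	reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
	out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
}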

in cell_iommu_setup_hardware():
416 static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
419 	cell_iommu_setup_stab(iommu, base, size, 0, 0);
420 	iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
422 	cell_iommu_enable_hardware(iommu);

in find_window():
426 static struct iommu_window *find_window(struct cbe_iommu *iommu,
433 	list_for_each_entry(window, &(iommu->windows), list) {
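
A plausible completion of find_window() around the list walk on line 433; the match condition is an assumption based on the offset/size fields sketched earlier:

static struct iommu_window *find_window_sketch(struct cbe_iommu *iommu,
		unsigned long offset, unsigned long size)
{
	struct iommu_window *window;

	/* return the window covering exactly this offset/size, if any */
	list_for_each_entry(window, &(iommu->windows), list) {
		if (window->offset == offset && window->size == size)
			return window;
	}
	return NULL;
}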

in cell_iommu_get_ioid():
448 	printk(KERN_WARNING "iommu: missing ioid for %pOF using 0\n",

in cell_iommu_setup_window():
462 cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
472 	window = kzalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
478 	window->iommu = iommu;
481 	window->table.it_base = (unsigned long)iommu->ptab;
482 	window->table.it_index = iommu->nid;
489 	iommu_init_table(&window->table, iommu->nid, 0, 0);
497 	list_add(&window->list, &iommu->windows);
502 	/* We need to map and reserve the first IOMMU page since it's used
509 	page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
511 	iommu->pad_page = page_address(page);
512 	clear_page(iommu->pad_page);
516 			(unsigned long)iommu->pad_page, DMA_TO_DEVICE, 0);
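
Lines 502-516 reserve IOMMU page 0 by backing it with a zeroed, node-local pad page (the comment explaining why is truncated in the match at 502). The reservation idiom itself:

static void reserve_pad_page_sketch(struct cbe_iommu *iommu)
{
	struct page *page;

	/* a node-local page, zeroed, mapped at IOMMU address 0 and
	 * never handed out to drivers */
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
	BUG_ON(!page);
	iommu->pad_page = page_address(page);
	clear_page(iommu->pad_page);
}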

in cell_get_iommu_table():
542 	struct cbe_iommu *iommu;
545 	 * node's iommu. We -might- do something smarter later though it may
548 	iommu = cell_iommu_for_node(dev_to_node(dev));
549 	if (iommu == NULL || list_empty(&iommu->windows)) {
550 		dev_err(dev, "iommu: missing iommu for %pOF (node %d)\n",
554 	window = list_entry(iommu->windows.next, struct iommu_window, list);
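
These lines amount to: resolve the device's NUMA node to its iommu and hand back the first (and in practice only) window's table. Sketched end to end, assuming cell_iommu_for_node() as used on line 548:

static struct iommu_table *get_table_sketch(struct device *dev)
{
	struct cbe_iommu *iommu = cell_iommu_for_node(dev_to_node(dev));
	struct iommu_window *window;

	if (iommu == NULL || list_empty(&iommu->windows)) {
		dev_err(dev, "iommu: missing iommu for %pOF (node %d)\n",
			dev->of_node, dev_to_node(dev));
		return NULL;
	}

	/* first window on the list is the device's table (line 554) */
	window = list_entry(iommu->windows.next, struct iommu_window, list);
	return &window->table;
}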

in cell_iommu_alloc():
619 	struct cbe_iommu *iommu;
625 		printk(KERN_ERR "iommu: failed to get node for %pOF\n",
629 	pr_debug("iommu: setting up iommu for node %d (%pOF)\n",
632 	/* XXX todo: If we can have multiple windows on the same IOMMU, which
634 	 * iommu for that node is already setup.
637 	 * multiple window support since the cell iommu supports per-page ioids
641 		printk(KERN_ERR "iommu: too many IOMMUs detected ! (%pOF)\n",
648 	iommu = &iommus[i];
649 	iommu->stab = NULL;
650 	iommu->nid = nid;
651 	snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i);
652 	INIT_LIST_HEAD(&iommu->windows);
654 	return iommu;
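
Line 648 indexes a static iommus[] array rather than allocating: Cell has a small, fixed number of IOCs. A sketch of the slot initialisation; the array and its bound are assumptions:

#define NR_IOMMUS_SKETCH	2	/* assumed bound on IOCs per machine */
static struct cbe_iommu iommus[NR_IOMMUS_SKETCH];

static struct cbe_iommu *alloc_slot_sketch(int i, int nid)
{
	struct cbe_iommu *iommu = &iommus[i];

	iommu->stab = NULL;	/* no segment table until setup */
	iommu->nid = nid;
	snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i);
	INIT_LIST_HEAD(&iommu->windows);
	return iommu;
}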

in cell_iommu_init_one():
660 	struct cbe_iommu *iommu;
663 	iommu = cell_iommu_alloc(np);
664 	if (!iommu)
674 	cell_iommu_setup_hardware(iommu, base, size);
677 	cell_iommu_setup_window(iommu, np, base, size,

in cell_disable_iommus():
696 		pr_debug("iommu: cleaning up iommu on node %d\n", node);

in cell_iommu_init_disabled():
714 	/* When no iommu is present, we use direct DMA ops */
746 	 * all of physical memory. If not, we force enable IOMMU
749 		printk(KERN_WARNING "iommu: force-enabled, dma window"
760 	printk("iommu: disabled, direct DMA offset is 0x%lx\n",

file comment (fixed mapping support):
767  * Fixed IOMMU mapping support
769  * This code adds support for setting up a fixed IOMMU mapping on certain
776  * we setup the fixed mapping immediately above the normal IOMMU window.
779  * IOMMU window from 0-2GB and the fixed mapping window from 2GB to 6GB. In
785  * mapping above the normal IOMMU window as we would run out of address space.
786  * Instead we move the normal IOMMU window to coincide with the hash page
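
The 0-2GB / 2GB-6GB split quoted above makes the fixed-window translation trivial: a bus address in the fixed region is the physical address plus the window base. A worked micro-example under those assumed sizes:

/* assumed sizes, matching the example in the comment above */
#define DYNAMIC_WINDOW_SIZE	0x80000000ul	/* 0 .. 2GB: dynamic window */
#define FIXED_WINDOW_BASE	DYNAMIC_WINDOW_SIZE /* 2GB .. 6GB: fixed, linear */

static unsigned long phys_to_fixed_dma(unsigned long paddr)
{
	/* e.g. paddr 0x10000000 (256MB) -> bus address 0x90000000 */
	return FIXED_WINDOW_BASE + paddr;
}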

in cell_iommu_get_fixed_address():
819 		dev_dbg(dev, "iommu: no dma-ranges found\n");
846 		dev_dbg(dev, "iommu: no suitable range found!\n");

in insert_16M_pte():
869 	pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n",

in cell_iommu_setup_fixed_ptab():
875 static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
881 	ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24);
885 	pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);
891 		pr_info("IOMMU: Using weak ordering for fixed mapping\n");
893 		pr_info("IOMMU: Using strong ordering for fixed mapping\n");
901 			pr_debug("iommu: fixed/dynamic overlap, skipping\n");
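
Two details stand out here: the fixed window's page table is allocated with a 24-bit page shift (16MB I/O pages, line 881), and the DMA ordering attribute is chosen once for the whole window (lines 891-893). A sketch of that base-PTE choice; the CBE_IOPTE_* flag names are assumptions modelled on the driver's PTE layout:

static unsigned long fixed_base_pte_sketch(unsigned int ioid, bool weak)
{
	/* readable, writable, coherent, tagged with the device's ioid */
	unsigned long base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R |
				 CBE_IOPTE_M | (ioid & CBE_IOPTE_IOID_Mask);

	if (weak)
		pr_info("IOMMU: Using weak ordering for fixed mapping\n");
	else {
		pr_info("IOMMU: Using strong ordering for fixed mapping\n");
		base_pte |= CBE_IOPTE_SO_RW; /* strongly order reads/writes */
	}
	return base_pte;
}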

in cell_iommu_fixed_mapping_init():
914 	struct cbe_iommu *iommu;
922 		pr_debug("iommu: fixed mapping disabled, no axons found\n");
931 		pr_debug("iommu: no dma-ranges found, no fixed mapping\n");
936 	 * dynamic region, so find the top of the largest IOMMU window
959 		pr_debug("iommu: htab is NULL, on LPAR? Huh?\n");
968 			pr_debug("iommu: hash window not segment aligned\n");
977 				pr_debug("iommu: hash window doesn't fit in"
988 		iommu = cell_iommu_alloc(np);
989 		BUG_ON(!iommu);
998 		printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx "
999 		       "fixed window 0x%lx-0x%lx\n", iommu->nid, dbase,
1002 		cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
1003 		iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
1005 		cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
1007 		cell_iommu_enable_hardware(iommu);
1008 		cell_iommu_setup_window(iommu, np, dbase, dsize, 0);

in cell_iommu_init():
1045 	/* If IOMMU is disabled or we have little enough RAM to not need
1048 	 * Note: should we make sure we have the IOMMU actually disabled ?
1061 	/* Create an iommu for each /axon node. */
1068 	/* Create an iommu for each toplevel /pci-internal node for
1077 	/* Setup default PCI iommu ops */