Lines matching "iommu" in the Cell Broadband Engine IOMMU driver (selected fragments; the code between matches is elided)
 * IOMMU implementation for Cell Broadband Processor Architecture
#include <asm/iommu.h>
/* IOMMU sizing */
        struct cbe_iommu *iommu;        /* member of struct iommu_window */
static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
        reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;
/* in tce_build_cell() */
        invalidate_tce_cache(window->iommu, io_pte, npages);
/* in tce_free_cell() */
        __pa(window->iommu->pad_page) |
        invalidate_tce_cache(window->iommu, io_pte, npages);
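Note that the free path above does not simply zero the PTEs: freed entries are redirected to a private zeroed "pad" page before the IOC's TCE cache is flushed, so any straggling DMA hits harmless memory rather than a stale translation. A minimal sketch of that idea; the CBE_IOPTE_IOID_Mask name, the omitted protection bits, and the mb() placement are assumptions:

/* Sketch: retire npages TCEs by pointing them at the zeroed pad page,
 * then invalidate the IOC's cached translations for that range. */
static void tce_free_cell_sketch(struct iommu_window *window,
                                 unsigned long *io_pte, long npages)
{
        unsigned long pte;
        long i;

        /* a harmless target for any in-flight DMA to a freed address */
        pte = __pa(window->iommu->pad_page) |
                (window->ioid & CBE_IOPTE_IOID_Mask);

        for (i = 0; i < npages; i++)
                io_pte[i] = pte;

        mb();   /* PTE stores must be visible before the cache flush */
        invalidate_tce_cache(window->iommu, io_pte, npages);
}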
/* in ioc_interrupt() */
        struct cbe_iommu *iommu = data;
        stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
        printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat);
        out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat);
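These four fragments have the usual read-report-acknowledge shape of an exception handler; a sketch of how they plausibly fit together (the decoding of individual exception bits falls in the elided lines and is omitted here as well):

static irqreturn_t ioc_interrupt_sketch(int irq, void *data)
{
        struct cbe_iommu *iommu = data;
        unsigned long stat;

        /* latch the DMA exception status from the translate registers */
        stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
        printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat);

        /* writing the status back acknowledges and re-arms the exception */
        out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat);

        return IRQ_HANDLED;
}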
/* in cell_iommu_find_ioc() */
        printk(KERN_ERR "iommu: can't get address for %pOF\n",
static void __init cell_iommu_setup_stab(struct cbe_iommu *iommu,
        pr_debug("%s: iommu[%d]: segments: %lu\n",
                 __func__, iommu->nid, segments);
        page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
        iommu->stab = page_address(page);
        memset(iommu->stab, 0, stab_size);
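The sizing that feeds alloc_pages_node() falls between the matches; a sketch of the likely arithmetic, assuming one 64-bit segment-table entry per IO_SEGMENT_SHIFT-sized segment and the window parameters (dbase/dsize, fbase/fsize) used by the callers further down:

        /* Sketch: enough STEs to cover the dynamic and fixed windows */
        segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT;
        stab_size = segments * sizeof(unsigned long);

        page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
        BUG_ON(!page);
        iommu->stab = page_address(page);
        memset(iommu->stab, 0, stab_size);      /* all segments start invalid */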
static unsigned long *__init cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
        pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __func__,
                 iommu->nid, ptab_size, get_order(ptab_size));
        page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
        pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
                 __func__, iommu->nid, iommu->stab, ptab,
        pr_debug("Setting up IOMMU stab:\n");
                iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) *
                pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
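The truncated assignment above is the segment-table fill: consecutive segments point at consecutive n_pte_pages-sized slices of the newly allocated page table. A sketch of the complete loop, treating the STE flag word reg and the start_seg/segments bounds as already computed:

        /* Sketch: segment i's STE = flags | physical address of its own
         * slice of ptab; each slice is n_pte_pages 4K pages of PTEs. */
        for (i = start_seg; i < (start_seg + segments); i++) {
                iommu->stab[i] = reg |
                        (__pa(ptab) + (n_pte_pages << 12) * (i - start_seg));
                pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
        }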
static void __init cell_iommu_enable_hardware(struct cbe_iommu *iommu)
        if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
                      __func__, iommu->nid);
        iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
        iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;
        /* setup interrupts for the iommu. */
        reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
        out_be64(iommu->xlate_regs + IOC_IO_ExcpStat,
        out_be64(iommu->xlate_regs + IOC_IO_ExcpMask,
                        IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
        ret = request_irq(virq, ioc_interrupt, 0, iommu->name, iommu);
        /* set the IOC segment table origin register (and turn on the iommu) */
        reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
        out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
        in_be64(iommu->xlate_regs + IOC_IOST_Origin);
        reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
        out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
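Read together, the tail of cell_iommu_enable_hardware() is a short fixed sequence; a consolidated sketch with light annotation (error handling and the exception-mask values fall in the elided lines):

        /* Sketch: point the IOC at the segment table and switch it on */
        reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
        out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
        in_be64(iommu->xlate_regs + IOC_IOST_Origin);   /* flush the write */

        /* then set the translation-enable bit in the command config */
        reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
        out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);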
static void __init cell_iommu_setup_hardware(struct cbe_iommu *iommu,
        cell_iommu_setup_stab(iommu, base, size, 0, 0);
        iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
        cell_iommu_enable_hardware(iommu);
static struct iommu_window *find_window(struct cbe_iommu *iommu,
        list_for_each_entry(window, &(iommu->windows), list) {
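The loop body falls between the matches; a window is presumably selected by its (offset, size) pair, as in this sketch:

static struct iommu_window *find_window_sketch(struct cbe_iommu *iommu,
                unsigned long offset, unsigned long size)
{
        struct iommu_window *window;

        /* Sketch: linear scan of this IOMMU's window list */
        list_for_each_entry(window, &(iommu->windows), list) {
                if (window->offset == offset && window->size == size)
                        return window;
        }
        return NULL;
}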
/* in cell_iommu_get_ioid() */
        printk(KERN_WARNING "iommu: missing ioid for %pOF using 0\n",
cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
        window = kzalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
        window->iommu = iommu;
        window->table.it_base = (unsigned long)iommu->ptab;
        window->table.it_index = iommu->nid;
        if (!iommu_init_table(&window->table, iommu->nid, 0, 0))
                panic("Failed to initialize iommu table");
        list_add(&window->list, &iommu->windows);
        /* We need to map and reserve the first IOMMU page since it's used
        page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
        iommu->pad_page = page_address(page);
        clear_page(iommu->pad_page);
                       (unsigned long)iommu->pad_page, DMA_TO_DEVICE, 0);
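The pad-page fragments implement the truncated comment above them: TCE 0 is permanently reserved and backed by a private zeroed page, so bus address 0 (a common wild-pointer value) can never reach real data. A sketch of the whole step; the use of the it_map allocation bitmap is assumed from the generic powerpc iommu_table API:

        /* Sketch: allocate and zero the pad page on this IOMMU's node ... */
        page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
        BUG_ON(!page);
        iommu->pad_page = page_address(page);
        clear_page(iommu->pad_page);

        /* ... mark TCE 0 as allocated so it is never handed out ... */
        __set_bit(0, window->table.it_map);

        /* ... and map it to the pad page, read-only from the device */
        tce_build_cell(&window->table, window->table.it_offset, 1,
                       (unsigned long)iommu->pad_page, DMA_TO_DEVICE, 0);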
/* in cell_get_iommu_table() */
        struct cbe_iommu *iommu;
         * node's iommu. We -might- do something smarter later though it may
        iommu = cell_iommu_for_node(dev_to_node(dev));
        if (iommu == NULL || list_empty(&iommu->windows)) {
                dev_err(dev, "iommu: missing iommu for %pOF (node %d)\n",
        window = list_entry(iommu->windows.next, struct iommu_window, list);
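Device-to-table lookup is thus "the NUMA node picks the IOMMU, the first (currently only) window picks the table"; a sketch of the complete helper, with dev->of_node in the error path assumed from the %pOF format:

static struct iommu_table *cell_get_iommu_table_sketch(struct device *dev)
{
        struct iommu_window *window;
        struct cbe_iommu *iommu;

        /* Current implementation uses the first window of each node's
         * iommu; see the "might do something smarter" note above. */
        iommu = cell_iommu_for_node(dev_to_node(dev));
        if (iommu == NULL || list_empty(&iommu->windows)) {
                dev_err(dev, "iommu: missing iommu for %pOF (node %d)\n",
                        dev->of_node, dev_to_node(dev));
                return NULL;
        }
        window = list_entry(iommu->windows.next, struct iommu_window, list);

        return &window->table;
}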
/* in cell_iommu_alloc() */
        struct cbe_iommu *iommu;
                printk(KERN_ERR "iommu: failed to get node for %pOF\n",
        pr_debug("iommu: setting up iommu for node %d (%pOF)\n",
        /* XXX todo: If we can have multiple windows on the same IOMMU, which
         * iommu for that node is already setup.
         * multiple window support since the cell iommu supports per-page ioids
                printk(KERN_ERR "iommu: too many IOMMUs detected ! (%pOF)\n",
        iommu = &iommus[i];
        iommu->stab = NULL;
        iommu->nid = nid;
        snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i);
        INIT_LIST_HEAD(&iommu->windows);
        return iommu;
/* in cell_iommu_init_one() */
        struct cbe_iommu *iommu;
        iommu = cell_iommu_alloc(np);
        if (!iommu)
        cell_iommu_setup_hardware(iommu, base, size);
        cell_iommu_setup_window(iommu, np, base, size,
/* in cell_disable_iommus() */
        pr_debug("iommu: cleaning up iommu on node %d\n", node);
/* in cell_iommu_init_disabled() */
        /* When no iommu is present, we use direct DMA ops */
         * all of physical memory. If not, we force enable IOMMU
        printk(KERN_WARNING "iommu: force-enabled, dma window"
        printk("iommu: disabled, direct DMA offset is 0x%lx\n",
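The decision those two messages describe boils down to one comparison; a sketch, where the (base, size) DMA window is assumed to come from the device tree, memblock_end_of_DRAM() is the top of RAM, and the cell_dma_nommu_offset name is an assumption:

        /* Sketch: direct DMA is only safe when the DMA window covers
         * every byte of RAM; otherwise the IOMMU stays force-enabled. */
        if (size < memblock_end_of_DRAM()) {
                printk(KERN_WARNING "iommu: force-enabled, dma window"
                       " (%ldMB) smaller than total memory (%lldMB)\n",
                       size >> 20, memblock_end_of_DRAM() >> 20);
                return -ENODEV;
        }

        /* all device DMA is then simply offset by the window base */
        cell_dma_nommu_offset += base;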
 * Fixed IOMMU mapping support
 * This code adds support for setting up a fixed IOMMU mapping on certain
 * we setup the fixed mapping immediately above the normal IOMMU window.
 * IOMMU window from 0-2GB and the fixed mapping window from 2GB to 6GB. In
 * mapping above the normal IOMMU window as we would run out of address space.
 * Instead we move the normal IOMMU window to coincide with the hash page
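As a worked instance of the comment's example layout, the window arithmetic for a 4GB machine comes out as below; the 2GB dynamic-window size is the example value from the comment, not a hardware constant:

        /* Sketch of the layout described above, for 4GB of RAM:
         *   dynamic window: bus 0x0        - 0x80000000  (2GB, 4K TCEs)
         *   fixed window:   bus 0x80000000 - 0x180000000 (4GB, linear)
         * so bus address fbase + p always reaches physical address p. */
        unsigned long dbase = 0;
        unsigned long dsize = 0x80000000ul;     /* 2GB dynamic window */
        unsigned long fbase = dbase + dsize;    /* fixed map sits on top */
        unsigned long fsize = memblock_end_of_DRAM();   /* covers all RAM */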
/* in cell_iommu_get_fixed_address() */
        dev_dbg(dev, "iommu: no dma-ranges found\n");
        dev_dbg(dev, "iommu: no suitable range found!\n");
/* in insert_16M_pte() */
        pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n",
static void __init cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
        ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24);
        pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);
                pr_info("IOMMU: Using weak ordering for fixed mapping\n");
                pr_info("IOMMU: Using strong ordering for fixed mapping\n");
                        pr_debug("iommu: fixed/dynamic overlap, skipping\n");
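The "fixed/dynamic overlap, skipping" message belongs to the mapping loop: the fixed window is populated with 16MB IOMMU pages (the trailing 24 passed to cell_iommu_alloc_ptab() above is the page shift), skipping pages the dynamic window already owns. A sketch, with the pte flag word treated as composed by the weak/strong-ordering logic shown above:

        /* Sketch: linear-map fsize bytes of RAM in 16MB steps; physical
         * address uaddr appears on the bus at fbase + uaddr. */
        for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) {
                /* Don't touch the dynamic region */
                ioaddr = uaddr + fbase;
                if (ioaddr >= dbase && ioaddr < (dbase + dsize)) {
                        pr_debug("iommu: fixed/dynamic overlap, skipping\n");
                        continue;
                }
                insert_16M_pte(uaddr, ptab, pte);
        }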
/* in cell_iommu_fixed_mapping_init() */
        struct cbe_iommu *iommu;
                pr_debug("iommu: fixed mapping disabled, no axons found\n");
                pr_debug("iommu: no dma-ranges found, no fixed mapping\n");
         * dynamic region, so find the top of the largest IOMMU window
                pr_debug("iommu: htab is NULL, on LPAR? Huh?\n");
                pr_debug("iommu: hash window not segment aligned\n");
                pr_debug("iommu: hash window doesn't fit in"
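The last two failures guard the "move the dynamic window onto the hash page table" trick from the header comment; a sketch of the checks, assuming htab_address/htab_size_bytes from the powerpc hash-MMU code and IO_SEGMENT_SHIFT as the segment size:

        /* Sketch: the hash table must be segment aligned ... */
        hbase = __pa(htab_address);
        hend  = hbase + htab_size_bytes;

        if ((hbase != ALIGN(hbase, 1 << IO_SEGMENT_SHIFT)) ||
            (hend  != ALIGN(hend,  1 << IO_SEGMENT_SHIFT))) {
                pr_debug("iommu: hash window not segment aligned\n");
                return -1;
        }

        /* ... and must lie inside the dynamic window that will cover it */
        if ((hbase < dbase) || (hend > (dbase + dsize))) {
                pr_debug("iommu: hash window doesn't fit in"
                         " real DMA window\n");
                return -1;
        }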
        iommu = cell_iommu_alloc(np);
        BUG_ON(!iommu);
        printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx "
               "fixed window 0x%lx-0x%lx\n", iommu->nid, dbase,
        cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
        iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
        cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
        cell_iommu_enable_hardware(iommu);
        cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
/* in cell_iommu_init() */
        /* If IOMMU is disabled or we have little enough RAM to not need
         * Note: should we make sure we have the IOMMU actually disabled ?
        /* Create an iommu for each /axon node. */
        /* Create an iommu for each toplevel /pci-internal node for
        /* Setup default PCI iommu ops */
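The two "Create an iommu" comments correspond to two device-tree walks; a sketch of the probe loops, where SPIDER_DMA_OFFSET (the base offset the Spider south-bridge case needs) is taken on trust from the driver's constants:

        /* Sketch: one cbe_iommu per toplevel axon node ... */
        for_each_node_by_name(np, "axon") {
                if (np->parent == NULL || np->parent->parent != NULL)
                        continue;       /* only toplevel nodes */
                cell_iommu_init_one(np, 0);
        }

        /* ... and one per toplevel pci-internal node, for older firmware */
        for_each_node_by_name(np, "pci-internal") {
                if (np->parent == NULL || np->parent->parent != NULL)
                        continue;
                cell_iommu_init_one(np, SPIDER_DMA_OFFSET);
        }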