Lines matching full:iommu (whole-word search) in iommu.c

2 /* iommu.c: Generic sparc64 IOMMU support.
15 #include <linux/iommu-helper.h>
17 #include <asm/iommu-common.h>
23 #include <asm/iommu.h>
49 /* Must be invoked under the IOMMU lock. */
52 struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl); in iommu_flushall() local
53 if (iommu->iommu_flushinv) { in iommu_flushall()
54 iommu_write(iommu->iommu_flushinv, ~(u64)0); in iommu_flushall()
59 tag = iommu->iommu_tags; in iommu_flushall()
66 (void) iommu_read(iommu->write_complete_reg); in iommu_flushall()
80 #define IOPTE_IS_DUMMY(iommu, iopte) \
81 ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa) argument
83 static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte) in iopte_make_dummy() argument
88 val |= iommu->dummy_page_pa; in iopte_make_dummy()
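
A note on the dummy-page pattern behind the hits above: unused IOPTEs are pointed at a single zeroed page, so a stray or late DMA through an unmapped entry lands somewhere harmless, and IOPTE_IS_DUMMY() recognizes such entries by comparing the page-frame bits against dummy_page_pa. A minimal sketch reconstructed from the matched lines (the body of iopte_make_dummy() between them is assumed, not quoted from the file):

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;		/* drop the real page-frame bits */
	val |= iommu->dummy_page_pa;	/* point at the shared zeroed page */

	iopte_val(*iopte) = val;
}
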
93 int iommu_table_init(struct iommu *iommu, int tsbsize, in iommu_table_init() argument
102 /* Setup initial software IOMMU state. */ in iommu_table_init()
103 spin_lock_init(&iommu->lock); in iommu_table_init()
104 iommu->ctx_lowest_free = 1; in iommu_table_init()
105 iommu->tbl.table_map_base = dma_offset; in iommu_table_init()
106 iommu->dma_addr_mask = dma_addr_mask; in iommu_table_init()
111 iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node); in iommu_table_init()
112 if (!iommu->tbl.map) in iommu_table_init()
115 iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT, in iommu_table_init()
124 printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n"); in iommu_table_init()
127 iommu->dummy_page = (unsigned long) page_address(page); in iommu_table_init()
128 memset((void *)iommu->dummy_page, 0, PAGE_SIZE); in iommu_table_init()
129 iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page); in iommu_table_init()
131 /* Now allocate and setup the IOMMU page table itself. */ in iommu_table_init()
135 printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n"); in iommu_table_init()
138 iommu->page_table = (iopte_t *)page_address(page); in iommu_table_init()
141 iopte_make_dummy(iommu, &iommu->page_table[i]); in iommu_table_init()
146 free_page(iommu->dummy_page); in iommu_table_init()
147 iommu->dummy_page = 0UL; in iommu_table_init()
150 kfree(iommu->tbl.map); in iommu_table_init()
151 iommu->tbl.map = NULL; in iommu_table_init()
157 struct iommu *iommu, in alloc_npages() argument
162 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, in alloc_npages()
167 return iommu->page_table + entry; in alloc_npages()
170 static int iommu_alloc_ctx(struct iommu *iommu) in iommu_alloc_ctx() argument
172 int lowest = iommu->ctx_lowest_free; in iommu_alloc_ctx()
173 int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest); in iommu_alloc_ctx()
176 n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1); in iommu_alloc_ctx()
178 printk(KERN_WARNING "IOMMU: Ran out of contexts.\n"); in iommu_alloc_ctx()
183 __set_bit(n, iommu->ctx_bitmap); in iommu_alloc_ctx()
188 static inline void iommu_free_ctx(struct iommu *iommu, int ctx) in iommu_free_ctx() argument
191 __clear_bit(ctx, iommu->ctx_bitmap); in iommu_free_ctx()
192 if (ctx < iommu->ctx_lowest_free) in iommu_free_ctx()
193 iommu->ctx_lowest_free = ctx; in iommu_free_ctx()
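
The two context helpers hit above form a small bitmap allocator with a lowest-free hint: allocation scans ctx_bitmap from the hint, wraps around to context 1 if it runs off the end (context 0 is never handed out: both the hint and the wrap search start at 1), and freeing clears the bit and lowers the hint. A sketch assembled from the matched lines, with the branch structure joining them assumed:

static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);

	if (n == IOMMU_NUM_CTXS) {
		/* wrapped: retry from context 1 up to the old hint */
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (n == lowest) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;	/* fall back to "no context" */
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}
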
202 struct iommu *iommu; in dma_4u_alloc_coherent() local
221 iommu = dev->archdata.iommu; in dma_4u_alloc_coherent()
223 iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT); in dma_4u_alloc_coherent()
230 *dma_addrp = (iommu->tbl.table_map_base + in dma_4u_alloc_coherent()
231 ((iopte - iommu->page_table) << IO_PAGE_SHIFT)); in dma_4u_alloc_coherent()
250 struct iommu *iommu; in dma_4u_free_coherent() local
254 iommu = dev->archdata.iommu; in dma_4u_free_coherent()
256 iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE); in dma_4u_free_coherent()
268 struct iommu *iommu; in dma_4u_map_page() local
276 iommu = dev->archdata.iommu; in dma_4u_map_page()
286 base = alloc_npages(dev, iommu, npages); in dma_4u_map_page()
287 spin_lock_irqsave(&iommu->lock, flags); in dma_4u_map_page()
289 if (iommu->iommu_ctxflush) in dma_4u_map_page()
290 ctx = iommu_alloc_ctx(iommu); in dma_4u_map_page()
291 spin_unlock_irqrestore(&iommu->lock, flags); in dma_4u_map_page()
296 bus_addr = (iommu->tbl.table_map_base + in dma_4u_map_page()
297 ((base - iommu->page_table) << IO_PAGE_SHIFT)); in dma_4u_map_page()
313 iommu_free_ctx(iommu, ctx); in dma_4u_map_page()
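
The address arithmetic in the dma_4u_map_page() hits above is the heart of the 4u mapping path: the bus address handed back to the driver is the table's base DVMA address plus the allocated entry's index scaled by the IOMMU page size. A worked example with purely illustrative numbers (not taken from the file):

	/* If table_map_base were 0xc0000000, IO_PAGE_SHIFT were 13 (8K IOMMU
	 * pages) and alloc_npages() returned page_table + 5, then:
	 *   bus_addr = 0xc0000000 + (5 << 13) = 0xc000a000
	 */
	bus_addr = (iommu->tbl.table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
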
320 static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, in strbuf_flush() argument
327 iommu->iommu_ctxflush) { in strbuf_flush()
370 (void) iommu_read(iommu->write_complete_reg); in strbuf_flush()
390 struct iommu *iommu; in dma_4u_unmap_page() local
401 iommu = dev->archdata.iommu; in dma_4u_unmap_page()
406 base = iommu->page_table + in dma_4u_unmap_page()
407 ((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT); in dma_4u_unmap_page()
410 spin_lock_irqsave(&iommu->lock, flags); in dma_4u_unmap_page()
414 if (iommu->iommu_ctxflush) in dma_4u_unmap_page()
419 strbuf_flush(strbuf, iommu, bus_addr, ctx, in dma_4u_unmap_page()
424 iopte_make_dummy(iommu, base + i); in dma_4u_unmap_page()
426 iommu_free_ctx(iommu, ctx); in dma_4u_unmap_page()
427 spin_unlock_irqrestore(&iommu->lock, flags); in dma_4u_unmap_page()
429 iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE); in dma_4u_unmap_page()
443 struct iommu *iommu; in dma_4u_map_sg() local
448 iommu = dev->archdata.iommu; in dma_4u_map_sg()
450 if (nelems == 0 || !iommu) in dma_4u_map_sg()
453 spin_lock_irqsave(&iommu->lock, flags); in dma_4u_map_sg()
456 if (iommu->iommu_ctxflush) in dma_4u_map_sg()
457 ctx = iommu_alloc_ctx(iommu); in dma_4u_map_sg()
476 base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT; in dma_4u_map_sg()
487 /* Allocate iommu entries for that segment */ in dma_4u_map_sg()
490 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, in dma_4u_map_sg()
496 printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx" in dma_4u_map_sg()
497 " npages %lx\n", iommu, paddr, npages); in dma_4u_map_sg()
501 base = iommu->page_table + entry; in dma_4u_map_sg()
504 dma_addr = iommu->tbl.table_map_base + in dma_4u_map_sg()
545 spin_unlock_irqrestore(&iommu->lock, flags); in dma_4u_map_sg()
564 entry = (vaddr - iommu->tbl.table_map_base) in dma_4u_map_sg()
566 base = iommu->page_table + entry; in dma_4u_map_sg()
569 iopte_make_dummy(iommu, base + j); in dma_4u_map_sg()
571 iommu_tbl_range_free(&iommu->tbl, vaddr, npages, in dma_4u_map_sg()
579 spin_unlock_irqrestore(&iommu->lock, flags); in dma_4u_map_sg()
587 static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg) in fetch_sg_ctx() argument
591 if (iommu->iommu_ctxflush) { in fetch_sg_ctx()
594 struct iommu_map_table *tbl = &iommu->tbl; in fetch_sg_ctx()
597 base = iommu->page_table + in fetch_sg_ctx()
612 struct iommu *iommu; in dma_4u_unmap_sg() local
616 iommu = dev->archdata.iommu; in dma_4u_unmap_sg()
619 ctx = fetch_sg_ctx(iommu, sglist); in dma_4u_unmap_sg()
621 spin_lock_irqsave(&iommu->lock, flags); in dma_4u_unmap_sg()
635 entry = ((dma_handle - iommu->tbl.table_map_base) in dma_4u_unmap_sg()
637 base = iommu->page_table + entry; in dma_4u_unmap_sg()
641 strbuf_flush(strbuf, iommu, dma_handle, ctx, in dma_4u_unmap_sg()
645 iopte_make_dummy(iommu, base + i); in dma_4u_unmap_sg()
647 iommu_tbl_range_free(&iommu->tbl, dma_handle, npages, in dma_4u_unmap_sg()
652 iommu_free_ctx(iommu, ctx); in dma_4u_unmap_sg()
654 spin_unlock_irqrestore(&iommu->lock, flags); in dma_4u_unmap_sg()
661 struct iommu *iommu; in dma_4u_sync_single_for_cpu() local
665 iommu = dev->archdata.iommu; in dma_4u_sync_single_for_cpu()
671 spin_lock_irqsave(&iommu->lock, flags); in dma_4u_sync_single_for_cpu()
679 if (iommu->iommu_ctxflush && in dma_4u_sync_single_for_cpu()
682 struct iommu_map_table *tbl = &iommu->tbl; in dma_4u_sync_single_for_cpu()
684 iopte = iommu->page_table + in dma_4u_sync_single_for_cpu()
690 strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); in dma_4u_sync_single_for_cpu()
692 spin_unlock_irqrestore(&iommu->lock, flags); in dma_4u_sync_single_for_cpu()
699 struct iommu *iommu; in dma_4u_sync_sg_for_cpu() local
705 iommu = dev->archdata.iommu; in dma_4u_sync_sg_for_cpu()
711 spin_lock_irqsave(&iommu->lock, flags); in dma_4u_sync_sg_for_cpu()
715 if (iommu->iommu_ctxflush && in dma_4u_sync_sg_for_cpu()
718 struct iommu_map_table *tbl = &iommu->tbl; in dma_4u_sync_sg_for_cpu()
720 iopte = iommu->page_table + ((sglist[0].dma_address - in dma_4u_sync_sg_for_cpu()
736 strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); in dma_4u_sync_sg_for_cpu()
738 spin_unlock_irqrestore(&iommu->lock, flags); in dma_4u_sync_sg_for_cpu()
743 struct iommu *iommu = dev->archdata.iommu; in dma_4u_supported() local
748 if (device_mask < iommu->dma_addr_mask) in dma_4u_supported()
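
The last hits show the support check in dma_4u_supported(): a device may use this translation path only if its DMA mask covers the IOMMU's dma_addr_mask. A minimal sketch of just that comparison (the surrounding function is not quoted here and does more):

	if (device_mask < iommu->dma_addr_mask)
		return 0;	/* device cannot address the whole DVMA range */
	return 1;
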