Lines matching "+full:iommu +full:-map" — fragmentary excerpt from iommu.c (generic sparc64 IOMMU support). Only the matching lines are shown below, grouped by the function they belong to; intervening code is omitted.
// SPDX-License-Identifier: GPL-2.0
/* iommu.c: Generic sparc64 IOMMU support. */

#include <linux/dma-map-ops.h>
#include <linux/iommu-helper.h>

#include <asm/iommu-common.h>
#include <asm/iommu.h>

/* Bodies of the streaming-cache (STC) helper macros; the #define lines
 * themselves did not match the query:
 */
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
	(*((STC)->strbuf_flushflag) = 0UL)
	(*((STC)->strbuf_flushflag) != 0UL)
/* Must be invoked under the IOMMU lock. */
/* from iommu_flushall(): */
	struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);

	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);

	tag = iommu->iommu_tags;

	(void) iommu_read(iommu->write_complete_reg);
#define IOPTE_IS_DUMMY(iommu, iopte) \
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)

	val |= iommu->dummy_page_pa;
int iommu_table_init(struct iommu *iommu, int tsbsize,

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->tbl.table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map. */
	iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node);
	if (!iommu->tbl.map)
		return -ENOMEM;

	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,

		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");

	iommu->dummy_page = (unsigned long) page_address(page);
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself. */

		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");

	iommu->page_table = (iopte_t *)page_address(page);

		iopte_make_dummy(iommu, &iommu->page_table[i]);

	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

	kfree(iommu->tbl.map);
	iommu->tbl.map = NULL;

	return -ENOMEM;
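/*
 * Illustrative sketch, not part of the excerpt: how a bus controller
 * driver might call iommu_table_init() at probe time. The trailing
 * parameters (dma_offset, dma_addr_mask, numa_node) are inferred from
 * the identifiers used in the fragment above, and the window base and
 * DMA mask below are placeholder values, not taken from a real caller.
 */
static int example_iommu_setup(struct iommu *iommu, int tsb_bytes, int node)
{
	/* Example DVMA window base and 32-bit DMA mask (placeholder values). */
	return iommu_table_init(iommu, tsb_bytes, 0xc0000000, 0xffffffff, node);
}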
/* from alloc_npages(); the first matched line is part of its parameter list: */
		struct iommu *iommu,

	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	return iommu->page_table + entry;
static int iommu_alloc_ctx(struct iommu *iommu)

	int lowest = iommu->ctx_lowest_free;
	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);

	n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);

	printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");

	__set_bit(n, iommu->ctx_bitmap);
static inline void iommu_free_ctx(struct iommu *iommu, int ctx)

	__clear_bit(ctx, iommu->ctx_bitmap);
	if (ctx < iommu->ctx_lowest_free)
		iommu->ctx_lowest_free = ctx;
/* from dma_4u_alloc_coherent(): */
	struct iommu *iommu;

	nid = dev->archdata.numa_node;

	iommu = dev->archdata.iommu;

	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);

	*dma_addrp = (iommu->tbl.table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));

	while (npages--) {
/* from dma_4u_free_coherent(): */
	struct iommu *iommu;

	iommu = dev->archdata.iommu;

	iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
/* from dma_4u_map_page(): */
	struct iommu *iommu;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);

	base = alloc_npages(dev, iommu, npages);
	spin_lock_irqsave(&iommu->lock, flags);

	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	bus_addr = (iommu->tbl.table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));

	if (strbuf->strbuf_enabled)

	iommu_free_ctx(iommu, ctx);
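/*
 * Worked example of the page-count arithmetic above, assuming sparc64's
 * 8K IOMMU pages (IO_PAGE_SHIFT == 13); the final shift down from bytes
 * to a page count happens on a line not captured by the query.
 *
 *   oaddr = 0x1ffc0, sz = 0x2100
 *   oaddr & IO_PAGE_MASK      = 0x1e000   (start of the first IOMMU page)
 *   IO_PAGE_ALIGN(oaddr + sz) = 0x24000   (end of the last IOMMU page)
 *   difference                = 0x6000 -> 0x6000 >> 13 = 3 pages
 *
 * bus_addr is then table_map_base plus the allocated entry index shifted
 * up by IO_PAGE_SHIFT, so the device sees addresses inside the
 * controller's DVMA window rather than raw physical addresses.
 */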
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {

		flushreg = strbuf->strbuf_ctxflush;

		iommu_write(strbuf->strbuf_pflush, vaddr);

	/* ... the streaming cache, no flush-flag synchronization needs ... */

	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit--;
/* from dma_4u_unmap_page(): */
	struct iommu *iommu;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);

	base = iommu->page_table +
		((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	if (iommu->iommu_ctxflush)

	if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		strbuf_flush(strbuf, iommu, bus_addr, ctx,

	iopte_make_dummy(iommu, base + i);

	iommu_free_ctx(iommu, ctx);
	spin_unlock_irqrestore(&iommu->lock, flags);

	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
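/*
 * Usage sketch (generic DMA API from <linux/dma-mapping.h>, not from this
 * file): drivers never call dma_4u_map_page()/dma_4u_unmap_page() directly;
 * they go through dma_map_page()/dma_unmap_page(), which dispatch here via
 * the bus's dma_map_ops. The function name below is hypothetical.
 */
static int example_map_one_page(struct device *dev, struct page *page,
				unsigned long offset, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;
	/* ... point the device at "handle" and let it DMA ... */
	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}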
/* from dma_4u_map_sg(): */
	struct iommu *iommu;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;
	if (nelems == 0 || !iommu)
		return -EINVAL;

	spin_lock_irqsave(&iommu->lock, flags);

	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	if (strbuf->strbuf_enabled)

	outs->dma_length = 0;

	base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;

	slen = s->length;

	/* Allocate iommu entries for that segment */
	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
				      &handle, (unsigned long)(-1), 0);

	printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
	       " npages %lx\n", iommu, paddr, npages);

	base = iommu->page_table + entry;

	dma_addr = iommu->tbl.table_map_base +

	dma_addr |= (s->offset & ~IO_PAGE_MASK);

	while (npages--) {

	/* ... - allocated dma_addr isn't contiguous to previous allocation ... */

	(outs->dma_length + s->length > max_seg_size) ||

	outs->dma_length += s->length;

	outs->dma_address = dma_addr;
	outs->dma_length = slen;

	spin_unlock_irqrestore(&iommu->lock, flags);

	outs->dma_length = 0;

	if (s->dma_length != 0) {

		vaddr = s->dma_address & IO_PAGE_MASK;
		npages = iommu_num_pages(s->dma_address, s->dma_length,

		entry = (vaddr - iommu->tbl.table_map_base)

		base = iommu->page_table + entry;

		iopte_make_dummy(iommu, base + j);

		iommu_tbl_range_free(&iommu->tbl, vaddr, npages,

		s->dma_length = 0;

	spin_unlock_irqrestore(&iommu->lock, flags);

	return -EINVAL;
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)

	if (iommu->iommu_ctxflush) {

		struct iommu_map_table *tbl = &iommu->tbl;

		bus_addr = sg->dma_address & IO_PAGE_MASK;
		base = iommu->page_table +
			((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);
/* from dma_4u_unmap_sg(): */
	struct iommu *iommu;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	ctx = fetch_sg_ctx(iommu, sglist);

	spin_lock_irqsave(&iommu->lock, flags);

	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;

		entry = ((dma_handle - iommu->tbl.table_map_base)

		base = iommu->page_table + entry;

		if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			strbuf_flush(strbuf, iommu, dma_handle, ctx,

		iopte_make_dummy(iommu, base + i);

		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
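/*
 * Usage sketch (generic DMA API, not from this file): scatter-gather
 * mappings reach dma_4u_map_sg()/dma_4u_unmap_sg() through
 * dma_map_sg()/dma_unmap_sg(). setup_hw_descriptor() is a hypothetical
 * driver helper; dev/sgl/nents are the caller's own.
 */
static int example_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nents)
{
	struct scatterlist *sg;
	int i, n;

	n = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	if (n == 0)
		return -EIO;
	for_each_sg(sgl, sg, n, i)
		setup_hw_descriptor(sg_dma_address(sg), sg_dma_len(sg));
	/* ... after the device has finished ... */
	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	return 0;
}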
/* from dma_4u_sync_single_for_cpu(): */
	struct iommu *iommu;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);

	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {

		struct iommu_map_table *tbl = &iommu->tbl;

		iopte = iommu->page_table +
			((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);

	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
/* from dma_4u_sync_sg_for_cpu(): */
	struct iommu *iommu;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)

	spin_lock_irqsave(&iommu->lock, flags);

	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {

		struct iommu_map_table *tbl = &iommu->tbl;

		iopte = iommu->page_table + ((sglist[0].dma_address -
			tbl->table_map_base) >> IO_PAGE_SHIFT);

	if (sg->dma_length == 0)

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
/* from dma_4u_supported(): */
	struct iommu *iommu = dev->archdata.iommu;

	if (device_mask < iommu->dma_addr_mask)
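/*
 * Illustrative sketch, not visible in the matched lines: handlers like the
 * dma_4u_* functions above are normally published to the DMA core through a
 * struct dma_map_ops instance. The member names below are the generic ones
 * from <linux/dma-map-ops.h>; the instance name is assumed, and whether this
 * file wires the ops up exactly this way is not shown in the excerpt.
 */
static const struct dma_map_ops sun4u_dma_ops = {
	.alloc			= dma_4u_alloc_coherent,
	.free			= dma_4u_free_coherent,
	.map_page		= dma_4u_map_page,
	.unmap_page		= dma_4u_unmap_page,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
	.dma_supported		= dma_4u_supported,
};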