Lines matching refs:tbl — a cross-reference listing of every line that references the iommu_table pointer tbl in the powerpc IOMMU allocator (these functions are from arch/powerpc/kernel/iommu.c). Each entry gives the source line number, the matching line, and the enclosing function.
164 struct iommu_table *tbl, in iommu_range_alloc() argument
197 pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1); in iommu_range_alloc()
200 pool = &(tbl->large_pool); in iommu_range_alloc()
202 pool = &(tbl->pools[pool_nr]); in iommu_range_alloc()
222 if (limit + tbl->it_offset > mask) { in iommu_range_alloc()
223 limit = mask - tbl->it_offset + 1; in iommu_range_alloc()
230 pool = &(tbl->pools[0]); in iommu_range_alloc()
238 n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset, in iommu_range_alloc()
239 dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift), in iommu_range_alloc()
248 } else if (pass <= tbl->nr_pools) { in iommu_range_alloc()
251 pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1); in iommu_range_alloc()
252 pool = &tbl->pools[pool_nr]; in iommu_range_alloc()
273 pool->hint = (end + tbl->it_blocksize - 1) & in iommu_range_alloc()
274 ~(tbl->it_blocksize - 1); in iommu_range_alloc()
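The iommu_range_alloc() cluster above turns on two idioms: the pool is picked by masking a per-CPU hash with (nr_pools - 1) (line 197), and after a successful search the pool's hint is rounded up to the next it_blocksize boundary (lines 273-274). Both masks assume the operands are powers of two. A minimal userspace sketch of the same arithmetic; the constants and the largealloc threshold are invented for illustration:

#include <stdio.h>

#define NR_POOLS  4		/* must be a power of two */
#define BLOCKSIZE 16UL		/* must be a power of two */

struct pool { unsigned long start, end, hint; };

static struct pool pools[NR_POOLS];
static struct pool large_pool;

/* Mirror of the pool choice at lines 197/200/202: big requests go to
 * the single large pool, everything else hashes into a small pool. */
static struct pool *pick_pool(unsigned long cpu_hash, unsigned long npages,
			      unsigned long largealloc_threshold)
{
	if (npages >= largealloc_threshold)
		return &large_pool;
	return &pools[cpu_hash & (NR_POOLS - 1)];
}

/* Mirror of lines 273-274: round the next-search hint up to a
 * blocksize boundary so later searches start block-aligned. */
static void update_hint(struct pool *p, unsigned long end)
{
	p->hint = (end + BLOCKSIZE - 1) & ~(BLOCKSIZE - 1);
}

int main(void)
{
	struct pool *p = pick_pool(0x2f, 1, 15);

	update_hint(p, 37);
	printf("pool %ld, next hint %lu\n", (long)(p - pools), p->hint);
	return 0;
}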
286 static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, in iommu_alloc() argument
296 entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order); in iommu_alloc()
301 entry += tbl->it_offset; /* Offset into real TCE table */ in iommu_alloc()
302 ret = entry << tbl->it_page_shift; /* Set the return dma address */ in iommu_alloc()
305 build_fail = tbl->it_ops->set(tbl, entry, npages, in iommu_alloc()
307 IOMMU_PAGE_MASK(tbl), direction, attrs); in iommu_alloc()
315 __iommu_free(tbl, ret, npages); in iommu_alloc()
320 if (tbl->it_ops->flush) in iommu_alloc()
321 tbl->it_ops->flush(tbl); in iommu_alloc()
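iommu_alloc() shows the DMA-address construction: iommu_range_alloc() returns a table-relative bitmap index, it_offset converts that to the absolute TCE index, and it_page_shift turns the index into a bus address; on it_ops->set() failure the entry is freed again, and any it_ops->flush() hook runs last. The shift-and-offset arithmetic in isolation, with invented values:

#include <stdio.h>

int main(void)
{
	unsigned long it_offset = 0x100;	/* first TCE this table maps */
	unsigned int it_page_shift = 12;	/* 4K IOMMU pages */
	unsigned long entry = 0x20;		/* from iommu_range_alloc() */

	entry += it_offset;			/* absolute index into the TCE table */
	unsigned long dma = entry << it_page_shift;	/* returned DMA address */

	printf("TCE %#lx -> dma %#lx\n", entry, dma);
	return 0;
}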
329 static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr, in iommu_free_check() argument
334 entry = dma_addr >> tbl->it_page_shift; in iommu_free_check()
335 free_entry = entry - tbl->it_offset; in iommu_free_check()
337 if (((free_entry + npages) > tbl->it_size) || in iommu_free_check()
338 (entry < tbl->it_offset)) { in iommu_free_check()
343 printk(KERN_INFO "\tTable = 0x%llx\n", (u64)tbl); in iommu_free_check()
344 printk(KERN_INFO "\tbus# = 0x%llx\n", (u64)tbl->it_busno); in iommu_free_check()
345 printk(KERN_INFO "\tsize = 0x%llx\n", (u64)tbl->it_size); in iommu_free_check()
346 printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset); in iommu_free_check()
347 printk(KERN_INFO "\tindex = 0x%llx\n", (u64)tbl->it_index); in iommu_free_check()
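iommu_free_check() rejects frees that fall outside the table: the entry must sit at or above it_offset, and the freed range must not run past it_size entries. A standalone sketch of the same predicate (the inverse of the failure test at lines 337-338), with made-up geometry:

#include <stdbool.h>
#include <stdio.h>

static bool free_ok(unsigned long dma_addr, unsigned long npages,
		    unsigned int it_page_shift,
		    unsigned long it_offset, unsigned long it_size)
{
	unsigned long entry = dma_addr >> it_page_shift;
	unsigned long free_entry = entry - it_offset;

	return entry >= it_offset && free_entry + npages <= it_size;
}

int main(void)
{
	/* in range: prints 1 */
	printf("%d\n", free_ok(0x120000, 4, 12, 0x100, 0x1000));
	/* would run past the end of the table: prints 0 */
	printf("%d\n", free_ok(0x120000, 0x4000, 12, 0x100, 0x1000));
	return 0;
}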
357 static struct iommu_pool *get_pool(struct iommu_table *tbl, in get_pool() argument
361 unsigned long largepool_start = tbl->large_pool.start; in get_pool()
365 p = &tbl->large_pool; in get_pool()
367 unsigned int pool_nr = entry / tbl->poolsize; in get_pool()
369 BUG_ON(pool_nr > tbl->nr_pools); in get_pool()
370 p = &tbl->pools[pool_nr]; in get_pool()
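get_pool() inverts the allocation-side choice: the large pool occupies the top of the table, so any entry at or beyond large_pool.start belongs to it, and everything below maps to a small pool by integer division on poolsize. A compact sketch assuming the same field layout, with invented table values:

#include <stdio.h>

#define NR_POOLS 4

struct pool { unsigned long start, end; };

struct table {
	unsigned long poolsize;
	struct pool pools[NR_POOLS];
	struct pool large_pool;
};

/* Entries at or past large_pool.start go to the large pool; the rest
 * index a small pool by entry / poolsize, mirroring get_pool() above. */
static struct pool *pool_for(struct table *t, unsigned long entry)
{
	if (entry >= t->large_pool.start)
		return &t->large_pool;
	return &t->pools[entry / t->poolsize];
}

int main(void)
{
	struct table t = { .poolsize = 96, .large_pool = { .start = 384 } };

	printf("entry 100 -> pool %ld\n", (long)(pool_for(&t, 100) - t.pools));
	printf("entry 400 -> large? %d\n", pool_for(&t, 400) == &t.large_pool);
	return 0;
}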
376 static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, in __iommu_free() argument
383 entry = dma_addr >> tbl->it_page_shift; in __iommu_free()
384 free_entry = entry - tbl->it_offset; in __iommu_free()
386 pool = get_pool(tbl, free_entry); in __iommu_free()
388 if (!iommu_free_check(tbl, dma_addr, npages)) in __iommu_free()
391 tbl->it_ops->clear(tbl, entry, npages); in __iommu_free()
394 bitmap_clear(tbl->it_map, free_entry, npages); in __iommu_free()
398 static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, in iommu_free() argument
401 __iommu_free(tbl, dma_addr, npages); in iommu_free()
407 if (tbl->it_ops->flush) in iommu_free()
408 tbl->it_ops->flush(tbl); in iommu_free()
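__iommu_free() clears the hardware TCEs through it_ops->clear() and then drops the corresponding bits in it_map; note from the listing order that iommu_free() runs the optional it_ops->flush() only after the bitmap update. A userspace stand-in for the bitmap_clear() step at line 394:

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Clear npages consecutive bits starting at free_entry, marking those
 * IOMMU pages available to the allocator again. */
static void bitmap_clear(unsigned long *map, unsigned long start,
			 unsigned long npages)
{
	for (unsigned long i = start; i < start + npages; i++)
		map[i / BITS_PER_LONG] &= ~(1UL << (i % BITS_PER_LONG));
}

int main(void)
{
	unsigned long it_map[2] = { ~0UL, ~0UL };

	bitmap_clear(it_map, 4, 8);	/* free entries 4..11 */
	printf("%#lx %#lx\n", it_map[0], it_map[1]);
	return 0;
}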
411 int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, in ppc_iommu_map_sg() argument
425 if ((nelems == 0) || !tbl) in ppc_iommu_map_sg()
450 npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl)); in ppc_iommu_map_sg()
452 if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE && in ppc_iommu_map_sg()
454 align = PAGE_SHIFT - tbl->it_page_shift; in ppc_iommu_map_sg()
455 entry = iommu_range_alloc(dev, tbl, npages, &handle, in ppc_iommu_map_sg()
456 mask >> tbl->it_page_shift, align); in ppc_iommu_map_sg()
465 "vaddr %lx npages %lu\n", tbl, vaddr, in ppc_iommu_map_sg()
471 entry += tbl->it_offset; in ppc_iommu_map_sg()
472 dma_addr = entry << tbl->it_page_shift; in ppc_iommu_map_sg()
473 dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl)); in ppc_iommu_map_sg()
479 build_fail = tbl->it_ops->set(tbl, entry, npages, in ppc_iommu_map_sg()
480 vaddr & IOMMU_PAGE_MASK(tbl), in ppc_iommu_map_sg()
518 if (tbl->it_ops->flush) in ppc_iommu_map_sg()
519 tbl->it_ops->flush(tbl); in ppc_iommu_map_sg()
542 vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl); in ppc_iommu_map_sg()
544 IOMMU_PAGE_SIZE(tbl)); in ppc_iommu_map_sg()
545 __iommu_free(tbl, vaddr, npages); in ppc_iommu_map_sg()
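ppc_iommu_map_sg() leans on the IOMMU_PAGE_SIZE()/IOMMU_PAGE_MASK() helpers, which derive everything from it_page_shift; when IOMMU pages are smaller than kernel pages, line 454's align = PAGE_SHIFT - tbl->it_page_shift requests kernel-page alignment from the allocator. A sketch of the mask arithmetic and the sub-page offset OR-in at lines 472-473, with invented constants:

#include <stdio.h>

int main(void)
{
	unsigned int it_page_shift = 16;	/* 64K IOMMU pages */
	unsigned long size = 1UL << it_page_shift;	/* IOMMU_PAGE_SIZE */
	unsigned long mask = ~(size - 1);		/* IOMMU_PAGE_MASK */

	unsigned long entry = 0x42, offset = 0x123;
	/* Re-attach the sub-page offset that the shift discarded. */
	unsigned long dma = (entry << it_page_shift) | (offset & ~mask);

	printf("size %#lx mask %#lx dma %#lx\n", size, mask, dma);
	return 0;
}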
556 void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, in ppc_iommu_unmap_sg() argument
564 if (!tbl) in ppc_iommu_unmap_sg()
575 IOMMU_PAGE_SIZE(tbl)); in ppc_iommu_unmap_sg()
576 __iommu_free(tbl, dma_handle, npages); in ppc_iommu_unmap_sg()
584 if (tbl->it_ops->flush) in ppc_iommu_unmap_sg()
585 tbl->it_ops->flush(tbl); in ppc_iommu_unmap_sg()
588 static void iommu_table_clear(struct iommu_table *tbl) in iommu_table_clear() argument
597 tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size); in iommu_table_clear()
602 if (tbl->it_ops->get) { in iommu_table_clear()
606 for (index = 0; index < tbl->it_size; index++) { in iommu_table_clear()
607 tceval = tbl->it_ops->get(tbl, index + tbl->it_offset); in iommu_table_clear()
612 __set_bit(index, tbl->it_map); in iommu_table_clear()
617 if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) { in iommu_table_clear()
621 for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES; in iommu_table_clear()
622 index < tbl->it_size; index++) in iommu_table_clear()
623 __clear_bit(index, tbl->it_map); in iommu_table_clear()
629 static void iommu_table_reserve_pages(struct iommu_table *tbl, in iommu_table_reserve_pages() argument
640 if (tbl->it_offset == 0) in iommu_table_reserve_pages()
641 set_bit(0, tbl->it_map); in iommu_table_reserve_pages()
643 tbl->it_reserved_start = res_start; in iommu_table_reserve_pages()
644 tbl->it_reserved_end = res_end; in iommu_table_reserve_pages()
648 (tbl->it_offset + tbl->it_size < res_start || in iommu_table_reserve_pages()
649 res_end < tbl->it_offset)) in iommu_table_reserve_pages()
652 for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i) in iommu_table_reserve_pages()
653 set_bit(i - tbl->it_offset, tbl->it_map); in iommu_table_reserve_pages()
656 static void iommu_table_release_pages(struct iommu_table *tbl) in iommu_table_release_pages() argument
664 if (tbl->it_offset == 0) in iommu_table_release_pages()
665 clear_bit(0, tbl->it_map); in iommu_table_release_pages()
667 for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i) in iommu_table_release_pages()
668 clear_bit(i - tbl->it_offset, tbl->it_map); in iommu_table_release_pages()
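iommu_table_reserve_pages() and iommu_table_release_pages() are exact mirrors: bit 0 is pinned whenever the table starts at offset 0 so DMA address 0 is never handed out, and the optional reserved window is marked busy with table-relative bit indices (lines 648-649 first check that the window actually overlaps the table). A userspace sketch of the pair, with hand-rolled bit helpers standing in for the kernel's:

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static void set_bit(unsigned long n, unsigned long *map)
{
	map[n / BITS_PER_LONG] |= 1UL << (n % BITS_PER_LONG);
}

static void clear_bit(unsigned long n, unsigned long *map)
{
	map[n / BITS_PER_LONG] &= ~(1UL << (n % BITS_PER_LONG));
}

static void reserve(unsigned long *map, unsigned long it_offset,
		    unsigned long start, unsigned long end)
{
	if (it_offset == 0)
		set_bit(0, map);	/* keep DMA address 0 unusable */
	for (unsigned long i = start; i < end; i++)
		set_bit(i - it_offset, map);
}

static void release(unsigned long *map, unsigned long it_offset,
		    unsigned long start, unsigned long end)
{
	if (it_offset == 0)
		clear_bit(0, map);
	for (unsigned long i = start; i < end; i++)
		clear_bit(i - it_offset, map);
}

int main(void)
{
	unsigned long it_map[1] = { 0 };

	reserve(it_map, 0, 4, 8);	/* reserve window 4..7 */
	printf("%#lx\n", it_map[0]);	/* 0xf1: bit 0 plus bits 4-7 */
	release(it_map, 0, 4, 8);
	printf("%#lx\n", it_map[0]);	/* back to 0 */
	return 0;
}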
675 struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid, in iommu_init_table() argument
684 BUG_ON(!tbl->it_ops); in iommu_init_table()
687 sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long); in iommu_init_table()
692 tbl->it_map = page_address(page); in iommu_init_table()
693 memset(tbl->it_map, 0, sz); in iommu_init_table()
695 iommu_table_reserve_pages(tbl, res_start, res_end); in iommu_init_table()
698 if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024)) in iommu_init_table()
699 tbl->nr_pools = IOMMU_NR_POOLS; in iommu_init_table()
701 tbl->nr_pools = 1; in iommu_init_table()
704 tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools; in iommu_init_table()
706 for (i = 0; i < tbl->nr_pools; i++) { in iommu_init_table()
707 p = &tbl->pools[i]; in iommu_init_table()
709 p->start = tbl->poolsize * i; in iommu_init_table()
711 p->end = p->start + tbl->poolsize; in iommu_init_table()
714 p = &tbl->large_pool; in iommu_init_table()
716 p->start = tbl->poolsize * i; in iommu_init_table()
718 p->end = tbl->it_size; in iommu_init_table()
720 iommu_table_clear(tbl); in iommu_init_table()
728 return tbl; in iommu_init_table()
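iommu_init_table() sizes the pools from the table geometry: tables covering at least 1GB get IOMMU_NR_POOLS small pools, smaller ones a single pool; the small pools split the bottom three quarters of the entries evenly, and the large pool takes whatever remains at the top. A sketch of that layout computation with an invented geometry:

#include <stdio.h>

#define IOMMU_NR_POOLS 4	/* illustrative; a power of two */

int main(void)
{
	unsigned long it_size = 1UL << 19;	/* entries in the table */
	unsigned int it_page_shift = 12;	/* 4K IOMMU pages */
	unsigned int nr_pools =
		((it_size << it_page_shift) >= (1UL << 30)) ? IOMMU_NR_POOLS : 1;
	/* Small pools share the bottom 3/4 of the entries evenly. */
	unsigned long poolsize = (it_size * 3 / 4) / nr_pools;

	for (unsigned int i = 0; i < nr_pools; i++)
		printf("pool %u: [%lu, %lu)\n", i,
		       poolsize * i, poolsize * i + poolsize);
	/* The large pool runs from the last small pool to it_size. */
	printf("large:  [%lu, %lu)\n", poolsize * nr_pools, it_size);
	return 0;
}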
735 struct iommu_table *tbl; in iommu_table_free() local
737 tbl = container_of(kref, struct iommu_table, it_kref); in iommu_table_free()
739 if (tbl->it_ops->free) in iommu_table_free()
740 tbl->it_ops->free(tbl); in iommu_table_free()
742 if (!tbl->it_map) { in iommu_table_free()
743 kfree(tbl); in iommu_table_free()
747 iommu_table_release_pages(tbl); in iommu_table_free()
750 if (!bitmap_empty(tbl->it_map, tbl->it_size)) in iommu_table_free()
754 bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long); in iommu_table_free()
758 free_pages((unsigned long) tbl->it_map, order); in iommu_table_free()
761 kfree(tbl); in iommu_table_free()
764 struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl) in iommu_tce_table_get() argument
766 if (kref_get_unless_zero(&tbl->it_kref)) in iommu_tce_table_get()
767 return tbl; in iommu_tce_table_get()
773 int iommu_tce_table_put(struct iommu_table *tbl) in iommu_tce_table_put() argument
775 if (WARN_ON(!tbl)) in iommu_tce_table_put()
778 return kref_put(&tbl->it_kref, iommu_table_free); in iommu_tce_table_put()
787 dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, in iommu_map_page() argument
802 if (tbl) { in iommu_map_page()
803 npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl)); in iommu_map_page()
805 if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE && in iommu_map_page()
807 align = PAGE_SHIFT - tbl->it_page_shift; in iommu_map_page()
809 dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction, in iommu_map_page()
810 mask >> tbl->it_page_shift, align, in iommu_map_page()
816 "vaddr %p npages %d\n", tbl, vaddr, in iommu_map_page()
820 dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl)); in iommu_map_page()
826 void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, in iommu_unmap_page() argument
834 if (tbl) { in iommu_unmap_page()
836 IOMMU_PAGE_SIZE(tbl)); in iommu_unmap_page()
837 iommu_free(tbl, dma_handle, npages); in iommu_unmap_page()
845 void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, in iommu_alloc_coherent() argument
869 if (!tbl) in iommu_alloc_coherent()
880 nio_pages = size >> tbl->it_page_shift; in iommu_alloc_coherent()
881 io_order = get_iommu_order(size, tbl); in iommu_alloc_coherent()
882 mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL, in iommu_alloc_coherent()
883 mask >> tbl->it_page_shift, io_order, 0); in iommu_alloc_coherent()
892 void iommu_free_coherent(struct iommu_table *tbl, size_t size, in iommu_free_coherent() argument
895 if (tbl) { in iommu_free_coherent()
899 nio_pages = size >> tbl->it_page_shift; in iommu_free_coherent()
900 iommu_free(tbl, dma_handle, nio_pages); in iommu_free_coherent()
967 void iommu_flush_tce(struct iommu_table *tbl) in iommu_flush_tce() argument
970 if (tbl->it_ops->flush) in iommu_flush_tce()
971 tbl->it_ops->flush(tbl); in iommu_flush_tce()
1010 struct iommu_table *tbl, in iommu_tce_xchg_no_kill() argument
1017 ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, false); in iommu_tce_xchg_no_kill()
1020 !mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift, in iommu_tce_xchg_no_kill()
1028 void iommu_tce_kill(struct iommu_table *tbl, in iommu_tce_kill() argument
1031 if (tbl->it_ops->tce_kill) in iommu_tce_kill()
1032 tbl->it_ops->tce_kill(tbl, entry, pages, false); in iommu_tce_kill()
1036 int iommu_take_ownership(struct iommu_table *tbl) in iommu_take_ownership() argument
1038 unsigned long flags, i, sz = (tbl->it_size + 7) >> 3; in iommu_take_ownership()
1048 if (!tbl->it_ops->xchg_no_kill) in iommu_take_ownership()
1051 spin_lock_irqsave(&tbl->large_pool.lock, flags); in iommu_take_ownership()
1052 for (i = 0; i < tbl->nr_pools; i++) in iommu_take_ownership()
1053 spin_lock(&tbl->pools[i].lock); in iommu_take_ownership()
1055 iommu_table_release_pages(tbl); in iommu_take_ownership()
1057 if (!bitmap_empty(tbl->it_map, tbl->it_size)) { in iommu_take_ownership()
1061 iommu_table_reserve_pages(tbl, tbl->it_reserved_start, in iommu_take_ownership()
1062 tbl->it_reserved_end); in iommu_take_ownership()
1064 memset(tbl->it_map, 0xff, sz); in iommu_take_ownership()
1067 for (i = 0; i < tbl->nr_pools; i++) in iommu_take_ownership()
1068 spin_unlock(&tbl->pools[i].lock); in iommu_take_ownership()
1069 spin_unlock_irqrestore(&tbl->large_pool.lock, flags); in iommu_take_ownership()
1075 void iommu_release_ownership(struct iommu_table *tbl) in iommu_release_ownership() argument
1077 unsigned long flags, i, sz = (tbl->it_size + 7) >> 3; in iommu_release_ownership()
1079 spin_lock_irqsave(&tbl->large_pool.lock, flags); in iommu_release_ownership()
1080 for (i = 0; i < tbl->nr_pools; i++) in iommu_release_ownership()
1081 spin_lock(&tbl->pools[i].lock); in iommu_release_ownership()
1083 memset(tbl->it_map, 0, sz); in iommu_release_ownership()
1085 iommu_table_reserve_pages(tbl, tbl->it_reserved_start, in iommu_release_ownership()
1086 tbl->it_reserved_end); in iommu_release_ownership()
1088 for (i = 0; i < tbl->nr_pools; i++) in iommu_release_ownership()
1089 spin_unlock(&tbl->pools[i].lock); in iommu_release_ownership()
1090 spin_unlock_irqrestore(&tbl->large_pool.lock, flags); in iommu_release_ownership()
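iommu_take_ownership() and iommu_release_ownership() flip the whole map while holding the large pool lock plus every small pool lock: taking ownership requires an empty map (the reserved window is dropped first and re-applied on failure, lines 1061-1062), after which the map is filled with 0xff so the kernel-side allocator can never hand out an entry while someone else, e.g. VFIO, owns the table; release zeroes the map and re-reserves the window. A simplified sketch without the locking, where sz = (it_size + 7) >> 3 is the map size in bytes as in lines 1038 and 1077:

#include <stdio.h>
#include <string.h>

static int take_ownership(unsigned char *map, unsigned long it_size)
{
	unsigned long sz = (it_size + 7) >> 3;

	/* Only an empty map may change hands; the kernel returns -EBUSY. */
	for (unsigned long i = 0; i < sz; i++)
		if (map[i])
			return -1;
	memset(map, 0xff, sz);	/* block the kernel-side allocator */
	return 0;
}

static void release_ownership(unsigned char *map, unsigned long it_size)
{
	memset(map, 0, (it_size + 7) >> 3);
	/* the kernel re-reserves it_reserved_start..it_reserved_end here */
}

int main(void)
{
	unsigned char map[8] = { 0 };

	printf("take: %d\n", take_ownership(map, 64));
	release_ownership(map, 64);
	return 0;
}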