Lines matching refs: tbl

Every line below is a cross-reference hit for the identifier tbl (a struct iommu_table pointer) in the powerpc IOMMU allocator, arch/powerpc/kernel/iommu.c. The format is: source line number, the matching code, then "in <function>" with an "argument"/"local" tag saying how tbl is bound in that function.

164 struct iommu_table *tbl, in iommu_range_alloc() argument
198 pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1); in iommu_range_alloc()
201 pool = &(tbl->large_pool); in iommu_range_alloc()
203 pool = &(tbl->pools[pool_nr]); in iommu_range_alloc()
223 if (limit + tbl->it_offset > mask) { in iommu_range_alloc()
224 limit = mask - tbl->it_offset + 1; in iommu_range_alloc()
231 pool = &(tbl->pools[0]); in iommu_range_alloc()
241 1 << tbl->it_page_shift); in iommu_range_alloc()
243 boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift); in iommu_range_alloc()
246 n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset, in iommu_range_alloc()
247 boundary_size >> tbl->it_page_shift, align_mask); in iommu_range_alloc()
255 } else if (pass <= tbl->nr_pools) { in iommu_range_alloc()
258 pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1); in iommu_range_alloc()
259 pool = &tbl->pools[pool_nr]; in iommu_range_alloc()
280 pool->hint = (end + tbl->it_blocksize - 1) & in iommu_range_alloc()
281 ~(tbl->it_blocksize - 1); in iommu_range_alloc()
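
iommu_range_alloc() (lines 164-281 above) is the bitmap allocator behind every mapping in this file. The starting pool comes from a per-CPU hash so concurrent CPUs contend on different pool locks; if the device's DMA mask constrains the limit, the search falls back to pool 0; and a failed pass first retries the same pool from its start, then hops through the remaining pools before giving up. The `& (nr_pools - 1)` masking only works because the pool count is a power of two. A minimal userspace model of that walk, where NR_POOLS and the hash value are stand-ins and the retry order is my reading of the pass logic, not a verbatim copy:

#include <stdio.h>

#define NR_POOLS 4   /* model of IOMMU_NR_POOLS; must be a power of two */

/* Model of the pool walk in iommu_range_alloc(): start at a hashed
 * pool, advance with power-of-two mask wraparound on each failed pass. */
static unsigned int next_pool(unsigned int pool_nr)
{
	return (pool_nr + 1) & (NR_POOLS - 1);
}

int main(void)
{
	unsigned int cpu_hash = 0x2f;   /* stand-in for iommu_pool_hash */
	unsigned int pool_nr = cpu_hash & (NR_POOLS - 1);
	unsigned int pass;

	/* pass 0 retries the same pool from its start; later passes hop */
	for (pass = 0; pass <= NR_POOLS; pass++) {
		printf("pass %u -> pool %u\n", pass, pool_nr);
		if (pass > 0)
			pool_nr = next_pool(pool_nr);
	}
	return 0;
}
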
293 static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, in iommu_alloc() argument
303 entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order); in iommu_alloc()
308 entry += tbl->it_offset; /* Offset into real TCE table */ in iommu_alloc()
309 ret = entry << tbl->it_page_shift; /* Set the return dma address */ in iommu_alloc()
312 build_fail = tbl->it_ops->set(tbl, entry, npages, in iommu_alloc()
314 IOMMU_PAGE_MASK(tbl), direction, attrs); in iommu_alloc()
322 __iommu_free(tbl, ret, npages); in iommu_alloc()
327 if (tbl->it_ops->flush) in iommu_alloc()
328 tbl->it_ops->flush(tbl); in iommu_alloc()
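
iommu_alloc() (lines 293-328) turns the bitmap slot into a bus address: the slot is biased by it_offset, because the table need not start at bus address zero, then shifted by it_page_shift, because the IOMMU page size need not equal the CPU's. A sketch of just that arithmetic, with made-up table parameters:

#include <stdio.h>

typedef unsigned long long dma_addr_t;

/* Model of the entry -> DMA address math in iommu_alloc() */
static dma_addr_t entry_to_dma(unsigned long entry, unsigned long it_offset,
			       unsigned int it_page_shift)
{
	entry += it_offset;                 /* offset into the real TCE table */
	return (dma_addr_t)entry << it_page_shift;
}

int main(void)
{
	/* hypothetical table: starts at entry 0x800, 4K IOMMU pages */
	printf("dma = 0x%llx\n", entry_to_dma(0x10, 0x800, 12)); /* 0x810000 */
	return 0;
}
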
336 static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr, in iommu_free_check() argument
341 entry = dma_addr >> tbl->it_page_shift; in iommu_free_check()
342 free_entry = entry - tbl->it_offset; in iommu_free_check()
344 if (((free_entry + npages) > tbl->it_size) || in iommu_free_check()
345 (entry < tbl->it_offset)) { in iommu_free_check()
350 printk(KERN_INFO "\tTable = 0x%llx\n", (u64)tbl); in iommu_free_check()
351 printk(KERN_INFO "\tbus# = 0x%llx\n", (u64)tbl->it_busno); in iommu_free_check()
352 printk(KERN_INFO "\tsize = 0x%llx\n", (u64)tbl->it_size); in iommu_free_check()
353 printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset); in iommu_free_check()
354 printk(KERN_INFO "\tindex = 0x%llx\n", (u64)tbl->it_index); in iommu_free_check()
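
iommu_free_check() (lines 336-354) rejects a free whose DMA address decodes outside the table: the entry must be at or above it_offset, and the run of npages must end within it_size; otherwise the diagnostics above are printed. The same check as a standalone predicate, assuming only the field meanings visible in the listing:

#include <stdbool.h>
#include <stdio.h>

/* Model of iommu_free_check(): reject addresses below it_offset
 * or runs that would overflow the table. */
static bool free_ok(unsigned long long dma_addr, unsigned int npages,
		    unsigned long it_offset, unsigned long it_size,
		    unsigned int it_page_shift)
{
	unsigned long entry = dma_addr >> it_page_shift;
	unsigned long free_entry = entry - it_offset;

	if (entry < it_offset || free_entry + npages > it_size)
		return false;
	return true;
}

int main(void)
{
	/* table: offset 0x800 entries, 0x1000 entries long, 4K pages */
	printf("%d\n", free_ok(0x801000ULL, 1, 0x800, 0x1000, 12)); /* 1: valid */
	printf("%d\n", free_ok(0x100000ULL, 1, 0x800, 0x1000, 12)); /* 0: below table */
	return 0;
}
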
364 static struct iommu_pool *get_pool(struct iommu_table *tbl, in get_pool() argument
368 unsigned long largepool_start = tbl->large_pool.start; in get_pool()
372 p = &tbl->large_pool; in get_pool()
374 unsigned int pool_nr = entry / tbl->poolsize; in get_pool()
376 BUG_ON(pool_nr > tbl->nr_pools); in get_pool()
377 p = &tbl->pools[pool_nr]; in get_pool()
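
get_pool() (lines 364-377) inverts the pool layout that iommu_init_table() sets up further down: entries at or above the large pool's start belong to the large pool, and everything below indexes a small pool by dividing by poolsize. A userspace model with a hypothetical four-pool layout:

#include <stdio.h>

/* Model of get_pool(): entries at or above the large pool's start
 * map to the large pool; the rest index a small pool by division. */
static int pool_of(unsigned long entry, unsigned long largepool_start,
		   unsigned long poolsize)
{
	if (entry >= largepool_start)
		return -1;                    /* large pool */
	return (int)(entry / poolsize);       /* small pool index */
}

int main(void)
{
	/* hypothetical: 4 small pools of 0x300 entries, large pool from 0xc00 */
	printf("%d %d %d\n",
	       pool_of(0x0,   0xc00, 0x300),   /* pool 0 */
	       pool_of(0x5ff, 0xc00, 0x300),   /* pool 1 */
	       pool_of(0xd00, 0xc00, 0x300));  /* -1: large pool */
	return 0;
}
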
383 static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, in __iommu_free() argument
390 entry = dma_addr >> tbl->it_page_shift; in __iommu_free()
391 free_entry = entry - tbl->it_offset; in __iommu_free()
393 pool = get_pool(tbl, free_entry); in __iommu_free()
395 if (!iommu_free_check(tbl, dma_addr, npages)) in __iommu_free()
398 tbl->it_ops->clear(tbl, entry, npages); in __iommu_free()
401 bitmap_clear(tbl->it_map, free_entry, npages); in __iommu_free()
405 static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, in iommu_free() argument
408 __iommu_free(tbl, dma_addr, npages); in iommu_free()
414 if (tbl->it_ops->flush) in iommu_free()
415 tbl->it_ops->flush(tbl); in iommu_free()
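
__iommu_free() and iommu_free() (lines 383-415) mirror the allocation path: decode the entry, find its pool, validate, have the backend clear the TCEs, clear the bitmap, and optionally flush. The `if (tbl->it_ops->flush)` guard is the common kernel pattern for optional ops-table hooks; a minimal model of that pattern, with illustrative names that are not the kernel's:

#include <stdio.h>

/* Minimal model of an ops table with mandatory and optional hooks,
 * mirroring how iommu_free() guards it_ops->flush. */
struct tbl_ops {
	void (*clear)(unsigned long entry, unsigned int npages); /* mandatory */
	void (*flush)(void);                                     /* optional  */
};

static void my_clear(unsigned long entry, unsigned int npages)
{
	printf("clear %u entries at %lu\n", npages, entry);
}

static void do_free(const struct tbl_ops *ops, unsigned long entry,
		    unsigned int npages)
{
	ops->clear(entry, npages);
	if (ops->flush)         /* only some backends need a batch flush */
		ops->flush();
}

int main(void)
{
	struct tbl_ops ops = { .clear = my_clear, .flush = NULL };
	do_free(&ops, 42, 3);
	return 0;
}
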
418 int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, in ppc_iommu_map_sg() argument
432 if ((nelems == 0) || !tbl) in ppc_iommu_map_sg()
457 npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl)); in ppc_iommu_map_sg()
459 if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE && in ppc_iommu_map_sg()
461 align = PAGE_SHIFT - tbl->it_page_shift; in ppc_iommu_map_sg()
462 entry = iommu_range_alloc(dev, tbl, npages, &handle, in ppc_iommu_map_sg()
463 mask >> tbl->it_page_shift, align); in ppc_iommu_map_sg()
472 "vaddr %lx npages %lu\n", tbl, vaddr, in ppc_iommu_map_sg()
478 entry += tbl->it_offset; in ppc_iommu_map_sg()
479 dma_addr = entry << tbl->it_page_shift; in ppc_iommu_map_sg()
480 dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl)); in ppc_iommu_map_sg()
486 build_fail = tbl->it_ops->set(tbl, entry, npages, in ppc_iommu_map_sg()
487 vaddr & IOMMU_PAGE_MASK(tbl), in ppc_iommu_map_sg()
525 if (tbl->it_ops->flush) in ppc_iommu_map_sg()
526 tbl->it_ops->flush(tbl); in ppc_iommu_map_sg()
549 vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl); in ppc_iommu_map_sg()
551 IOMMU_PAGE_SIZE(tbl)); in ppc_iommu_map_sg()
552 __iommu_free(tbl, vaddr, npages); in ppc_iommu_map_sg()
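
In ppc_iommu_map_sg() (lines 418-552), each scatterlist element is sized in IOMMU pages, where the buffer's offset inside its first IOMMU page counts against the length, and the resulting bus address is rebuilt as (entry << it_page_shift) | sub-page offset. A model of the page-count arithmetic, matching my understanding of the kernel's iommu_num_pages() helper:

#include <stdio.h>

/* Model of iommu_num_pages(): pages spanned by [vaddr, vaddr + len)
 * in units of io_page_size (offset within the first page counts). */
static unsigned long num_pages(unsigned long vaddr, unsigned long len,
			       unsigned long io_page_size)
{
	unsigned long size = (vaddr & (io_page_size - 1)) + len;

	return (size + io_page_size - 1) / io_page_size;
}

int main(void)
{
	/* 4K IOMMU pages: 0x100 bytes at offset 0xf80 straddle two pages */
	printf("%lu\n", num_pages(0x1000f80UL, 0x100UL, 0x1000UL));  /* 2 */
	printf("%lu\n", num_pages(0x1000000UL, 0x1000UL, 0x1000UL)); /* 1 */
	return 0;
}
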
563 void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, in ppc_iommu_unmap_sg() argument
571 if (!tbl) in ppc_iommu_unmap_sg()
582 IOMMU_PAGE_SIZE(tbl)); in ppc_iommu_unmap_sg()
583 __iommu_free(tbl, dma_handle, npages); in ppc_iommu_unmap_sg()
591 if (tbl->it_ops->flush) in ppc_iommu_unmap_sg()
592 tbl->it_ops->flush(tbl); in ppc_iommu_unmap_sg()
595 static void iommu_table_clear(struct iommu_table *tbl) in iommu_table_clear() argument
604 tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size); in iommu_table_clear()
609 if (tbl->it_ops->get) { in iommu_table_clear()
613 for (index = 0; index < tbl->it_size; index++) { in iommu_table_clear()
614 tceval = tbl->it_ops->get(tbl, index + tbl->it_offset); in iommu_table_clear()
619 __set_bit(index, tbl->it_map); in iommu_table_clear()
624 if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) { in iommu_table_clear()
628 for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES; in iommu_table_clear()
629 index < tbl->it_size; index++) in iommu_table_clear()
630 __clear_bit(index, tbl->it_map); in iommu_table_clear()
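
iommu_table_clear() (lines 595-630) has two modes: on a normal boot it simply clears every entry; in a kdump kernel it reads back the TCEs the crashed kernel left live, marks them used so they are not reallocated, and, if that leaves fewer than KDUMP_MIN_TCE_ENTRIES free, forcibly reclaims the top of the table. A toy model of the kdump scan, with invented array sizes and TCE values:

#include <stdio.h>

#define KDUMP_MIN 4   /* stand-in for KDUMP_MIN_TCE_ENTRIES */

int main(void)
{
	unsigned long tce[16], it_size = 16, index, tcecount = 0;
	unsigned char used[16] = { 0 };

	/* hypothetical TCEs left mapped by the crashed kernel:
	 * everything live except entries 5 and 9 */
	for (index = 0; index < it_size; index++)
		tce[index] = (index == 5 || index == 9) ? 0 : 0x1000 + index;

	/* mark live entries so the kdump kernel won't reallocate them */
	for (index = 0; index < it_size; index++)
		if (tce[index]) {
			used[index] = 1;
			tcecount++;
		}

	/* too few free entries left: forcibly reclaim the table's top */
	if (it_size - tcecount < KDUMP_MIN)
		for (index = it_size - KDUMP_MIN; index < it_size; index++)
			used[index] = 0;

	for (index = 0; index < it_size; index++)
		printf("%u", used[index]);
	printf("\n");   /* prints 1111101110110000 */
	return 0;
}
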
636 static void iommu_table_reserve_pages(struct iommu_table *tbl, in iommu_table_reserve_pages() argument
647 if (tbl->it_offset == 0) in iommu_table_reserve_pages()
648 set_bit(0, tbl->it_map); in iommu_table_reserve_pages()
650 tbl->it_reserved_start = res_start; in iommu_table_reserve_pages()
651 tbl->it_reserved_end = res_end; in iommu_table_reserve_pages()
655 (tbl->it_offset + tbl->it_size < res_start || in iommu_table_reserve_pages()
656 res_end < tbl->it_offset)) in iommu_table_reserve_pages()
659 for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i) in iommu_table_reserve_pages()
660 set_bit(i - tbl->it_offset, tbl->it_map); in iommu_table_reserve_pages()
663 static void iommu_table_release_pages(struct iommu_table *tbl) in iommu_table_release_pages() argument
671 if (tbl->it_offset == 0) in iommu_table_release_pages()
672 clear_bit(0, tbl->it_map); in iommu_table_release_pages()
674 for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i) in iommu_table_release_pages()
675 clear_bit(i - tbl->it_offset, tbl->it_map); in iommu_table_release_pages()
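
iommu_table_reserve_pages() and iommu_table_release_pages() (lines 636-675) are exact mirrors: both special-case bitmap slot 0 when it_offset == 0, so a DMA address of zero, which drivers treat as a mapping failure, is never handed out, and both walk [it_reserved_start, it_reserved_end), translating table entries to bitmap indices by subtracting it_offset. A sketch of the reserve side with a toy bitmap:

#include <stdio.h>
#include <string.h>

#define MAP_BITS 64

/* Toy bitmap standing in for tbl->it_map */
static unsigned char map[MAP_BITS / 8];

static void set_bit_(unsigned long n) { map[n / 8] |= (1u << (n % 8)); }

/* Model of iommu_table_reserve_pages(): entries become bitmap
 * indices by subtracting it_offset. */
static void reserve(unsigned long it_offset, unsigned long start,
		    unsigned long end)
{
	unsigned long i;

	if (it_offset == 0)
		set_bit_(0);            /* never hand out bus address 0 */
	for (i = start; i < end; ++i)
		set_bit_(i - it_offset);
}

int main(void)
{
	memset(map, 0, sizeof(map));
	reserve(0, 8, 12);              /* reserve entries 8..11 plus slot 0 */
	printf("map[0]=0x%02x map[1]=0x%02x\n", map[0], map[1]); /* 0x01 0x0f */
	return 0;
}
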
682 struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid, in iommu_init_table() argument
691 BUG_ON(!tbl->it_ops); in iommu_init_table()
694 sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long); in iommu_init_table()
699 tbl->it_map = page_address(page); in iommu_init_table()
700 memset(tbl->it_map, 0, sz); in iommu_init_table()
702 iommu_table_reserve_pages(tbl, res_start, res_end); in iommu_init_table()
705 if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024)) in iommu_init_table()
706 tbl->nr_pools = IOMMU_NR_POOLS; in iommu_init_table()
708 tbl->nr_pools = 1; in iommu_init_table()
711 tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools; in iommu_init_table()
713 for (i = 0; i < tbl->nr_pools; i++) { in iommu_init_table()
714 p = &tbl->pools[i]; in iommu_init_table()
716 p->start = tbl->poolsize * i; in iommu_init_table()
718 p->end = p->start + tbl->poolsize; in iommu_init_table()
721 p = &tbl->large_pool; in iommu_init_table()
723 p->start = tbl->poolsize * i; in iommu_init_table()
725 p->end = tbl->it_size; in iommu_init_table()
727 iommu_table_clear(tbl); in iommu_init_table()
735 return tbl; in iommu_init_table()
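
iommu_init_table() (lines 682-735) sizes the pools from the table geometry: a table mapping 1 GiB or more is split into IOMMU_NR_POOLS small pools covering the first three quarters of its entries, with the remaining quarter becoming the large pool; smaller tables get a single pool. A model of that layout computation, using a stand-in pool count (the real constant lives in the powerpc headers):

#include <stdio.h>

#define NR_POOLS_MODEL 4   /* stand-in for IOMMU_NR_POOLS */

int main(void)
{
	unsigned long it_size = 0x100000;      /* hypothetical: 1M entries */
	unsigned int it_page_shift = 12;       /* 4K IOMMU pages */
	unsigned int nr_pools, i;
	unsigned long poolsize;

	/* >= 1 GiB of mapped space: use multiple pools, else just one */
	if ((it_size << it_page_shift) >= (1UL * 1024 * 1024 * 1024))
		nr_pools = NR_POOLS_MODEL;
	else
		nr_pools = 1;

	/* small pools share the first 3/4 of the table */
	poolsize = (it_size * 3 / 4) / nr_pools;

	for (i = 0; i < nr_pools; i++)
		printf("pool %u: [%#lx, %#lx)\n", i,
		       poolsize * i, poolsize * i + poolsize);
	/* the large pool takes whatever is left, up to it_size */
	printf("large:  [%#lx, %#lx)\n", poolsize * nr_pools, it_size);
	return 0;
}
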
742 struct iommu_table *tbl; in iommu_table_free() local
744 tbl = container_of(kref, struct iommu_table, it_kref); in iommu_table_free()
746 if (tbl->it_ops->free) in iommu_table_free()
747 tbl->it_ops->free(tbl); in iommu_table_free()
749 if (!tbl->it_map) { in iommu_table_free()
750 kfree(tbl); in iommu_table_free()
754 iommu_table_release_pages(tbl); in iommu_table_free()
757 if (!bitmap_empty(tbl->it_map, tbl->it_size)) in iommu_table_free()
761 bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long); in iommu_table_free()
765 free_pages((unsigned long) tbl->it_map, order); in iommu_table_free()
768 kfree(tbl); in iommu_table_free()
771 struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl) in iommu_tce_table_get() argument
773 if (kref_get_unless_zero(&tbl->it_kref)) in iommu_tce_table_get()
774 return tbl; in iommu_tce_table_get()
780 int iommu_tce_table_put(struct iommu_table *tbl) in iommu_tce_table_put() argument
782 if (WARN_ON(!tbl)) in iommu_tce_table_put()
785 return kref_put(&tbl->it_kref, iommu_table_free); in iommu_tce_table_put()
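
Lines 742-785 show the table's lifetime management: iommu_tce_table_get() only takes a reference while the count is nonzero, and the last iommu_tce_table_put() runs iommu_table_free() through kref_put(). A userspace model of the get-unless-zero/put-with-release pattern using C11 atomics (the kernel's kref does the equivalent internally):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int refs = 1;   /* stand-in for tbl->it_kref */

/* Model of kref_get_unless_zero(): only take a reference while the
 * object is still live; fail once a release has begun. */
static bool get_unless_zero(void)
{
	int old = atomic_load(&refs);

	while (old != 0)
		if (atomic_compare_exchange_weak(&refs, &old, old + 1))
			return true;
	return false;
}

/* Model of kref_put(): run the release function on the last put. */
static bool put(void (*release)(void))
{
	if (atomic_fetch_sub(&refs, 1) == 1) {
		release();
		return true;
	}
	return false;
}

static void table_free(void) { printf("iommu_table_free()\n"); }

int main(void)
{
	printf("get: %d\n", get_unless_zero());            /* 1: count 1 -> 2 */
	put(table_free);                                   /* 2 -> 1, no free */
	put(table_free);                                   /* 1 -> 0, frees   */
	printf("get after free: %d\n", get_unless_zero()); /* 0 */
	return 0;
}
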
794 dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, in iommu_map_page() argument
809 if (tbl) { in iommu_map_page()
810 npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl)); in iommu_map_page()
812 if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE && in iommu_map_page()
814 align = PAGE_SHIFT - tbl->it_page_shift; in iommu_map_page()
816 dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction, in iommu_map_page()
817 mask >> tbl->it_page_shift, align, in iommu_map_page()
823 "vaddr %p npages %d\n", tbl, vaddr, in iommu_map_page()
827 dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl)); in iommu_map_page()
833 void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, in iommu_unmap_page() argument
841 if (tbl) { in iommu_unmap_page()
843 IOMMU_PAGE_SIZE(tbl)); in iommu_unmap_page()
844 iommu_free(tbl, dma_handle, npages); in iommu_unmap_page()
852 void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, in iommu_alloc_coherent() argument
876 if (!tbl) in iommu_alloc_coherent()
887 nio_pages = size >> tbl->it_page_shift; in iommu_alloc_coherent()
888 io_order = get_iommu_order(size, tbl); in iommu_alloc_coherent()
889 mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL, in iommu_alloc_coherent()
890 mask >> tbl->it_page_shift, io_order, 0); in iommu_alloc_coherent()
899 void iommu_free_coherent(struct iommu_table *tbl, size_t size, in iommu_free_coherent() argument
902 if (tbl) { in iommu_free_coherent()
906 nio_pages = size >> tbl->it_page_shift; in iommu_free_coherent()
907 iommu_free(tbl, dma_handle, nio_pages); in iommu_free_coherent()
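
iommu_alloc_coherent() and iommu_free_coherent() (lines 852-907) work in whole IOMMU pages: nio_pages = size >> it_page_shift, and the allocation order passed to iommu_alloc() comes from get_iommu_order(). A model of that size arithmetic; the io_order formula is my reading of the powerpc get_iommu_order() helper, and it assumes size has already been rounded up to a page multiple:

#include <stdio.h>

/* Integer log2, rounded down (model of __ilog2 for nonzero inputs) */
static unsigned int ilog2_(unsigned long v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned long size = 0x6000;     /* 24K request, assumed page-aligned */
	unsigned int it_page_shift = 12; /* 4K IOMMU pages */

	unsigned long nio_pages = size >> it_page_shift;
	unsigned int io_order = ilog2_((size - 1) >> it_page_shift) + 1;

	/* 6 pages need an order-3 (8-page) allocation */
	printf("nio_pages=%lu io_order=%u\n", nio_pages, io_order);
	return 0;
}
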
974 void iommu_flush_tce(struct iommu_table *tbl) in iommu_flush_tce() argument
977 if (tbl->it_ops->flush) in iommu_flush_tce()
978 tbl->it_ops->flush(tbl); in iommu_flush_tce()
1017 struct iommu_table *tbl, in iommu_tce_xchg_no_kill() argument
1024 ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, false); in iommu_tce_xchg_no_kill()
1027 !mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift, in iommu_tce_xchg_no_kill()
1035 void iommu_tce_kill(struct iommu_table *tbl, in iommu_tce_kill() argument
1038 if (tbl->it_ops->tce_kill) in iommu_tce_kill()
1039 tbl->it_ops->tce_kill(tbl, entry, pages, false); in iommu_tce_kill()
1043 int iommu_take_ownership(struct iommu_table *tbl) in iommu_take_ownership() argument
1045 unsigned long flags, i, sz = (tbl->it_size + 7) >> 3; in iommu_take_ownership()
1055 if (!tbl->it_ops->xchg_no_kill) in iommu_take_ownership()
1058 spin_lock_irqsave(&tbl->large_pool.lock, flags); in iommu_take_ownership()
1059 for (i = 0; i < tbl->nr_pools; i++) in iommu_take_ownership()
1060 spin_lock(&tbl->pools[i].lock); in iommu_take_ownership()
1062 iommu_table_release_pages(tbl); in iommu_take_ownership()
1064 if (!bitmap_empty(tbl->it_map, tbl->it_size)) { in iommu_take_ownership()
1068 iommu_table_reserve_pages(tbl, tbl->it_reserved_start, in iommu_take_ownership()
1069 tbl->it_reserved_end); in iommu_take_ownership()
1071 memset(tbl->it_map, 0xff, sz); in iommu_take_ownership()
1074 for (i = 0; i < tbl->nr_pools; i++) in iommu_take_ownership()
1075 spin_unlock(&tbl->pools[i].lock); in iommu_take_ownership()
1076 spin_unlock_irqrestore(&tbl->large_pool.lock, flags); in iommu_take_ownership()
1082 void iommu_release_ownership(struct iommu_table *tbl) in iommu_release_ownership() argument
1084 unsigned long flags, i, sz = (tbl->it_size + 7) >> 3; in iommu_release_ownership()
1086 spin_lock_irqsave(&tbl->large_pool.lock, flags); in iommu_release_ownership()
1087 for (i = 0; i < tbl->nr_pools; i++) in iommu_release_ownership()
1088 spin_lock(&tbl->pools[i].lock); in iommu_release_ownership()
1090 memset(tbl->it_map, 0, sz); in iommu_release_ownership()
1092 iommu_table_reserve_pages(tbl, tbl->it_reserved_start, in iommu_release_ownership()
1093 tbl->it_reserved_end); in iommu_release_ownership()
1095 for (i = 0; i < tbl->nr_pools; i++) in iommu_release_ownership()
1096 spin_unlock(&tbl->pools[i].lock); in iommu_release_ownership()
1097 spin_unlock_irqrestore(&tbl->large_pool.lock, flags); in iommu_release_ownership()
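
iommu_take_ownership() and iommu_release_ownership() (lines 1043-1097) hand the table to and from an external user such as VFIO: all pool locks are taken in a fixed order (large pool first, then each small pool, so this cannot deadlock against the allocator), the reserved entries are dropped so the emptiness check is meaningful, and, if nothing else is mapped, the whole bitmap is painted 0xff so iommu_range_alloc() can never succeed; release reverses this. The sz = (it_size + 7) >> 3 computation is plain bits-to-bytes rounding, sketched here:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned long it_size = 27;             /* hypothetical entry count */
	unsigned long sz = (it_size + 7) >> 3;  /* bytes covering it_size bits */
	unsigned char map[8] = { 0 };

	memset(map, 0xff, sz);                  /* take ownership: block kernel use */
	printf("owned: map[0]=0x%02x (sz=%lu)\n", map[0], sz);

	memset(map, 0, sz);                     /* release: table usable again */
	printf("released: map[0]=0x%02x\n", map[0]);
	return 0;
}
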