Lines matching +full:page +full:-offset in linux/arch/arm/mm/dma-mapping.c (excerpt)
1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/arch/arm/mm/dma-mapping.c
5 * Copyright (C) 2000-2004 Russell King
17 #include <linux/dma-direct.h>
18 #include <linux/dma-map-ops.h>
33 #include <asm/dma-iommu.h>
36 #include <asm/xen/xen-ops.h>
55 struct page *page;
64 struct page **ret_page);
84 if (buf->virt == virt) {
85 list_del(&buf->list);
107 static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
111 * lurking in the kernel direct-mapped region is invalidated.
113 if (PageHighMem(page)) {
114 phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
117 void *ptr = kmap_atomic(page);
122 page++;
123 size -= PAGE_SIZE;
128 void *ptr = page_address(page);
139 * specified gfp mask. Note that 'size' must be page aligned.
141 static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
145 struct page *page, *p, *e;
147 page = alloc_pages(gfp, order);
148 if (!page)
152 * Now split the huge page and free the excess pages
154 split_page(page, order);
155 for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
158 __dma_clear_buffer(page, size, coherent_flag);
160 return page;
164 * Free a DMA buffer. 'size' must be page aligned.
166 static void __dma_free_buffer(struct page *page, size_t size)
168 struct page *e = page + (size >> PAGE_SHIFT);
170 while (page < e) {
171 __free_page(page);
172 page++;
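The allocator above rounds the request up to the next power-of-two page order, splits the compound allocation, and gives back the excess pages; the free path simply walks the pages one by one. A minimal stand-alone sketch of that split-and-trim technique (function name alloc_trimmed_pages is illustrative, not part of this file; cache maintenance omitted):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Sketch only: allocate enough pages for 'size' bytes, trimming the excess. */
static struct page *alloc_trimmed_pages(size_t size, gfp_t gfp)
{
	unsigned int order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	split_page(page, order);		/* 2^order independent order-0 pages */
	p = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);
	e = page + (1 << order);
	while (p < e)				/* free the tail we do not need */
		__free_page(p++);

	return page;
}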
177 pgprot_t prot, struct page **ret_page,
182 pgprot_t prot, struct page **ret_page,
204 struct page *page;
207 atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
211 * The atomic pool is only used for non-coherent allocations
216 &page, atomic_pool_init, true, NORMAL,
220 &page, atomic_pool_init, true);
225 page_to_phys(page),
226 atomic_pool_size, -1);
244 return -ENOMEM;
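The atomic pool is a genalloc pool carved out at boot so that non-coherent allocations made from atomic context can be satisfied without sleeping. A hedged sketch of the genalloc calls involved, assuming a pre-remapped chunk is handed in (names example_pool_init/example_pool_alloc are illustrative):

#include <linux/genalloc.h>

static struct gen_pool *pool;

/* Sketch only: seed a pool with one chunk and allocate from it atomically. */
static int example_pool_init(void *vaddr, phys_addr_t phys, size_t size)
{
	pool = gen_pool_create(PAGE_SHIFT, -1);	/* page granularity, any node */
	if (!pool)
		return -ENOMEM;

	/* register the chunk: virtual address plus the backing physical address */
	if (gen_pool_add_virt(pool, (unsigned long)vaddr, phys, size, -1)) {
		gen_pool_destroy(pool);
		return -ENOMEM;
	}
	return 0;
}

static void *example_pool_alloc(size_t size)
{
	return (void *)gen_pool_alloc(pool, size);	/* never sleeps */
}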
284 map.length = end - start;
288 * Clear previous low-memory mapping to ensure that the
310 struct page *page = virt_to_page((void *)addr);
313 set_pte_ext(pte, mk_pte(page, prot), 0);
317 static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
319 unsigned long start = (unsigned long) page_address(page);
327 pgprot_t prot, struct page **ret_page,
330 struct page *page;
334 * non-coherent
336 page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
337 if (!page)
342 ptr = dma_common_contiguous_remap(page, size, prot, caller);
344 __dma_free_buffer(page, size);
349 *ret_page = page;
353 static void *__alloc_from_pool(size_t size, struct page **ret_page)
390 pgprot_t prot, struct page **ret_page,
396 struct page *page;
399 page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
400 if (!page)
403 __dma_clear_buffer(page, size, coherent_flag);
408 if (PageHighMem(page)) {
409 ptr = dma_common_contiguous_remap(page, size, prot, caller);
411 dma_release_from_contiguous(dev, page, count);
415 __dma_remap(page, size, prot);
416 ptr = page_address(page);
420 *ret_page = page;
424 static void __free_from_contiguous(struct device *dev, struct page *page,
428 if (PageHighMem(page))
431 __dma_remap(page, size, PAGE_KERNEL);
433 dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
445 struct page **ret_page)
447 struct page *page;
449 page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
450 if (!page)
453 *ret_page = page;
454 return page_address(page);
458 struct page **ret_page)
460 return __alloc_simple_buffer(args->dev, args->size, args->gfp,
466 __dma_free_buffer(args->page, args->size);
475 struct page **ret_page)
477 return __alloc_from_contiguous(args->dev, args->size, args->prot,
478 ret_page, args->caller,
479 args->want_vaddr, args->coherent_flag,
480 args->gfp);
485 __free_from_contiguous(args->dev, args->page, args->cpu_addr,
486 args->size, args->want_vaddr);
495 struct page **ret_page)
497 return __alloc_from_pool(args->size, ret_page);
502 __free_from_pool(args->cpu_addr, args->size);
511 struct page **ret_page)
513 return __alloc_remap_buffer(args->dev, args->size, args->gfp,
514 args->prot, ret_page, args->caller,
515 args->want_vaddr);
520 if (args->want_vaddr)
521 dma_common_free_remap(args->cpu_addr, args->size);
523 __dma_free_buffer(args->page, args->size);
535 u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
536 struct page *page = NULL;
568 * Following is a work-around (a.k.a. hack) to prevent pages
582 buf->allocator = &cma_allocator;
584 buf->allocator = &simple_allocator;
586 buf->allocator = &remap_allocator;
588 buf->allocator = &pool_allocator;
590 addr = buf->allocator->alloc(&args, &page);
592 if (page) {
595 *handle = phys_to_dma(dev, page_to_phys(page));
596 buf->virt = args.want_vaddr ? addr : page;
599 list_add(&buf->list, &arm_dma_bufs);
605 return args.want_vaddr ? addr : page;
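__dma_alloc() picks one of the four allocators registered above (simple, CMA, remap, pool) based on coherence, gfp flags, and attrs, then records the buffer so that __arm_dma_free() can later route the free through the same allocator. From a driver this is all hidden behind dma_alloc_coherent(); a hedged usage sketch (example_alloc_ring and ring_bytes are illustrative):

#include <linux/dma-mapping.h>

/* Illustrative: allocate a descriptor ring that both CPU and device touch. */
static int example_alloc_ring(struct device *dev, size_t ring_bytes)
{
	dma_addr_t ring_dma;
	void *ring;

	ring = dma_alloc_coherent(dev, ring_bytes, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... program ring_dma into the device, access 'ring' from the CPU ... */

	dma_free_coherent(dev, ring_bytes, ring, ring_dma);
	return 0;
}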
615 struct page *page = phys_to_page(dma_to_phys(dev, handle));
621 .page = page,
629 buf->allocator->free(&args);
633 static void dma_cache_maint_page(struct page *page, unsigned long offset,
640 pfn = page_to_pfn(page) + offset / PAGE_SIZE;
641 offset %= PAGE_SIZE;
653 page = pfn_to_page(pfn);
655 if (PageHighMem(page)) {
656 if (len + offset > PAGE_SIZE)
657 len = PAGE_SIZE - offset;
660 vaddr = kmap_atomic(page);
661 op(vaddr + offset, len, dir);
664 vaddr = kmap_high_get(page);
666 op(vaddr + offset, len, dir);
667 kunmap_high(page);
671 vaddr = page_address(page) + offset;
674 offset = 0;
676 left -= len;
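dma_cache_maint_page() walks the buffer one page (or partial page) at a time so that highmem pages can be temporarily mapped before the cache operation is applied. A simplified sketch of that walk, assuming a lowmem-only buffer so no kmapping is needed (cache_maint_lowmem is an illustrative name):

#include <linux/mm.h>

/* Sketch only: apply 'op' to [offset, offset+size) page by page, lowmem case. */
static void cache_maint_lowmem(struct page *page, unsigned long offset,
			       size_t size, enum dma_data_direction dir,
			       void (*op)(const void *, size_t, int))
{
	unsigned long pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	size_t left = size;

	offset %= PAGE_SIZE;
	do {
		size_t len = left;

		if (len + offset > PAGE_SIZE)		/* clamp to the current page */
			len = PAGE_SIZE - offset;

		op(page_address(pfn_to_page(pfn)) + offset, len, dir);
		pfn++;
		offset = 0;				/* later pages start at 0 */
		left -= len;
	} while (left);
}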
683 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
685 static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
690 dma_cache_maint_page(page, off, size, dir, dmac_map_area);
692 paddr = page_to_phys(page) + off;
698 /* FIXME: non-speculating: flush on bidirectional mappings? */
701 static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
704 phys_addr_t paddr = page_to_phys(page) + off;
706 /* FIXME: non-speculating: not required */
711 dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
715 * Mark the D-cache clean for these pages to avoid extra flushing.
721 pfn = page_to_pfn(page) + off / PAGE_SIZE;
725 left -= PAGE_SIZE - off;
728 page = pfn_to_page(pfn++);
729 set_bit(PG_dcache_clean, &page->flags);
730 left -= PAGE_SIZE;
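These two helpers are the CPU-side cache maintenance behind the streaming DMA API on non-coherent ARM systems: __dma_page_cpu_to_dev() runs before the device reads, __dma_page_dev_to_cpu() after the device writes. A typical driver-level sequence that ends up calling them (example_stream and the buffer are illustrative):

#include <linux/dma-mapping.h>

/* Illustrative driver snippet: stream a kernel buffer out to a device. */
static int example_stream(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	/* cleans the cache, i.e. __dma_page_cpu_to_dev() on non-coherent ARM */
	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... point the hardware at 'dma' and wait for completion ... */

	/* no CPU-side invalidate needed for DMA_TO_DEVICE */
	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}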
766 size_t mapping_size = mapping->bits << PAGE_SHIFT;
775 align = (1 << order) - 1;
777 spin_lock_irqsave(&mapping->lock, flags);
778 for (i = 0; i < mapping->nr_bitmaps; i++) {
779 start = bitmap_find_next_zero_area(mapping->bitmaps[i],
780 mapping->bits, 0, count, align);
782 if (start > mapping->bits)
785 bitmap_set(mapping->bitmaps[i], start, count);
794 if (i == mapping->nr_bitmaps) {
796 spin_unlock_irqrestore(&mapping->lock, flags);
800 start = bitmap_find_next_zero_area(mapping->bitmaps[i],
801 mapping->bits, 0, count, align);
803 if (start > mapping->bits) {
804 spin_unlock_irqrestore(&mapping->lock, flags);
808 bitmap_set(mapping->bitmaps[i], start, count);
810 spin_unlock_irqrestore(&mapping->lock, flags);
812 iova = mapping->base + (mapping_size * i);
822 size_t mapping_size = mapping->bits << PAGE_SHIFT;
830 bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
831 BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);
833 bitmap_base = mapping->base + mapping_size * bitmap_index;
835 start = (addr - bitmap_base) >> PAGE_SHIFT;
848 spin_lock_irqsave(&mapping->lock, flags);
849 bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
850 spin_unlock_irqrestore(&mapping->lock, flags);
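The IOVA allocator here is a plain bitmap allocator: one bit per page of IO address space, with additional bitmaps ("extensions") appended on demand when the first one fills up. A reduced, single-bitmap sketch of the allocate/free pair (struct simple_iova_domain and the function names are illustrative; the alignment handling of the real code is omitted):

#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>

/* Sketch only: single-bitmap IOVA allocator, one bit per page. */
struct simple_iova_domain {
	spinlock_t	lock;
	unsigned long	*bitmap;
	size_t		bits;		/* number of pages covered */
	dma_addr_t	base;
};

static dma_addr_t simple_iova_alloc(struct simple_iova_domain *d, size_t size)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long flags, start;

	spin_lock_irqsave(&d->lock, flags);
	start = bitmap_find_next_zero_area(d->bitmap, d->bits, 0, count, 0);
	if (start > d->bits) {			/* no free run of 'count' pages */
		spin_unlock_irqrestore(&d->lock, flags);
		return DMA_MAPPING_ERROR;
	}
	bitmap_set(d->bitmap, start, count);
	spin_unlock_irqrestore(&d->lock, flags);

	return d->base + ((dma_addr_t)start << PAGE_SHIFT);
}

static void simple_iova_free(struct simple_iova_domain *d, dma_addr_t addr, size_t size)
{
	unsigned long start = (addr - d->base) >> PAGE_SHIFT;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	bitmap_clear(d->bitmap, start, count);
	spin_unlock_irqrestore(&d->lock, flags);
}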
856 static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
860 struct page **pages;
862 int array_size = count * sizeof(struct page *);
876 struct page *page;
878 page = dma_alloc_from_contiguous(dev, count, order,
880 if (!page)
883 __dma_clear_buffer(page, size, coherent_flag);
886 pages[i] = page + i;
893 order_idx = ARRAY_SIZE(iommu_order_array) - 1;
912 /* See if it's easy to allocate a high-order chunk */
929 while (--j)
935 count -= 1 << order;
940 while (i--)
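__iommu_alloc_buffer() fills the pages[] array by opportunistically trying large allocations first and dropping to smaller orders (ultimately order 0) when memory is fragmented, since the IOMMU can stitch the pieces together anyway. A condensed sketch of that fallback loop, assuming an order preference list like {8, 4, 0} in place of iommu_order_array[] (error unwinding of already-allocated pages omitted):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Sketch only: fill pages[] with 'count' pages, preferring big chunks. */
static int fill_pages_best_effort(struct page **pages, size_t count, gfp_t gfp)
{
	static const unsigned int orders[] = { 8, 4, 0 };
	unsigned int oi = 0;
	size_t i = 0;

	while (count) {
		unsigned int order = orders[oi];
		struct page *page;
		size_t j, n;

		/* never grab a chunk bigger than what is still needed */
		while (order && (1UL << order) > count)
			order = orders[++oi];

		/* high-order attempts are opportunistic: fail fast, no warning */
		page = alloc_pages(order ? gfp | __GFP_NORETRY | __GFP_NOWARN : gfp,
				   order);
		if (!page) {
			if (!order)
				return -ENOMEM;	/* even single pages failed */
			oi++;			/* fall back to a smaller order */
			continue;
		}

		if (order)
			split_page(page, order);	/* hand pages out one by one */

		n = 1UL << order;
		for (j = 0; j < n; j++)
			pages[i + j] = page + j;
		i += n;
		count -= n;
	}
	return 0;
}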
947 static int __iommu_free_buffer(struct device *dev, struct page **pages,
969 __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
993 len = (j - i) << PAGE_SHIFT;
994 ret = iommu_map(mapping->domain, iova, phys, len,
1003 iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
1013 * add optional in-page offset from iova to size and align
1014 * result to page size
1019 iommu_unmap(mapping->domain, iova, size);
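__iommu_create_mapping() takes the (not necessarily contiguous) pages[] array and maps it back-to-back at one IOVA, coalescing physically contiguous runs into a single iommu_map() call each. A condensed sketch of that merging loop (map_pages_contig_iova is an illustrative name; it uses the same five-argument iommu_map() form as the calls above, and error unwinding is omitted):

#include <linux/iommu.h>
#include <linux/mm.h>

/* Sketch only: map pages[0..count) contiguously at 'iova', merging runs. */
static int map_pages_contig_iova(struct iommu_domain *domain, dma_addr_t iova,
				 struct page **pages, size_t count, int prot)
{
	dma_addr_t next = iova;
	size_t i = 0;

	while (i < count) {
		phys_addr_t phys = page_to_phys(pages[i]);
		size_t j = i + 1;
		int ret;

		/* extend the run while the next page is physically adjacent */
		while (j < count &&
		       page_to_phys(pages[j]) == phys + ((j - i) << PAGE_SHIFT))
			j++;

		ret = iommu_map(domain, next, phys, (j - i) << PAGE_SHIFT, prot);
		if (ret)
			return ret;

		next += (j - i) << PAGE_SHIFT;
		i = j;
	}
	return 0;
}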
1024 static struct page **__atomic_get_pages(void *addr)
1026 struct page *page;
1030 page = phys_to_page(phys);
1032 return (struct page **)page;
1035 static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
1050 struct page *page;
1054 addr = __alloc_simple_buffer(dev, size, gfp, &page);
1056 addr = __alloc_from_pool(size, &page);
1060 *handle = __iommu_create_mapping(dev, &page, size, attrs);
1085 struct page **pages;
1087 int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;
1097 * Following is a work-around (a.k.a. hack) to prevent pages
1134 struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1139 return -ENXIO;
1141 if (vma->vm_pgoff >= nr_pages)
1142 return -ENXIO;
1144 if (!dev->dma_coherent)
1145 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
1155 * free a page as defined by the above mapping.
1161 int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;
1162 struct page **pages;
1188 struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1191 return -ENXIO;
1198 * Map a part of the scatter-gather list into contiguous io address space
1216 return -ENOMEM;
1220 unsigned int len = PAGE_ALIGN(s->offset + s->length);
1222 if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
1223 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
1227 ret = iommu_map(mapping->domain, iova, phys, len, prot);
1237 iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
1243 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
1259 unsigned int offset = s->offset;
1260 unsigned int size = s->offset + s->length;
1266 s->dma_length = 0;
1268 if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
1270 &dma->dma_address, dir, attrs);
1274 dma->dma_address += offset;
1275 dma->dma_length = size - offset;
1277 size = offset = s->offset;
1282 size += s->length;
1284 ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs);
1288 dma->dma_address += offset;
1289 dma->dma_length = size - offset;
1296 if (ret == -ENOMEM)
1298 return -EINVAL;
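arm_iommu_map_sg() splits the scatterlist into chunks that can be mapped contiguously in IO address space and merges entries where the IOMMU makes that possible. From a driver's point of view this is the generic dma_map_sg() call; a hedged usage sketch (example_map_sg and the debug print are illustrative):

#include <linux/dma-mapping.h>
#include <linux/printk.h>
#include <linux/scatterlist.h>

/* Illustrative: map an already-initialised scatterlist for device writes. */
static int example_map_sg(struct device *dev, struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i, mapped;

	mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	if (!mapped)
		return -ENOMEM;

	/* iterate the (possibly merged) returned segments, not the originals */
	for_each_sg(sgl, sg, mapped, i)
		pr_debug("seg %d: dma %pad len %u\n", i,
			 &sg_dma_address(sg), sg_dma_len(sg));

	/* ... later: unmap with the original nents ... */
	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	return 0;
}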
1302 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1323 if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
1324 __dma_page_dev_to_cpu(sg_page(s), s->offset,
1325 s->length, dir);
1343 if (dev->dma_coherent)
1347 __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
1365 if (dev->dma_coherent)
1369 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
1375 * @page: page that buffer resides in
1376 * @offset: offset into page for start of buffer
1382 static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
1383 unsigned long offset, size_t size, enum dma_data_direction dir,
1388 int ret, prot, len = PAGE_ALIGN(size + offset);
1390 if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
1391 __dma_page_cpu_to_dev(page, offset, size, dir);
1399 ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
1403 return dma_addr + offset;
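Because the IOMMU can only map whole pages, the mapping is widened to cover every page the buffer touches, and the sub-page offset is added back to the handle that is returned. As a worked example with 4 KiB pages: mapping 0x100 bytes at page offset 0xF80 needs len = PAGE_ALIGN(0x100 + 0xF80) = 0x2000, so two pages are mapped, and the caller receives dma_addr + 0xF80.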
1423 struct page *page;
1424 int offset = handle & ~PAGE_MASK;
1425 int len = PAGE_ALIGN(size + offset);
1430 if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
1431 page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1432 __dma_page_dev_to_cpu(page, offset, size, dir);
1435 iommu_unmap(mapping->domain, iova, len);
1440 * arm_iommu_map_resource - map a device resource for DMA
1454 unsigned int offset = phys_addr & ~PAGE_MASK;
1455 size_t len = PAGE_ALIGN(size + offset);
1463 ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
1467 return dma_addr + offset;
1474 * arm_iommu_unmap_resource - unmap a device DMA resource
1486 unsigned int offset = dma_handle & ~PAGE_MASK;
1487 size_t len = PAGE_ALIGN(size + offset);
1492 iommu_unmap(mapping->domain, iova, len);
1501 struct page *page;
1502 unsigned int offset = handle & ~PAGE_MASK;
1504 if (dev->dma_coherent || !iova)
1507 page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1508 __dma_page_dev_to_cpu(page, offset, size, dir);
1516 struct page *page;
1517 unsigned int offset = handle & ~PAGE_MASK;
1519 if (dev->dma_coherent || !iova)
1522 page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1523 __dma_page_cpu_to_dev(page, offset, size, dir);
1566 int err = -ENOMEM;
1568 /* currently only 32-bit DMA address space is supported */
1570 return ERR_PTR(-ERANGE);
1573 return ERR_PTR(-EINVAL);
1584 mapping->bitmap_size = bitmap_size;
1585 mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *),
1587 if (!mapping->bitmaps)
1590 mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
1591 if (!mapping->bitmaps[0])
1594 mapping->nr_bitmaps = 1;
1595 mapping->extensions = extensions;
1596 mapping->base = base;
1597 mapping->bits = BITS_PER_BYTE * bitmap_size;
1599 spin_lock_init(&mapping->lock);
1601 mapping->domain = iommu_domain_alloc(bus);
1602 if (!mapping->domain)
1605 kref_init(&mapping->kref);
1608 kfree(mapping->bitmaps[0]);
1610 kfree(mapping->bitmaps);
1624 iommu_domain_free(mapping->domain);
1625 for (i = 0; i < mapping->nr_bitmaps; i++)
1626 kfree(mapping->bitmaps[i]);
1627 kfree(mapping->bitmaps);
1635 if (mapping->nr_bitmaps >= mapping->extensions)
1636 return -EINVAL;
1638 next_bitmap = mapping->nr_bitmaps;
1639 mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
1641 if (!mapping->bitmaps[next_bitmap])
1642 return -ENOMEM;
1644 mapping->nr_bitmaps++;
1652 kref_put(&mapping->kref, release_iommu_mapping);
1661 err = iommu_attach_device(mapping->domain, dev);
1665 kref_get(&mapping->kref);
1704 * This overwrites the dma_ops pointer with appropriate non-IOMMU ops.
1716 iommu_detach_device(mapping->domain, dev);
1717 kref_put(&mapping->kref, release_iommu_mapping);
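Taken together, create_mapping/attach/detach/release form the ARM-specific API a bus or driver uses to put a device behind an IOMMU-backed DMA window; arm_setup_iommu_dma_ops() below does the same thing automatically for bus code. A hedged sketch of the manual sequence, assuming a 256 MiB window at 0x80000000 (example_attach, the base address, and the size are illustrative):

#include <asm/dma-iommu.h>
#include <linux/err.h>
#include <linux/sizes.h>

/* Illustrative: give 'dev' a 256 MiB IOMMU-backed DMA window. */
static int example_attach(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int ret;

	mapping = arm_iommu_create_mapping(dev->bus, 0x80000000, SZ_256M);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	ret = arm_iommu_attach_device(dev, mapping);
	if (ret) {
		arm_iommu_release_mapping(mapping);
		return ret;
	}

	/* dma_alloc_coherent()/dma_map_*() on 'dev' now hand out IOVAs
	 * from [0x80000000, 0x90000000). */
	return 0;
}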
1730 mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
1732 pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
1773 * Due to legacy code that sets the ->dma_coherent flag from a bus
1774 * notifier we can't just assign coherent to the ->dma_coherent flag
1779 dev->dma_coherent = true;
1786 if (dev->dma_ops)
1793 dev->archdata.dma_ops_setup = true;
1798 if (!dev->archdata.dma_ops_setup)
1802 /* Let arch_setup_dma_ops() start again from scratch upon re-probe */
1809 __dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
1816 __dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
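The final two hooks are the arch back-end of the generic dma-direct sync calls: the physical address is split into a page and a sub-page offset (paddr & (PAGE_SIZE - 1)) before the cache maintenance helpers defined earlier are invoked. For instance, with 4 KiB pages a paddr of 0x81234567 becomes phys_to_page(0x81234567) plus offset 0x567.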