Lines matching full:size in the ARM DMA-mapping implementation (arch/arm/mm/dma-mapping.c)

46 size_t size; member
56 size_t size; member
119 * @size: size of buffer to map
129 unsigned long offset, size_t size, enum dma_data_direction dir, in arm_dma_map_page() argument
133 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_dma_map_page()
138 unsigned long offset, size_t size, enum dma_data_direction dir, in arm_coherent_dma_map_page() argument
148 * @size: size of buffer (same as passed to dma_map_page)
151 * Unmap a page streaming mode DMA translation. The handle and size
159 size_t size, enum dma_data_direction dir, unsigned long attrs) in arm_dma_unmap_page() argument
163 handle & ~PAGE_MASK, size, dir); in arm_dma_unmap_page()
167 dma_addr_t handle, size_t size, enum dma_data_direction dir) in arm_dma_sync_single_for_cpu() argument
171 __dma_page_dev_to_cpu(page, offset, size, dir); in arm_dma_sync_single_for_cpu()
175 dma_addr_t handle, size_t size, enum dma_data_direction dir) in arm_dma_sync_single_for_device() argument
179 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_dma_sync_single_for_device()
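The four streaming helpers above implement one contract: whatever (handle, size, dir) triple a driver passes to map must come back unchanged to unmap and sync, because the in-page offset is recovered from the handle (line 163). A minimal, hypothetical driver-side round trip through the generic DMA API, assuming dev and page are set up elsewhere:

    #include <linux/dma-mapping.h>

    static int stream_one_page(struct device *dev, struct page *page)
    {
            dma_addr_t handle;

            /* map one full page for device-to-memory DMA */
            handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, handle))
                    return -ENOMEM;

            /* ... device DMAs into the buffer ... */

            /* same handle and size as the map call, per the comment above */
            dma_sync_single_for_cpu(dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
            /* ... CPU reads the data, then releases the mapping ... */
            dma_unmap_page(dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
            return 0;
    }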
201 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
203 static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
206 void *cpu_addr, dma_addr_t dma_addr, size_t size,
265 static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag) in __dma_clear_buffer() argument
273 phys_addr_t end = base + size; in __dma_clear_buffer()
274 while (size > 0) { in __dma_clear_buffer()
281 size -= PAGE_SIZE; in __dma_clear_buffer()
287 memset(ptr, 0, size); in __dma_clear_buffer()
289 dmac_flush_range(ptr, ptr + size); in __dma_clear_buffer()
290 outer_flush_range(__pa(ptr), __pa(ptr) + size); in __dma_clear_buffer()
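Lines 273-290 show the two halves of __dma_clear_buffer(): a highmem-safe loop that zeroes and flushes one page at a time, and a lowmem fast path that memsets the whole buffer and then flushes inner and outer caches. A condensed sketch of the per-page walk, with the coherent_flag short-circuit left out:

    #include <linux/highmem.h>
    #include <linux/string.h>
    #include <asm/cacheflush.h>
    #include <asm/outercache.h>

    /* 'size' is page aligned here, as the allocator below guarantees */
    static void clear_buffer_sketch(struct page *page, size_t size)
    {
            phys_addr_t base = page_to_phys(page);
            phys_addr_t end = base + size;

            while (size > 0) {
                    void *ptr = kmap_atomic(page);  /* safe for highmem pages */

                    memset(ptr, 0, PAGE_SIZE);
                    dmac_flush_range(ptr, ptr + PAGE_SIZE); /* inner cache */
                    kunmap_atomic(ptr);
                    page++;
                    size -= PAGE_SIZE;
            }
            outer_flush_range(base, end);           /* outer cache in one pass */
    }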
296 * Allocate a DMA buffer for 'dev' of size 'size' using the
297 * specified gfp mask. Note that 'size' must be page aligned.
299 static struct page *__dma_alloc_buffer(struct device *dev, size_t size, in __dma_alloc_buffer() argument
302 unsigned long order = get_order(size); in __dma_alloc_buffer()
313 for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++) in __dma_alloc_buffer()
316 __dma_clear_buffer(page, size, coherent_flag); in __dma_alloc_buffer()
322 * Free a DMA buffer. 'size' must be page aligned.
324 static void __dma_free_buffer(struct page *page, size_t size) in __dma_free_buffer() argument
326 struct page *e = page + (size >> PAGE_SHIFT); in __dma_free_buffer()
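Lines 302-326 describe a trim-to-size pattern: alloc_pages() only hands out power-of-two blocks, so __dma_alloc_buffer() splits the block and immediately frees the pages past 'size', and __dma_free_buffer() later walks exactly size >> PAGE_SHIFT pages. A sketch of the allocation side:

    #include <linux/gfp.h>
    #include <linux/mm.h>

    static struct page *alloc_trimmed_sketch(size_t size, gfp_t gfp)
    {
            unsigned long order = get_order(size);  /* size must be page aligned */
            struct page *page, *p, *e;

            page = alloc_pages(gfp, order);
            if (!page)
                    return NULL;

            split_page(page, order);        /* each page now individually freeable */
            for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
                    __free_page(p);         /* return the unused tail */
            return page;
    }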
334 static void *__alloc_from_contiguous(struct device *dev, size_t size,
339 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
411 unsigned long size; member
418 void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) in dma_contiguous_early_fixup() argument
421 dma_mmu_remap[dma_mmu_remap_num].size = size; in dma_contiguous_early_fixup()
430 phys_addr_t end = start + dma_mmu_remap[i].size; in dma_contiguous_remap()
473 static void __dma_remap(struct page *page, size_t size, pgprot_t prot) in __dma_remap() argument
476 unsigned end = start + size; in __dma_remap()
478 apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot); in __dma_remap()
482 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, in __alloc_remap_buffer() argument
492 page = __dma_alloc_buffer(dev, size, gfp, NORMAL); in __alloc_remap_buffer()
498 ptr = dma_common_contiguous_remap(page, size, prot, caller); in __alloc_remap_buffer()
500 __dma_free_buffer(page, size); in __alloc_remap_buffer()
509 static void *__alloc_from_pool(size_t size, struct page **ret_page) in __alloc_from_pool() argument
519 val = gen_pool_alloc(atomic_pool, size); in __alloc_from_pool()
530 static bool __in_atomic_pool(void *start, size_t size) in __in_atomic_pool() argument
532 return addr_in_gen_pool(atomic_pool, (unsigned long)start, size); in __in_atomic_pool()
535 static int __free_from_pool(void *start, size_t size) in __free_from_pool() argument
537 if (!__in_atomic_pool(start, size)) in __free_from_pool()
540 gen_pool_free(atomic_pool, (unsigned long)start, size); in __free_from_pool()
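The pool helpers at lines 509-540 exist so that allocations from atomic context never sleep: a gen_pool is filled at boot and carved up later. Reduced to the genalloc API (assuming atomic_pool was populated during init):

    #include <linux/genalloc.h>

    static void *pool_alloc_sketch(struct gen_pool *pool, size_t size,
                                   struct page **ret_page)
    {
            unsigned long val = gen_pool_alloc(pool, size);

            if (!val)
                    return NULL;
            *ret_page = phys_to_page(gen_pool_virt_to_phys(pool, val));
            return (void *)val;
    }

    static int pool_free_sketch(struct gen_pool *pool, void *start, size_t size)
    {
            if (!addr_in_gen_pool(pool, (unsigned long)start, size))
                    return 0;       /* not from this pool; caller tries another path */
            gen_pool_free(pool, (unsigned long)start, size);
            return 1;
    }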
545 static void *__alloc_from_contiguous(struct device *dev, size_t size, in __alloc_from_contiguous() argument
550 unsigned long order = get_order(size); in __alloc_from_contiguous()
551 size_t count = size >> PAGE_SHIFT; in __alloc_from_contiguous()
559 __dma_clear_buffer(page, size, coherent_flag); in __alloc_from_contiguous()
565 ptr = dma_common_contiguous_remap(page, size, prot, caller); in __alloc_from_contiguous()
571 __dma_remap(page, size, prot); in __alloc_from_contiguous()
581 void *cpu_addr, size_t size, bool want_vaddr) in __free_from_contiguous() argument
585 dma_common_free_remap(cpu_addr, size); in __free_from_contiguous()
587 __dma_remap(page, size, PAGE_KERNEL); in __free_from_contiguous()
589 dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); in __free_from_contiguous()
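The contiguous (CMA) path converts bytes to pages right at its boundary: dma_alloc_from_contiguous() and dma_release_from_contiguous() take page counts, while the remap and clear helpers keep working in bytes. Roughly:

    #include <linux/dma-contiguous.h>

    static struct page *cma_get_sketch(struct device *dev, size_t size, gfp_t gfp)
    {
            size_t count = size >> PAGE_SHIFT;      /* size is already page aligned */
            unsigned long order = get_order(size);  /* alignment hint, in orders */

            return dma_alloc_from_contiguous(dev, count, order,
                                             gfp & __GFP_NOWARN);
    }

    static void cma_put_sketch(struct device *dev, struct page *page, size_t size)
    {
            dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
    }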
600 static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp, in __alloc_simple_buffer() argument
605 page = __dma_alloc_buffer(dev, size, gfp, COHERENT); in __alloc_simple_buffer()
616 return __alloc_simple_buffer(args->dev, args->size, args->gfp, in simple_allocator_alloc()
622 __dma_free_buffer(args->page, args->size); in simple_allocator_free()
633 return __alloc_from_contiguous(args->dev, args->size, args->prot, in cma_allocator_alloc()
642 args->size, args->want_vaddr); in cma_allocator_free()
653 return __alloc_from_pool(args->size, ret_page); in pool_allocator_alloc()
658 __free_from_pool(args->cpu_addr, args->size); in pool_allocator_free()
669 return __alloc_remap_buffer(args->dev, args->size, args->gfp, in remap_allocator_alloc()
677 dma_common_free_remap(args->cpu_addr, args->size); in remap_allocator_free()
679 __dma_free_buffer(args->page, args->size); in remap_allocator_free()
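The *_allocator_alloc/free pairs at lines 616-679 are the four strategies (simple, CMA, pool, remap) behind a small function-pointer table; __dma_alloc() picks one up front and the rest of the path stays generic. The shape of that table, with the argument structs cut down to the fields visible in this listing:

    struct dma_alloc_args_sketch {          /* subset of the real alloc args */
            struct device *dev;
            size_t size;
            gfp_t gfp;
            pgprot_t prot;
            bool want_vaddr;
    };

    struct dma_free_args_sketch {           /* subset of the real free args */
            struct device *dev;
            size_t size;
            void *cpu_addr;
            struct page *page;
            bool want_vaddr;
    };

    struct dma_allocator_sketch {
            void *(*alloc)(struct dma_alloc_args_sketch *args,
                           struct page **ret_page);
            void (*free)(struct dma_free_args_sketch *args);
    };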
687 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, in __dma_alloc() argument
698 .size = PAGE_ALIGN(size), in __dma_alloc()
708 if (limit && size >= limit) { in __dma_alloc()
710 size, mask); in __dma_alloc()
771 void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, in arm_dma_alloc() argument
776 return __dma_alloc(dev, size, handle, gfp, prot, false, in arm_dma_alloc()
780 static void *arm_coherent_dma_alloc(struct device *dev, size_t size, in arm_coherent_dma_alloc() argument
783 return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true, in arm_coherent_dma_alloc()
788 void *cpu_addr, dma_addr_t dma_addr, size_t size, in __arm_dma_mmap() argument
793 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; in __arm_dma_mmap()
797 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) in __arm_dma_mmap()
814 void *cpu_addr, dma_addr_t dma_addr, size_t size, in arm_coherent_dma_mmap() argument
817 return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); in arm_coherent_dma_mmap()
821 void *cpu_addr, dma_addr_t dma_addr, size_t size, in arm_dma_mmap() argument
825 return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); in arm_dma_mmap()
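All three mmap entry points at lines 788-825 funnel into one bounds check: the vma, including its page offset, must fit inside the nr_pages derived from the buffer size before remap_pfn_range() may run. A sketch using the ARM-specific dma_to_pfn() helper:

    #include <linux/mm.h>

    static int mmap_sketch(struct device *dev, struct vm_area_struct *vma,
                           dma_addr_t dma_addr, size_t size)
    {
            unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
            unsigned long off = vma->vm_pgoff;

            if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
                    return -ENXIO;  /* vma would spill past the buffer */

            return remap_pfn_range(vma, vma->vm_start,
                                   dma_to_pfn(dev, dma_addr) + off,
                                   vma->vm_end - vma->vm_start,
                                   vma->vm_page_prot);
    }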
831 static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, in __arm_dma_free() argument
839 .size = PAGE_ALIGN(size), in __arm_dma_free()
853 void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, in arm_dma_free() argument
856 __arm_dma_free(dev, size, cpu_addr, handle, attrs, false); in arm_dma_free()
859 static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, in arm_coherent_dma_free() argument
862 __arm_dma_free(dev, size, cpu_addr, handle, attrs, true); in arm_coherent_dma_free()
866 void *cpu_addr, dma_addr_t handle, size_t size, in arm_dma_get_sgtable() argument
883 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); in arm_dma_get_sgtable()
888 size_t size, enum dma_data_direction dir, in dma_cache_maint_page() argument
892 size_t left = size; in dma_cache_maint_page()
941 size_t size, enum dma_data_direction dir) in __dma_page_cpu_to_dev() argument
945 dma_cache_maint_page(page, off, size, dir, dmac_map_area); in __dma_page_cpu_to_dev()
949 outer_inv_range(paddr, paddr + size); in __dma_page_cpu_to_dev()
951 outer_clean_range(paddr, paddr + size); in __dma_page_cpu_to_dev()
957 size_t size, enum dma_data_direction dir) in __dma_page_dev_to_cpu() argument
964 outer_inv_range(paddr, paddr + size); in __dma_page_dev_to_cpu()
966 dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); in __dma_page_dev_to_cpu()
972 if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) { in __dma_page_dev_to_cpu()
974 size_t left = size; in __dma_page_dev_to_cpu()
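Lines 957-974 also show an optimization on the unmap side: once a FROM_DEVICE transfer has invalidated the CPU view, every fully covered page can be marked PG_dcache_clean so later flush_dcache_page() calls become no-ops. In isolation:

    #include <asm/cacheflush.h>

    static void mark_clean_sketch(struct page *page, size_t size)
    {
            size_t left = size;

            /* only whole pages qualify, hence the size >= PAGE_SIZE guard above */
            while (left >= PAGE_SIZE) {
                    set_bit(PG_dcache_clean, &page->flags);
                    page++;
                    left -= PAGE_SIZE;
            }
    }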
1141 size_t size) in __alloc_iova() argument
1143 unsigned int order = get_order(size); in __alloc_iova()
1154 count = PAGE_ALIGN(size) >> PAGE_SHIFT; in __alloc_iova()
1172 * address range of size bytes. in __alloc_iova()
1199 dma_addr_t addr, size_t size) in __free_iova() argument
1207 if (!size) in __free_iova()
1217 if (addr + size > bitmap_base + mapping_size) { in __free_iova()
1226 count = size >> PAGE_SHIFT; in __free_iova()
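__alloc_iova()/__free_iova() keep one bit per IO page in a bitmap under a spinlock; the real code adds bitmap extensions and an order-based alignment mask, but the core accounting is just this (lock and extensions omitted):

    #include <linux/bitmap.h>
    #include <linux/dma-mapping.h>

    static dma_addr_t iova_alloc_sketch(unsigned long *bitmap, unsigned int bits,
                                        dma_addr_t base, size_t size)
    {
            unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
            unsigned long start;

            start = bitmap_find_next_zero_area(bitmap, bits, 0, count, 0);
            if (start >= bits)
                    return DMA_MAPPING_ERROR;
            bitmap_set(bitmap, start, count);
            return base + ((dma_addr_t)start << PAGE_SHIFT);
    }

    static void iova_free_sketch(unsigned long *bitmap, dma_addr_t base,
                                 dma_addr_t addr, size_t size)
    {
            if (!size)
                    return;         /* mirrors the guard at line 1207 */
            bitmap_clear(bitmap, (addr - base) >> PAGE_SHIFT,
                         size >> PAGE_SHIFT);
    }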
1236 static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, in __iommu_alloc_buffer() argument
1241 int count = size >> PAGE_SHIFT; in __iommu_alloc_buffer()
1255 unsigned long order = get_order(size); in __iommu_alloc_buffer()
1263 __dma_clear_buffer(page, size, coherent_flag); in __iommu_alloc_buffer()
1328 size_t size, unsigned long attrs) in __iommu_free_buffer() argument
1330 int count = size >> PAGE_SHIFT; in __iommu_free_buffer()
1349 __iommu_create_mapping(struct device *dev, struct page **pages, size_t size, in __iommu_create_mapping() argument
1353 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; in __iommu_create_mapping()
1357 dma_addr = __alloc_iova(mapping, size); in __iommu_create_mapping()
1384 __free_iova(mapping, dma_addr, size); in __iommu_create_mapping()
1388 static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size) in __iommu_remove_mapping() argument
1393 * add optional in-page offset from iova to size and align in __iommu_remove_mapping()
1394 * result to page size in __iommu_remove_mapping()
1396 size = PAGE_ALIGN((iova & ~PAGE_MASK) + size); in __iommu_remove_mapping()
1399 iommu_unmap(mapping->domain, iova, size); in __iommu_remove_mapping()
1400 __free_iova(mapping, iova, size); in __iommu_remove_mapping()
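The rounding at line 1396 widens the unmap to cover every IO page the range touches, since the IOMMU can only unmap whole pages. A worked example with 4 KiB pages:

    /* iova = 0x10000200  ->  in-page offset (iova & ~PAGE_MASK) = 0x200      */
    /* size = 0x1000      ->  0x200 + 0x1000 = 0x1200 bytes of affected range */
    /* PAGE_ALIGN(0x1200) = 0x2000  ->  two pages are unmapped and freed      */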
1426 static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp, in __iommu_alloc_simple() argument
1434 addr = __alloc_simple_buffer(dev, size, gfp, &page); in __iommu_alloc_simple()
1436 addr = __alloc_from_pool(size, &page); in __iommu_alloc_simple()
1440 *handle = __iommu_create_mapping(dev, &page, size, attrs); in __iommu_alloc_simple()
1447 __free_from_pool(addr, size); in __iommu_alloc_simple()
1452 dma_addr_t handle, size_t size, int coherent_flag) in __iommu_free_atomic() argument
1454 __iommu_remove_mapping(dev, handle, size); in __iommu_free_atomic()
1456 __dma_free_buffer(virt_to_page(cpu_addr), size); in __iommu_free_atomic()
1458 __free_from_pool(cpu_addr, size); in __iommu_free_atomic()
1461 static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, in __arm_iommu_alloc_attrs() argument
1470 size = PAGE_ALIGN(size); in __arm_iommu_alloc_attrs()
1473 return __iommu_alloc_simple(dev, size, gfp, handle, in __arm_iommu_alloc_attrs()
1485 pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag); in __arm_iommu_alloc_attrs()
1489 *handle = __iommu_create_mapping(dev, pages, size, attrs); in __arm_iommu_alloc_attrs()
1496 addr = dma_common_pages_remap(pages, size, prot, in __arm_iommu_alloc_attrs()
1504 __iommu_remove_mapping(dev, *handle, size); in __arm_iommu_alloc_attrs()
1506 __iommu_free_buffer(dev, pages, size, attrs); in __arm_iommu_alloc_attrs()
1510 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, in arm_iommu_alloc_attrs() argument
1513 return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL); in arm_iommu_alloc_attrs()
1516 static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size, in arm_coherent_iommu_alloc_attrs() argument
1519 return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT); in arm_coherent_iommu_alloc_attrs()
1523 void *cpu_addr, dma_addr_t dma_addr, size_t size, in __arm_iommu_mmap_attrs() argument
1527 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; in __arm_iommu_mmap_attrs()
1544 dma_addr_t dma_addr, size_t size, unsigned long attrs) in arm_iommu_mmap_attrs() argument
1548 return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs); in arm_iommu_mmap_attrs()
1553 dma_addr_t dma_addr, size_t size, unsigned long attrs) in arm_coherent_iommu_mmap_attrs() argument
1555 return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs); in arm_coherent_iommu_mmap_attrs()
1562 void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, in __arm_iommu_free_attrs() argument
1566 size = PAGE_ALIGN(size); in __arm_iommu_free_attrs()
1568 if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) { in __arm_iommu_free_attrs()
1569 __iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag); in __arm_iommu_free_attrs()
1580 dma_common_free_remap(cpu_addr, size); in __arm_iommu_free_attrs()
1582 __iommu_remove_mapping(dev, handle, size); in __arm_iommu_free_attrs()
1583 __iommu_free_buffer(dev, pages, size, attrs); in __arm_iommu_free_attrs()
1586 void arm_iommu_free_attrs(struct device *dev, size_t size, in arm_iommu_free_attrs() argument
1589 __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL); in arm_iommu_free_attrs()
1592 void arm_coherent_iommu_free_attrs(struct device *dev, size_t size, in arm_coherent_iommu_free_attrs() argument
1595 __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT); in arm_coherent_iommu_free_attrs()
1600 size_t size, unsigned long attrs) in arm_iommu_get_sgtable() argument
1602 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; in arm_iommu_get_sgtable()
1608 return sg_alloc_table_from_pages(sgt, pages, count, 0, size, in arm_iommu_get_sgtable()
1616 size_t size, dma_addr_t *handle, in __map_sg_chunk() argument
1627 size = PAGE_ALIGN(size); in __map_sg_chunk()
1630 iova_base = iova = __alloc_iova(mapping, size); in __map_sg_chunk()
1634 for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { in __map_sg_chunk()
1654 __free_iova(mapping, iova_base, size); in __map_sg_chunk()
1665 unsigned int size = s->offset + s->length; in __iommu_map_sg() local
1674 if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { in __iommu_map_sg()
1675 if (__map_sg_chunk(dev, start, size, &dma->dma_address, in __iommu_map_sg()
1680 dma->dma_length = size - offset; in __iommu_map_sg()
1682 size = offset = s->offset; in __iommu_map_sg()
1687 size += s->length; in __iommu_map_sg()
1689 if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs, in __iommu_map_sg()
1694 dma->dma_length = size - offset; in __iommu_map_sg()
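The loop at lines 1665-1694 merges adjacent scatterlist entries into one IOVA chunk until a boundary condition forces a flush through __map_sg_chunk(). The merge test, pulled out on its own:

    #include <linux/scatterlist.h>

    static bool can_merge_sketch(struct scatterlist *next, size_t chunk_size,
                                 size_t max)
    {
            if (next->offset)                       /* next entry not page aligned */
                    return false;
            if (chunk_size & ~PAGE_MASK)            /* running chunk has a ragged end */
                    return false;
            if (chunk_size + next->length > max)    /* would exceed the segment limit */
                    return false;
            return true;                            /* safe to extend the chunk */
    }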
1832 * @size: size of buffer to map
1838 unsigned long offset, size_t size, enum dma_data_direction dir, in arm_coherent_iommu_map_page() argument
1843 int ret, prot, len = PAGE_ALIGN(size + offset); in arm_coherent_iommu_map_page()
1866 * @size: size of buffer to map
1872 unsigned long offset, size_t size, enum dma_data_direction dir, in arm_iommu_map_page() argument
1876 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_iommu_map_page()
1878 return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs); in arm_iommu_map_page()
1885 * @size: size of buffer (same as passed to dma_map_page)
1891 size_t size, enum dma_data_direction dir, unsigned long attrs) in arm_coherent_iommu_unmap_page() argument
1896 int len = PAGE_ALIGN(size + offset); in arm_coherent_iommu_unmap_page()
1909 * @size: size of buffer (same as passed to dma_map_page)
1915 size_t size, enum dma_data_direction dir, unsigned long attrs) in arm_iommu_unmap_page() argument
1921 int len = PAGE_ALIGN(size + offset); in arm_iommu_unmap_page()
1927 __dma_page_dev_to_cpu(page, offset, size, dir); in arm_iommu_unmap_page()
1937 * @size: size of resource to map
1941 phys_addr_t phys_addr, size_t size, in arm_iommu_map_resource() argument
1949 size_t len = PAGE_ALIGN(size + offset); in arm_iommu_map_resource()
1971 * @size: size of resource to map
1975 size_t size, enum dma_data_direction dir, in arm_iommu_unmap_resource() argument
1981 size_t len = PAGE_ALIGN(size + offset); in arm_iommu_unmap_resource()
1991 dma_addr_t handle, size_t size, enum dma_data_direction dir) in arm_iommu_sync_single_for_cpu() argument
2001 __dma_page_dev_to_cpu(page, offset, size, dir); in arm_iommu_sync_single_for_cpu()
2005 dma_addr_t handle, size_t size, enum dma_data_direction dir) in arm_iommu_sync_single_for_device() argument
2015 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_iommu_sync_single_for_device()
2062 * @size: maximum size of the valid IO address space
2072 arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size) in arm_iommu_create_mapping() argument
2074 unsigned int bits = size >> PAGE_SHIFT; in arm_iommu_create_mapping()
2081 if (size > DMA_BIT_MASK(32) + 1) in arm_iommu_create_mapping()
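arm_iommu_create_mapping() sizes its bitmap as size >> PAGE_SHIFT bits and rejects windows above 4 GiB (line 2081). Hypothetical driver-side usage, pairing it with arm_iommu_attach_device(); the base address and window size are illustrative:

    #include <linux/platform_device.h>
    #include <linux/sizes.h>
    #include <asm/dma-iommu.h>

    static int attach_sketch(struct device *dev)
    {
            struct dma_iommu_mapping *mapping;
            int err;

            /* a 128 MiB IO window starting at 0x10000000 */
            mapping = arm_iommu_create_mapping(&platform_bus_type,
                                               0x10000000, SZ_128M);
            if (IS_ERR(mapping))
                    return PTR_ERR(mapping);

            err = arm_iommu_attach_device(dev, mapping);
            if (err) {
                    arm_iommu_release_mapping(mapping);
                    return err;
            }
            return 0;
    }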
2242 static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, in arm_setup_iommu_dma_ops() argument
2250 mapping = arm_iommu_create_mapping(dev->bus, dma_base, size); in arm_setup_iommu_dma_ops()
2253 size, dev_name(dev)); in arm_setup_iommu_dma_ops()
2280 static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, in arm_setup_iommu_dma_ops() argument
2292 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, in arch_setup_dma_ops() argument
2310 if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu)) in arch_setup_dma_ops()
2336 size_t size, enum dma_data_direction dir) in arch_sync_dma_for_device() argument
2339 size, dir); in arch_sync_dma_for_device()
2343 size_t size, enum dma_data_direction dir) in arch_sync_dma_for_cpu() argument
2346 size, dir); in arch_sync_dma_for_cpu()
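Finally, the arch_sync_dma_* hooks at lines 2336-2346 adapt the generic interface, which passes a physical address, back to the internal (page, offset, size) helpers; roughly:

    static void sync_for_device_sketch(phys_addr_t paddr, size_t size,
                                       enum dma_data_direction dir)
    {
            __dma_page_cpu_to_dev(phys_to_page(paddr),
                                  paddr & (PAGE_SIZE - 1), size, dir);
    }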
2355 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, in arch_dma_alloc() argument
2358 return __dma_alloc(dev, size, dma_handle, gfp, in arch_dma_alloc()
2363 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, in arch_dma_free() argument
2366 __arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false); in arch_dma_free()