Lines matching full:mapping in linux/arch/arm/mm/dma-mapping.c (each match shows its source line number and, where applicable, its enclosing function)

3  *  linux/arch/arm/mm/dma-mapping.c
7 * DMA uncached mapping support.
420 * Clear previous low-memory mapping to ensure that the in dma_contiguous_remap()
783 * Create userspace mapping for the DMA-coherent memory.
801 * Free a buffer as defined by the above mapping.
910 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
1101 static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
1103 static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, in __alloc_iova() argument
1109 size_t mapping_size = mapping->bits << PAGE_SHIFT; in __alloc_iova()
1120 spin_lock_irqsave(&mapping->lock, flags); in __alloc_iova()
1121 for (i = 0; i < mapping->nr_bitmaps; i++) { in __alloc_iova()
1122 start = bitmap_find_next_zero_area(mapping->bitmaps[i], in __alloc_iova()
1123 mapping->bits, 0, count, align); in __alloc_iova()
1125 if (start > mapping->bits) in __alloc_iova()
1128 bitmap_set(mapping->bitmaps[i], start, count); in __alloc_iova()
1133 * No unused range found. Try to extend the existing mapping in __alloc_iova()
1137 if (i == mapping->nr_bitmaps) { in __alloc_iova()
1138 if (extend_iommu_mapping(mapping)) { in __alloc_iova()
1139 spin_unlock_irqrestore(&mapping->lock, flags); in __alloc_iova()
1143 start = bitmap_find_next_zero_area(mapping->bitmaps[i], in __alloc_iova()
1144 mapping->bits, 0, count, align); in __alloc_iova()
1146 if (start > mapping->bits) { in __alloc_iova()
1147 spin_unlock_irqrestore(&mapping->lock, flags); in __alloc_iova()
1151 bitmap_set(mapping->bitmaps[i], start, count); in __alloc_iova()
1153 spin_unlock_irqrestore(&mapping->lock, flags); in __alloc_iova()
1155 iova = mapping->base + (mapping_size * i); in __alloc_iova()
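
Read together, the __alloc_iova() fragments describe a two-level allocator: the IOVA window is covered by an array of fixed-size bitmaps (one bit per page) that are scanned in order for a free run of count bits; when every bitmap is full, the mapping is extended by one bitmap and the search is retried once. A sketch of how the fragments fit together; the local declarations, the alignment mask, and the DMA_ERROR_CODE error returns do not appear in the match list and are assumptions:

static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
                                      size_t size)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned int align = (1 << get_order(size)) - 1;  /* assumed */
        size_t mapping_size = mapping->bits << PAGE_SHIFT;
        unsigned int start;
        unsigned long flags;
        dma_addr_t iova;
        int i;

        spin_lock_irqsave(&mapping->lock, flags);
        for (i = 0; i < mapping->nr_bitmaps; i++) {
                start = bitmap_find_next_zero_area(mapping->bitmaps[i],
                                mapping->bits, 0, count, align);
                if (start > mapping->bits)
                        continue;       /* this bitmap is full, try the next */

                bitmap_set(mapping->bitmaps[i], start, count);
                break;
        }

        /*
         * No unused range found. Try to extend the existing mapping
         * with one more bitmap, then retry the search once.
         */
        if (i == mapping->nr_bitmaps) {
                if (extend_iommu_mapping(mapping)) {
                        spin_unlock_irqrestore(&mapping->lock, flags);
                        return DMA_ERROR_CODE;
                }
                start = bitmap_find_next_zero_area(mapping->bitmaps[i],
                                mapping->bits, 0, count, align);
                if (start > mapping->bits) {
                        spin_unlock_irqrestore(&mapping->lock, flags);
                        return DMA_ERROR_CODE;
                }
                bitmap_set(mapping->bitmaps[i], start, count);
        }
        spin_unlock_irqrestore(&mapping->lock, flags);

        /* each bitmap covers mapping_size bytes of the IOVA window */
        iova = mapping->base + (mapping_size * i);
        return iova + ((dma_addr_t)start << PAGE_SHIFT);
}
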
1161 static inline void __free_iova(struct dma_iommu_mapping *mapping, in __free_iova() argument
1165 size_t mapping_size = mapping->bits << PAGE_SHIFT; in __free_iova()
1173 bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size; in __free_iova()
1174 BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions); in __free_iova()
1176 bitmap_base = mapping->base + mapping_size * bitmap_index; in __free_iova()
1191 spin_lock_irqsave(&mapping->lock, flags); in __free_iova()
1192 bitmap_clear(mapping->bitmaps[bitmap_index], start, count); in __free_iova()
1193 spin_unlock_irqrestore(&mapping->lock, flags); in __free_iova()
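
The free path inverts that arithmetic: the offset from mapping->base divided by the span of one bitmap selects the bitmap, and the remainder (in pages) selects the starting bit. A worked example with assumed values, consistent with the sketch above:

/* Assumed: base = 0x40000000, mapping->bits = 32768, PAGE_SHIFT = 12,        */
/* so mapping_size = 32768 << 12 = 0x08000000 (128 MiB per bitmap).           */
/* Freeing addr = 0x4800A000:                                                 */
/*   bitmap_index = (addr - base) / mapping_size = 0x0800A000 / 0x08000000 = 1 */
/*   bitmap_base  = base + mapping_size * 1      = 0x48000000                 */
/*   start        = (addr - bitmap_base) >> PAGE_SHIFT = 0xa                  */
/* bitmap_clear(mapping->bitmaps[1], 0xa, count) then releases the bits.      */
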
1309 * Create a mapping in device IO address space for specified pages
1315 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); in __iommu_create_mapping() local
1320 dma_addr = __alloc_iova(mapping, size); in __iommu_create_mapping()
1337 ret = iommu_map(mapping->domain, iova, phys, len, in __iommu_create_mapping()
1346 iommu_unmap(mapping->domain, dma_addr, iova-dma_addr); in __iommu_create_mapping()
1347 __free_iova(mapping, dma_addr, size); in __iommu_create_mapping()
1353 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); in __iommu_remove_mapping() local
1362 iommu_unmap(mapping->domain, iova, size); in __iommu_remove_mapping()
1363 __free_iova(mapping, iova, size); in __iommu_remove_mapping()
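
__iommu_create_mapping() reserves one IOVA range for the whole buffer and then walks the page array, merging physically contiguous pages so each iommu_map() call covers as large a span as possible; on failure it unmaps only what was actually mapped (iova - dma_addr bytes) before giving the range back, and __iommu_remove_mapping() undoes the whole thing the same way. A condensed sketch under that reading; the function name, the merge loop and the declarations are illustrative, not the file's exact code:

static dma_addr_t iommu_map_pages_sketch(struct dma_iommu_mapping *mapping,
                struct page **pages, size_t size, int prot)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        dma_addr_t dma_addr, iova;
        unsigned int i;
        int ret;

        dma_addr = __alloc_iova(mapping, size);
        if (dma_addr == DMA_ERROR_CODE)
                return dma_addr;

        iova = dma_addr;
        for (i = 0; i < count; ) {
                phys_addr_t phys = page_to_phys(pages[i]);
                unsigned int next = i + 1;
                size_t len;

                /* merge physically contiguous pages into one iommu_map() */
                while (next < count &&
                       page_to_phys(pages[next]) == phys + (next - i) * PAGE_SIZE)
                        next++;

                len = (size_t)(next - i) << PAGE_SHIFT;
                ret = iommu_map(mapping->domain, iova, phys, len, prot);
                if (ret < 0)
                        goto fail;
                iova += len;
                i = next;
        }
        return dma_addr;

fail:
        /* unmap only the part that was mapped, then release the IOVA range */
        iommu_unmap(mapping->domain, dma_addr, iova - dma_addr);
        __free_iova(mapping, dma_addr, size);
        return DMA_ERROR_CODE;
}
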
1522 * free a page as defined by the above mapping.
1584 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); in __map_sg_chunk() local
1594 iova_base = iova = __alloc_iova(mapping, size); in __map_sg_chunk()
1607 ret = iommu_map(mapping->domain, iova, phys, len, prot); in __map_sg_chunk()
1617 iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE); in __map_sg_chunk()
1618 __free_iova(mapping, iova_base, size); in __map_sg_chunk()
1812 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); in arm_coherent_iommu_map_page() local
1816 dma_addr = __alloc_iova(mapping, len); in arm_coherent_iommu_map_page()
1822 ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot); in arm_coherent_iommu_map_page()
1828 __free_iova(mapping, dma_addr, len); in arm_coherent_iommu_map_page()
1864 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); in arm_coherent_iommu_unmap_page() local
1872 iommu_unmap(mapping->domain, iova, len); in arm_coherent_iommu_unmap_page()
1873 __free_iova(mapping, iova, len); in arm_coherent_iommu_unmap_page()
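
arm_coherent_iommu_map_page() is the single-page path: reserve a page-aligned IOVA range big enough for offset + size, map it, and return dma_addr + offset so the sub-page offset survives in the handle; the unmap side masks the handle back down to the page boundary before iommu_unmap(). A sketch of the map side, assuming the __dma_info_to_prot() helper from the same file and the DMA_ERROR_CODE convention:

static dma_addr_t arm_coherent_iommu_map_page(struct device *dev,
                struct page *page, unsigned long offset, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
        dma_addr_t dma_addr;
        int ret, prot, len = PAGE_ALIGN(size + offset);

        dma_addr = __alloc_iova(mapping, len);
        if (dma_addr == DMA_ERROR_CODE)
                return dma_addr;

        prot = __dma_info_to_prot(dir, attrs);  /* assumed helper */

        ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
        if (ret < 0)
                goto fail;

        return dma_addr + offset;
fail:
        __free_iova(mapping, dma_addr, len);
        return DMA_ERROR_CODE;
}
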
1888 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); in arm_iommu_unmap_page() local
1890 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); in arm_iommu_unmap_page()
1900 iommu_unmap(mapping->domain, iova, len); in arm_iommu_unmap_page()
1901 __free_iova(mapping, iova, len); in arm_iommu_unmap_page()
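
The non-coherent unmap adds one step the coherent one skips: it resolves the IOVA back to the struct page with iommu_iova_to_phys() so the CPU caches can be made consistent before the translation disappears. A sketch; the DMA_ATTR_SKIP_CPU_SYNC check and the __dma_page_dev_to_cpu() helper are assumed from the rest of the file:

static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
        dma_addr_t iova = handle & PAGE_MASK;
        struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
        unsigned int offset = handle & ~PAGE_MASK;
        size_t len = PAGE_ALIGN(size + offset);

        if (!iova)
                return;

        /* make CPU caches consistent before the IOVA stops translating */
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                __dma_page_dev_to_cpu(page, offset, size, dir);

        iommu_unmap(mapping->domain, iova, len);
        __free_iova(mapping, iova, len);
}
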
1915 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); in arm_iommu_map_resource() local
1922 dma_addr = __alloc_iova(mapping, len); in arm_iommu_map_resource()
1928 ret = iommu_map(mapping->domain, dma_addr, addr, len, prot); in arm_iommu_map_resource()
1934 __free_iova(mapping, dma_addr, len); in arm_iommu_map_resource()
1949 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); in arm_iommu_unmap_resource() local
1957 iommu_unmap(mapping->domain, iova, len); in arm_iommu_unmap_resource()
1958 __free_iova(mapping, iova, len); in arm_iommu_unmap_resource()
1964 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); in arm_iommu_sync_single_for_cpu() local
1966 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); in arm_iommu_sync_single_for_cpu()
1978 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); in arm_iommu_sync_single_for_device() local
1980 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); in arm_iommu_sync_single_for_device()
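
Both sync hooks recover the backing page from the handle the same way, then delegate to the arch cache-maintenance helpers; only the direction differs. A sketch of the CPU-side hook (the device-side one would call __dma_page_cpu_to_dev() instead):

static void arm_iommu_sync_single_for_cpu(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
        dma_addr_t iova = handle & PAGE_MASK;
        struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
        unsigned int offset = handle & ~PAGE_MASK;

        if (!iova)
                return;

        __dma_page_dev_to_cpu(page, offset, size, dir);
}
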
2035 * Creates a mapping structure which holds information about used/unused
2037 * mapping with IOMMU aware functions.
2076 * The client device needs to be attached to the mapping with
2047 struct dma_iommu_mapping *mapping; in arm_iommu_create_mapping() local
2063 mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL); in arm_iommu_create_mapping()
2064 if (!mapping) in arm_iommu_create_mapping()
2067 mapping->bitmap_size = bitmap_size; in arm_iommu_create_mapping()
2068 mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *), in arm_iommu_create_mapping()
2070 if (!mapping->bitmaps) in arm_iommu_create_mapping()
2073 mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL); in arm_iommu_create_mapping()
2074 if (!mapping->bitmaps[0]) in arm_iommu_create_mapping()
2077 mapping->nr_bitmaps = 1; in arm_iommu_create_mapping()
2078 mapping->extensions = extensions; in arm_iommu_create_mapping()
2079 mapping->base = base; in arm_iommu_create_mapping()
2080 mapping->bits = BITS_PER_BYTE * bitmap_size; in arm_iommu_create_mapping()
2082 spin_lock_init(&mapping->lock); in arm_iommu_create_mapping()
2084 mapping->domain = iommu_domain_alloc(bus); in arm_iommu_create_mapping()
2085 if (!mapping->domain) in arm_iommu_create_mapping()
2088 kref_init(&mapping->kref); in arm_iommu_create_mapping()
2089 return mapping; in arm_iommu_create_mapping()
2091 kfree(mapping->bitmaps[0]); in arm_iommu_create_mapping()
2093 kfree(mapping->bitmaps); in arm_iommu_create_mapping()
2095 kfree(mapping); in arm_iommu_create_mapping()
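
The constructor sizes the bitmap layer from the requested window: one bit per page, and if that bitmap would exceed PAGE_SIZE it is split into page-sized extensions that extend_iommu_mapping() allocates on demand (only bitmaps[0] is allocated up front). A worked example with assumed numbers:

/* Assumed: PAGE_SIZE = 4096, sizeof(long) = 4, a 256 MiB window          */
/*   bits          = SZ_256M >> PAGE_SHIFT               = 65536 pages    */
/*   bitmap_size   = BITS_TO_LONGS(65536) * sizeof(long) = 8192 bytes     */
/*   8192 > PAGE_SIZE, so: extensions = 2, bitmap_size = 4096             */
/*   mapping->bits = BITS_PER_BYTE * 4096                = 32768 pages    */
/* bitmaps[1] only appears if the first 128 MiB of IOVA space fills up.   */
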
2104 struct dma_iommu_mapping *mapping = in release_iommu_mapping() local
2107 iommu_domain_free(mapping->domain); in release_iommu_mapping()
2108 for (i = 0; i < mapping->nr_bitmaps; i++) in release_iommu_mapping()
2109 kfree(mapping->bitmaps[i]); in release_iommu_mapping()
2110 kfree(mapping->bitmaps); in release_iommu_mapping()
2111 kfree(mapping); in release_iommu_mapping()
2114 static int extend_iommu_mapping(struct dma_iommu_mapping *mapping) in extend_iommu_mapping() argument
2118 if (mapping->nr_bitmaps >= mapping->extensions) in extend_iommu_mapping()
2121 next_bitmap = mapping->nr_bitmaps; in extend_iommu_mapping()
2122 mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size, in extend_iommu_mapping()
2124 if (!mapping->bitmaps[next_bitmap]) in extend_iommu_mapping()
2127 mapping->nr_bitmaps++; in extend_iommu_mapping()
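
Only the bounds check, the allocation and the counter increment made the match list; the returns are the obvious -EINVAL/-ENOMEM/0, and the allocation presumably needs GFP_ATOMIC since __alloc_iova() calls this with mapping->lock held and interrupts off. A completed sketch under those assumptions:

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
{
        int next_bitmap;

        if (mapping->nr_bitmaps >= mapping->extensions)
                return -EINVAL;         /* window already fully populated */

        next_bitmap = mapping->nr_bitmaps;
        /* GFP_ATOMIC assumed: caller holds mapping->lock with IRQs off */
        mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
                                                GFP_ATOMIC);
        if (!mapping->bitmaps[next_bitmap])
                return -ENOMEM;

        mapping->nr_bitmaps++;
        return 0;
}
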
2132 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping) in arm_iommu_release_mapping() argument
2134 if (mapping) in arm_iommu_release_mapping()
2135 kref_put(&mapping->kref, release_iommu_mapping); in arm_iommu_release_mapping()
2140 struct dma_iommu_mapping *mapping) in __arm_iommu_attach_device() argument
2144 err = iommu_attach_device(mapping->domain, dev); in __arm_iommu_attach_device()
2148 kref_get(&mapping->kref); in __arm_iommu_attach_device()
2149 to_dma_iommu_mapping(dev) = mapping; in __arm_iommu_attach_device()
2158 * @mapping: io address space mapping structure (returned from
2161 * Attaches specified io address space mapping to the provided device.
2166 * mapping.
2169 struct dma_iommu_mapping *mapping) in arm_iommu_attach_device() argument
2173 err = __arm_iommu_attach_device(dev, mapping); in arm_iommu_attach_device()
2191 struct dma_iommu_mapping *mapping; in arm_iommu_detach_device() local
2193 mapping = to_dma_iommu_mapping(dev); in arm_iommu_detach_device()
2194 if (!mapping) { in arm_iommu_detach_device()
2199 iommu_detach_device(mapping->domain, dev); in arm_iommu_detach_device()
2200 kref_put(&mapping->kref, release_iommu_mapping); in arm_iommu_detach_device()
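
Taken together, create/attach/detach/release form the public lifecycle: create initializes the kref to 1, attach takes another reference, detach and release each drop one. A hypothetical driver-side usage sketch (device callbacks, base address and window size are illustrative):

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <asm/dma-iommu.h>

static int example_probe(struct device *dev)
{
        struct dma_iommu_mapping *mapping;
        int err;

        /* one 128 MiB IOVA window starting at 0x10000000 (illustrative) */
        mapping = arm_iommu_create_mapping(dev->bus, 0x10000000, SZ_128M);
        if (IS_ERR(mapping))
                return PTR_ERR(mapping);

        err = arm_iommu_attach_device(dev, mapping);
        if (err) {
                arm_iommu_release_mapping(mapping);
                return err;
        }

        /* dma_map_*() and dma_alloc_coherent() on dev now go via the IOMMU */
        return 0;
}

static void example_remove(struct device *dev)
{
        /* save the pointer first: detach clears to_dma_iommu_mapping(dev) */
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

        arm_iommu_detach_device(dev);           /* drops the attach ref */
        arm_iommu_release_mapping(mapping);     /* drops the creation ref */
}
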
2216 struct dma_iommu_mapping *mapping; in arm_setup_iommu_dma_ops() local
2221 mapping = arm_iommu_create_mapping(dev->bus, dma_base, size); in arm_setup_iommu_dma_ops()
2222 if (IS_ERR(mapping)) { in arm_setup_iommu_dma_ops()
2223 pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n", in arm_setup_iommu_dma_ops()
2228 if (__arm_iommu_attach_device(dev, mapping)) { in arm_setup_iommu_dma_ops()
2231 arm_iommu_release_mapping(mapping); in arm_setup_iommu_dma_ops()
2240 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); in arm_teardown_iommu_dma_ops() local
2242 if (!mapping) in arm_teardown_iommu_dma_ops()
2246 arm_iommu_release_mapping(mapping); in arm_teardown_iommu_dma_ops()
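
For platforms that wire this up automatically, the setup path just chains the constructor and the internal attach, releasing the mapping if the attach fails; teardown is the mirror image. A sketch of the error handling visible in the fragments (the return type and the dma_ops assignment on success are not in the match list and are assumed):

static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
        struct dma_iommu_mapping *mapping;

        mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
        if (IS_ERR(mapping)) {
                pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
                        size, dev_name(dev));
                return false;
        }

        if (__arm_iommu_attach_device(dev, mapping)) {
                pr_warn("Failed to attach device %s to IOMMU mapping\n",
                        dev_name(dev));
                arm_iommu_release_mapping(mapping);
                return false;
        }

        return true;
}
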