Lines Matching +full:segment-no-remap
1 // SPDX-License-Identifier: GPL-2.0-only
9 * Copyright (C) 2000, 2003 Hewlett-Packard Co
10 * David Mosberger-Tang <davidm@hpl.hp.com>
12 * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
14 * unnecessary i-cache flushing.
27 #include <linux/dma-direct.h>
28 #include <linux/dma-map-ops.h>
33 #include <linux/iommu-helper.h>
54 #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
82 * struct io_tlb_area - IO TLB memory area descriptor
101 * otherwise a segment may span two or more areas. It conflicts with free
102 * contiguous slots tracking: free slots are treated as contiguous no matter
140 /* avoid tail segment of size < IO_TLB_SEGSIZE */ in setup_io_tlb_npages()
193 if (!mem->nslabs) { in swiotlb_print_info()
194 pr_warn("No low mem\n"); in swiotlb_print_info()
198 pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end, in swiotlb_print_info()
199 (mem->nslabs << IO_TLB_SHIFT) >> 20); in swiotlb_print_info()
204 return val & (IO_TLB_SEGSIZE - 1); in io_tlb_offset()
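The mask in io_tlb_offset() only works because IO_TLB_SEGSIZE is a power of two: it yields a slot's position inside its segment. A minimal user-space sketch, assuming the usual kernel default of IO_TLB_SEGSIZE = 128 (the value itself is not shown in this listing):

#include <stdio.h>

#define IO_TLB_SEGSIZE	128	/* slots per segment, kernel default (assumed) */

/* mirrors io_tlb_offset() at line 204: offset of a slot within its segment */
static unsigned int io_tlb_offset(unsigned int val)
{
	return val & (IO_TLB_SEGSIZE - 1);
}

int main(void)
{
	/* slot 130 is the third slot of the second 128-slot segment */
	printf("%u %u %u\n", io_tlb_offset(0), io_tlb_offset(127), io_tlb_offset(130));
	/* prints: 0 127 2 */
	return 0;
}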
213 * Remap swiotlb memory in the unencrypted physical address space
214 * when swiotlb_unencrypted_base is set. (e.g. for Hyper-V AMD SEV-SNP
223 phys_addr_t paddr = mem->start + swiotlb_unencrypted_base; in swiotlb_mem_remap()
252 if (!mem->nslabs || mem->late_alloc) in swiotlb_update_mem_attributes()
254 vaddr = phys_to_virt(mem->start); in swiotlb_update_mem_attributes()
255 bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT); in swiotlb_update_mem_attributes()
258 mem->vaddr = swiotlb_mem_remap(mem, bytes); in swiotlb_update_mem_attributes()
259 if (!mem->vaddr) in swiotlb_update_mem_attributes()
260 mem->vaddr = vaddr; in swiotlb_update_mem_attributes()
270 mem->nslabs = nslabs; in swiotlb_init_io_tlb_mem()
271 mem->start = start; in swiotlb_init_io_tlb_mem()
272 mem->end = mem->start + bytes; in swiotlb_init_io_tlb_mem()
273 mem->late_alloc = late_alloc; in swiotlb_init_io_tlb_mem()
274 mem->nareas = nareas; in swiotlb_init_io_tlb_mem()
275 mem->area_nslabs = nslabs / mem->nareas; in swiotlb_init_io_tlb_mem()
277 mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE); in swiotlb_init_io_tlb_mem()
279 for (i = 0; i < mem->nareas; i++) { in swiotlb_init_io_tlb_mem()
280 spin_lock_init(&mem->areas[i].lock); in swiotlb_init_io_tlb_mem()
281 mem->areas[i].index = 0; in swiotlb_init_io_tlb_mem()
282 mem->areas[i].used = 0; in swiotlb_init_io_tlb_mem()
285 for (i = 0; i < mem->nslabs; i++) { in swiotlb_init_io_tlb_mem()
286 mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i); in swiotlb_init_io_tlb_mem()
287 mem->slots[i].orig_addr = INVALID_PHYS_ADDR; in swiotlb_init_io_tlb_mem()
288 mem->slots[i].alloc_size = 0; in swiotlb_init_io_tlb_mem()
299 mem->vaddr = vaddr; in swiotlb_init_io_tlb_mem()
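A small user-space model of the initialization loop at lines 285-288: after swiotlb_init_io_tlb_mem() every free slot's .list field holds the number of free slots from that slot to the end of its segment, which is exactly what the allocator later compares against nslots. IO_TLB_SEGSIZE = 128 is the kernel default and assumed here.

#include <stdio.h>

#define IO_TLB_SEGSIZE	128
#define NSLABS		256	/* two segments, just for the demo */

int main(void)
{
	unsigned int list[NSLABS];

	/* same arithmetic as mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i) */
	for (unsigned int i = 0; i < NSLABS; i++)
		list[i] = IO_TLB_SEGSIZE - (i & (IO_TLB_SEGSIZE - 1));

	/* first slot of a segment sees 128 free slots ahead, the last slot sees 1 */
	printf("slot 0: %u, slot 127: %u, slot 128: %u\n",
	       list[0], list[127], list[128]);	/* prints: 128, 1, 128 */
	return 0;
}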
308 int (*remap)(void *tlb, unsigned long nslabs)) in swiotlb_init_remap()
345 if (remap && remap(tlb, nslabs) < 0) { in swiotlb_init_remap()
352 pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes); in swiotlb_init_remap()
356 alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs)); in swiotlb_init_remap()
357 mem->slots = memblock_alloc(alloc_size, PAGE_SIZE); in swiotlb_init_remap()
358 if (!mem->slots) { in swiotlb_init_remap()
364 mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area), in swiotlb_init_remap()
366 if (!mem->areas) { in swiotlb_init_remap()
367 pr_warn("%s: Failed to allocate mem->areas.\n", __func__); in swiotlb_init_remap()
389 int (*remap)(void *tlb, unsigned long nslabs)) in swiotlb_init_late()
410 order--; in swiotlb_init_late()
416 return -ENOMEM; in swiotlb_init_late()
418 if (remap) in swiotlb_init_late()
419 rc = remap(vstart, nslabs); in swiotlb_init_late()
438 area_order = get_order(array_size(sizeof(*mem->areas), in swiotlb_init_late()
440 mem->areas = (struct io_tlb_area *) in swiotlb_init_late()
442 if (!mem->areas) in swiotlb_init_late()
445 mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, in swiotlb_init_late()
446 get_order(array_size(sizeof(*mem->slots), nslabs))); in swiotlb_init_late()
447 if (!mem->slots) in swiotlb_init_late()
459 free_pages((unsigned long)mem->areas, area_order); in swiotlb_init_late()
462 return -ENOMEM; in swiotlb_init_late()
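The order-- retry at line 410 is the usual late-initialization fallback: ask for the largest contiguous block first and halve the request until it succeeds or would drop below a floor (IO_TLB_MIN_SLABS in the kernel). A hedged user-space sketch with malloc() standing in for __get_free_pages():

#include <stdlib.h>

/*
 * Try progressively smaller power-of-two sizes.  Returns NULL when even the
 * floor cannot be satisfied, which the kernel turns into -ENOMEM.
 */
static void *alloc_largest(size_t want, size_t floor, size_t *got)
{
	for (size_t size = want; size >= floor; size /= 2) {
		void *p = malloc(size);

		if (p) {
			*got = size;
			return p;
		}
	}
	return NULL;
}

When the loop settles on a smaller size, the kernel recomputes nslabs from the order it actually obtained before carrying on.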
475 if (!mem->nslabs) in swiotlb_exit()
479 tbl_vaddr = (unsigned long)phys_to_virt(mem->start); in swiotlb_exit()
480 tbl_size = PAGE_ALIGN(mem->end - mem->start); in swiotlb_exit()
481 slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs)); in swiotlb_exit()
484 if (mem->late_alloc) { in swiotlb_exit()
485 area_order = get_order(array_size(sizeof(*mem->areas), in swiotlb_exit()
486 mem->nareas)); in swiotlb_exit()
487 free_pages((unsigned long)mem->areas, area_order); in swiotlb_exit()
489 free_pages((unsigned long)mem->slots, get_order(slots_size)); in swiotlb_exit()
491 memblock_free_late(__pa(mem->areas), in swiotlb_exit()
492 array_size(sizeof(*mem->areas), mem->nareas)); in swiotlb_exit()
493 memblock_free_late(mem->start, tbl_size); in swiotlb_exit()
494 memblock_free_late(__pa(mem->slots), slots_size); in swiotlb_exit()
505 return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1); in swiotlb_align_offset()
514 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_bounce()
515 int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT; in swiotlb_bounce()
516 phys_addr_t orig_addr = mem->slots[index].orig_addr; in swiotlb_bounce()
517 size_t alloc_size = mem->slots[index].alloc_size; in swiotlb_bounce()
519 unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start; in swiotlb_bounce()
525 tlb_offset = tlb_addr & (IO_TLB_SIZE - 1); in swiotlb_bounce()
534 tlb_offset -= orig_addr_offset; in swiotlb_bounce()
543 alloc_size -= tlb_offset; in swiotlb_bounce()
559 sz = min_t(size_t, PAGE_SIZE - offset, size); in swiotlb_bounce()
569 size -= sz; in swiotlb_bounce()
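swiotlb_align_offset() at line 505 is what keeps the bounce buffer usable by devices that derive an internal offset from the low bits of the DMA address: within one IO_TLB_SIZE slot, the bits covered by the device's min_align_mask are preserved when the buffer is relocated. A user-space sketch; the mask value is an illustrative assumption, not taken from this listing:

#include <stdint.h>
#include <assert.h>

#define IO_TLB_SHIFT	11			/* kernel default (assumed) */
#define IO_TLB_SIZE	(1u << IO_TLB_SHIFT)	/* 2 KiB slots */

/* mirrors swiotlb_align_offset(): the low bits the bounce address must keep */
static unsigned int align_offset(uint64_t addr, uint64_t min_align_mask)
{
	return addr & min_align_mask & (IO_TLB_SIZE - 1);
}

int main(void)
{
	uint64_t orig = 0x12345678;	/* original buffer address */
	uint64_t mask = 0xfff;		/* device keeps offsets within 4 KiB */

	/* tlb_addr = slot_addr(mem->start, index) + this offset (line 769) */
	assert(align_offset(orig, mask) == 0x678);
	return 0;
}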
592 return 1UL << (BITS_PER_LONG - IO_TLB_SHIFT); in get_max_slots()
598 if (index >= mem->area_nslabs) in wrap_area_index()
611 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_do_find_slots()
612 struct io_tlb_area *area = mem->areas + area_index; in swiotlb_do_find_slots()
615 phys_to_dma_unencrypted(dev, mem->start) & boundary_mask; in swiotlb_do_find_slots()
618 dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1); in swiotlb_do_find_slots()
627 BUG_ON(area_index >= mem->nareas); in swiotlb_do_find_slots()
636 stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT)); in swiotlb_do_find_slots()
639 spin_lock_irqsave(&area->lock, flags); in swiotlb_do_find_slots()
640 if (unlikely(nslots > mem->area_nslabs - area->used)) in swiotlb_do_find_slots()
643 slot_base = area_index * mem->area_nslabs; in swiotlb_do_find_slots()
644 index = wrap = wrap_area_index(mem, ALIGN(area->index, stride)); in swiotlb_do_find_slots()
664 if (mem->slots[slot_index].list >= nslots) in swiotlb_do_find_slots()
671 spin_unlock_irqrestore(&area->lock, flags); in swiotlb_do_find_slots()
672 return -1; in swiotlb_do_find_slots()
676 mem->slots[i].list = 0; in swiotlb_do_find_slots()
677 mem->slots[i].alloc_size = alloc_size - (offset + in swiotlb_do_find_slots()
678 ((i - slot_index) << IO_TLB_SHIFT)); in swiotlb_do_find_slots()
680 for (i = slot_index - 1; in swiotlb_do_find_slots()
681 io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && in swiotlb_do_find_slots()
682 mem->slots[i].list; i--) in swiotlb_do_find_slots()
683 mem->slots[i].list = ++count; in swiotlb_do_find_slots()
688 if (index + nslots < mem->area_nslabs) in swiotlb_do_find_slots()
689 area->index = index + nslots; in swiotlb_do_find_slots()
691 area->index = 0; in swiotlb_do_find_slots()
692 area->used += nslots; in swiotlb_do_find_slots()
693 spin_unlock_irqrestore(&area->lock, flags); in swiotlb_do_find_slots()
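Because .list at a free slot counts the free slots ahead of it within the same segment, the single comparison at line 664 (mem->slots[slot_index].list >= nslots) is enough to know that a contiguous run fits. A simplified single-segment, single-area user-space model of the claim step follows; it ignores the stride, alignment and wrap-around handling shown above:

#include <stdio.h>

#define IO_TLB_SEGSIZE	128
#define NSLABS		IO_TLB_SEGSIZE	/* one segment, one area, for the demo */

static unsigned int list[NSLABS];

static void init_slots(void)
{
	for (unsigned int i = 0; i < NSLABS; i++)
		list[i] = IO_TLB_SEGSIZE - i;	/* free slots up to the segment end */
}

/* claim nslots contiguous slots; returns the first index, or -1 if none fit */
static int claim(unsigned int nslots)
{
	for (unsigned int i = 0; i < NSLABS; i++) {
		unsigned int count = 0;

		if (list[i] < nslots)
			continue;		/* in use (0) or run too short */
		for (unsigned int j = i; j < i + nslots; j++)
			list[j] = 0;		/* mark the run as allocated */
		for (int k = (int)i - 1; k >= 0 && list[k]; k--)
			list[k] = ++count;	/* the free run before us now ends here */
		return (int)i;
	}
	return -1;
}

int main(void)
{
	init_slots();
	printf("%d\n", claim(4));	/*  0: first run starts at slot 0 */
	printf("%d\n", claim(4));	/*  4: next run starts right after it */
	printf("%d\n", claim(200));	/* -1: no run of 200 slots in one segment */
	return 0;
}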
700 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_find_slots()
701 int start = raw_smp_processor_id() & (mem->nareas - 1); in swiotlb_find_slots()
709 if (++i >= mem->nareas) in swiotlb_find_slots()
713 return -1; in swiotlb_find_slots()
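swiotlb_find_slots() spreads allocations across areas by starting from the area picked at line 701; because nareas is a power of two the AND is a cheap modulo of the CPU number, and every area is tried once, wrapping around, before the allocation fails. A minimal sketch of that traversal (try_area() is a hypothetical stand-in for swiotlb_do_find_slots()):

static int find_in_any_area(int cpu, int nareas, int (*try_area)(int area))
{
	int start = cpu & (nareas - 1);		/* nareas is a power of two */
	int i = start;

	do {
		int index = try_area(i);	/* < 0 means this area is full */

		if (index >= 0)
			return index;
		if (++i >= nareas)
			i = 0;			/* wrap around to area 0 */
	} while (i != start);

	return -1;				/* every area was full */
}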
721 for (i = 0; i < mem->nareas; i++) in mem_used()
722 used += mem->areas[i].used; in mem_used()
731 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_tbl_map_single()
737 if (!mem || !mem->nslabs) { in swiotlb_tbl_map_single()
754 if (index == -1) { in swiotlb_tbl_map_single()
758 alloc_size, mem->nslabs, mem_used(mem)); in swiotlb_tbl_map_single()
768 mem->slots[index + i].orig_addr = slot_addr(orig_addr, i); in swiotlb_tbl_map_single()
769 tlb_addr = slot_addr(mem->start, index) + offset; in swiotlb_tbl_map_single()
775 * kernel memory) to user-space. in swiotlb_tbl_map_single()
783 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_release_slots()
786 int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT; in swiotlb_release_slots()
787 int nslots = nr_slots(mem->slots[index].alloc_size + offset); in swiotlb_release_slots()
788 int aindex = index / mem->area_nslabs; in swiotlb_release_slots()
789 struct io_tlb_area *area = &mem->areas[aindex]; in swiotlb_release_slots()
798 BUG_ON(aindex >= mem->nareas); in swiotlb_release_slots()
800 spin_lock_irqsave(&area->lock, flags); in swiotlb_release_slots()
802 count = mem->slots[index + nslots].list; in swiotlb_release_slots()
810 for (i = index + nslots - 1; i >= index; i--) { in swiotlb_release_slots()
811 mem->slots[i].list = ++count; in swiotlb_release_slots()
812 mem->slots[i].orig_addr = INVALID_PHYS_ADDR; in swiotlb_release_slots()
813 mem->slots[i].alloc_size = 0; in swiotlb_release_slots()
820 for (i = index - 1; in swiotlb_release_slots()
821 io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list; in swiotlb_release_slots()
822 i--) in swiotlb_release_slots()
823 mem->slots[i].list = ++count; in swiotlb_release_slots()
824 area->used -= nslots; in swiotlb_release_slots()
825 spin_unlock_irqrestore(&area->lock, flags); in swiotlb_release_slots()
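swiotlb_release_slots() is the inverse of the claim step: each freed slot is given the count of free slots between it and the end of whatever free run follows (line 802), and the free slots immediately before the freed range have their counts extended (lines 820-823) so the merged range reads as one contiguous run again. A single-segment user-space sketch; the real code also stops at IO_TLB_SEGSIZE boundaries, which here coincide with the array bounds:

#define NSLABS	128	/* one IO_TLB_SEGSIZE-sized segment */

static unsigned int list[NSLABS];

static void release(unsigned int index, unsigned int nslots)
{
	/* free slots right after the freed range (0 if that slot is in use) */
	unsigned int count = (index + nslots < NSLABS) ? list[index + nslots] : 0;

	for (int i = (int)(index + nslots) - 1; i >= (int)index; i--)
		list[i] = ++count;		/* the freed slots join that run */

	for (int i = (int)index - 1; i >= 0 && list[i]; i--)
		list[i] = ++count;		/* and the run before us grows too */
}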
887 &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit); in swiotlb_map()
909 return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align; in swiotlb_max_mapping_size()
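With the usual kernel defaults (IO_TLB_SIZE = 2048 bytes and IO_TLB_SEGSIZE = 128, both assumed here rather than shown in this listing), line 909 caps a single bounce-buffered mapping at one segment, i.e. 256 KiB, less whatever offset the device's min_align_mask forces at the start:

#include <stdio.h>

int main(void)
{
	size_t io_tlb_size = 2048;	/* IO_TLB_SIZE = 1 << 11 */
	size_t io_tlb_segsize = 128;	/* IO_TLB_SEGSIZE */
	size_t min_align = 0;		/* device with no min_align_mask */

	/* 2048 * 128 = 262144 bytes = 256 KiB */
	printf("%zu\n", io_tlb_size * io_tlb_segsize - min_align);
	return 0;
}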
914 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in is_swiotlb_active()
916 return mem && mem->nslabs; in is_swiotlb_active()
930 mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs); in swiotlb_create_debugfs_files()
931 if (!mem->nslabs) in swiotlb_create_debugfs_files()
934 debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs); in swiotlb_create_debugfs_files()
935 debugfs_create_file("io_tlb_used", 0400, mem->debugfs, NULL, in swiotlb_create_debugfs_files()
953 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_alloc()
961 if (index == -1) in swiotlb_alloc()
964 tlb_addr = slot_addr(mem->start, index); in swiotlb_alloc()
984 struct io_tlb_mem *mem = rmem->priv; in rmem_swiotlb_device_init()
985 unsigned long nslabs = rmem->size >> IO_TLB_SHIFT; in rmem_swiotlb_device_init()
987 /* Set the per-device IO TLB area count to one */ in rmem_swiotlb_device_init()
998 return -ENOMEM; in rmem_swiotlb_device_init()
1000 mem->slots = kcalloc(nslabs, sizeof(*mem->slots), GFP_KERNEL); in rmem_swiotlb_device_init()
1001 if (!mem->slots) { in rmem_swiotlb_device_init()
1003 return -ENOMEM; in rmem_swiotlb_device_init()
1006 mem->areas = kcalloc(nareas, sizeof(*mem->areas), in rmem_swiotlb_device_init()
1008 if (!mem->areas) { in rmem_swiotlb_device_init()
1009 kfree(mem->slots); in rmem_swiotlb_device_init()
1011 return -ENOMEM; in rmem_swiotlb_device_init()
1014 set_memory_decrypted((unsigned long)phys_to_virt(rmem->base), in rmem_swiotlb_device_init()
1015 rmem->size >> PAGE_SHIFT); in rmem_swiotlb_device_init()
1016 swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, SWIOTLB_FORCE, in rmem_swiotlb_device_init()
1018 mem->for_alloc = true; in rmem_swiotlb_device_init()
1020 rmem->priv = mem; in rmem_swiotlb_device_init()
1022 swiotlb_create_debugfs_files(mem, rmem->name); in rmem_swiotlb_device_init()
1025 dev->dma_io_tlb_mem = mem; in rmem_swiotlb_device_init()
1033 dev->dma_io_tlb_mem = &io_tlb_default_mem; in rmem_swiotlb_device_release()
1043 unsigned long node = rmem->fdt_node; in rmem_swiotlb_setup()
1046 of_get_flat_dt_prop(node, "linux,cma-default", NULL) || in rmem_swiotlb_setup()
1047 of_get_flat_dt_prop(node, "linux,dma-default", NULL) || in rmem_swiotlb_setup()
1048 of_get_flat_dt_prop(node, "no-map", NULL)) in rmem_swiotlb_setup()
1049 return -EINVAL; in rmem_swiotlb_setup()
1051 if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) { in rmem_swiotlb_setup()
1053 return -EINVAL; in rmem_swiotlb_setup()
1056 rmem->ops = &rmem_swiotlb_ops; in rmem_swiotlb_setup()
1058 &rmem->base, (unsigned long)rmem->size / SZ_1M); in rmem_swiotlb_setup()
1062 RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);