Lines matching +full:i +full:- +full:tlb +full:- +full:size (kernel/dma/swiotlb.c)

1 // SPDX-License-Identifier: GPL-2.0-only
6 * I/O TLBs (aka DMA address translation hardware).
9 * Copyright (C) 2000, 2003 Hewlett-Packard Co
10 * David Mosberger-Tang <davidm@hpl.hp.com>
12 * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
14 * unnecessary i-cache flushing.
21 #define pr_fmt(fmt) "software IO TLB: " fmt
24 #include <linux/dma-direct.h>
25 #include <linux/dma-map-ops.h>
55 #include <linux/iommu-helper.h>
60 #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
63 * Minimum IO TLB size to bother booting with. Systems with mainly
87 /* avoid tail segment of size < IO_TLB_SEGSIZE */ in setup_io_tlb_npages()
121 void __init swiotlb_adjust_size(unsigned long size) in swiotlb_adjust_size() argument
126 * adjust/expand SWIOTLB size for their use. in swiotlb_adjust_size()
130 size = ALIGN(size, IO_TLB_SIZE); in swiotlb_adjust_size()
131 default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE); in swiotlb_adjust_size()
132 pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20); in swiotlb_adjust_size()
139 if (!mem->nslabs) { in swiotlb_print_info()
144 pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end, in swiotlb_print_info()
145 (mem->nslabs << IO_TLB_SHIFT) >> 20); in swiotlb_print_info()
150 return val & (IO_TLB_SEGSIZE - 1); in io_tlb_offset()
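
The size bookkeeping above boils down to shift-and-mask arithmetic: swiotlb_adjust_size() rounds the requested size up to whole slots and then to whole IO_TLB_SEGSIZE segments, and io_tlb_offset() gives a slot's position inside its segment. The standalone sketch below reproduces that arithmetic in plain C, assuming the usual constants IO_TLB_SHIFT = 11 (2 KiB slots) and IO_TLB_SEGSIZE = 128; the helper names are illustrative, not the kernel's.

#include <stdio.h>

/* Assumed constants matching the kernel's usual values. */
#define IO_TLB_SHIFT    11                      /* each slot ("slab") is 2 KiB */
#define IO_TLB_SIZE     (1UL << IO_TLB_SHIFT)
#define IO_TLB_SEGSIZE  128                     /* slots per segment */

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

/* Same math as swiotlb_adjust_size(): bytes -> slot count, whole segments. */
static unsigned long bytes_to_nslabs(unsigned long size)
{
        size = ALIGN_UP(size, IO_TLB_SIZE);
        return ALIGN_UP(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
}

/* Same math as io_tlb_offset(): position of slot i inside its segment. */
static unsigned long slot_offset_in_segment(unsigned long i)
{
        return i & (IO_TLB_SEGSIZE - 1);
}

int main(void)
{
        unsigned long nslabs = bytes_to_nslabs(64UL << 20);    /* 64 MiB pool */

        printf("64 MiB -> %lu slots (%lu MiB mapped)\n",
               nslabs, (nslabs << IO_TLB_SHIFT) >> 20);
        printf("slot 300 sits at offset %lu of its segment\n",
               slot_offset_in_segment(300));
        return 0;
}
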
170 if (!mem->nslabs || mem->late_alloc) in swiotlb_update_mem_attributes()
172 vaddr = phys_to_virt(mem->start); in swiotlb_update_mem_attributes()
173 bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT); in swiotlb_update_mem_attributes()
182 unsigned long bytes = nslabs << IO_TLB_SHIFT, i; in swiotlb_init_io_tlb_mem() local
184 mem->nslabs = nslabs; in swiotlb_init_io_tlb_mem()
185 mem->start = start; in swiotlb_init_io_tlb_mem()
186 mem->end = mem->start + bytes; in swiotlb_init_io_tlb_mem()
187 mem->index = 0; in swiotlb_init_io_tlb_mem()
188 mem->late_alloc = late_alloc; in swiotlb_init_io_tlb_mem()
191 mem->force_bounce = true; in swiotlb_init_io_tlb_mem()
193 spin_lock_init(&mem->lock); in swiotlb_init_io_tlb_mem()
194 for (i = 0; i < mem->nslabs; i++) { in swiotlb_init_io_tlb_mem()
195 mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i); in swiotlb_init_io_tlb_mem()
196 mem->slots[i].orig_addr = INVALID_PHYS_ADDR; in swiotlb_init_io_tlb_mem()
197 mem->slots[i].alloc_size = 0; in swiotlb_init_io_tlb_mem()
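
The initialisation loop above (lines 194-197) seeds each slot's list field with the number of contiguous free slots from that slot to the end of its 128-slot segment; that count is what later lets the allocator decide with a single comparison whether a run fits. A minimal model of the resulting pattern, reusing the assumed constants from the previous sketch:

#include <stdio.h>

#define IO_TLB_SEGSIZE  128     /* assumed, as above */

int main(void)
{
        /* list[i] after init: free slots remaining from i to its segment end */
        for (unsigned long i = 0; i < 2 * IO_TLB_SEGSIZE; i++) {
                unsigned long list = IO_TLB_SEGSIZE - (i & (IO_TLB_SEGSIZE - 1));

                if (i < 3 || (i >= IO_TLB_SEGSIZE - 1 && i <= IO_TLB_SEGSIZE + 1))
                        printf("slot %3lu: list = %lu\n", i, list);
        }
        return 0;
}

So slot 0 of each segment starts with a count of 128 and the last slot with 1, and the counts reset at every segment boundary, which is why an allocation can never span two segments.
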
202 int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) in swiotlb_init_with_tbl() argument
211 if (WARN_ON_ONCE(mem->nslabs)) in swiotlb_init_with_tbl()
212 return -ENOMEM; in swiotlb_init_with_tbl()
214 alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs)); in swiotlb_init_with_tbl()
215 mem->slots = memblock_alloc(alloc_size, PAGE_SIZE); in swiotlb_init_with_tbl()
216 if (!mem->slots) in swiotlb_init_with_tbl()
220 swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false); in swiotlb_init_with_tbl()
224 swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT); in swiotlb_init_with_tbl()
230 * structures for the software IO TLB used to implement the DMA API.
236 void *tlb; in swiotlb_init() local
241 /* Get IO TLB memory from the low pages */ in swiotlb_init()
242 tlb = memblock_alloc_low(bytes, PAGE_SIZE); in swiotlb_init()
243 if (!tlb) in swiotlb_init()
245 if (swiotlb_init_with_tbl(tlb, default_nslabs, verbose)) in swiotlb_init()
250 memblock_free_early(__pa(tlb), bytes); in swiotlb_init()
274 * Get IO TLB memory from the low pages in swiotlb_late_init_with_default_size()
285 order--; in swiotlb_late_init_with_default_size()
289 return -ENOMEM; in swiotlb_late_init_with_default_size()
304 swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) in swiotlb_late_init_with_tbl() argument
313 if (WARN_ON_ONCE(mem->nslabs)) in swiotlb_late_init_with_tbl()
314 return -ENOMEM; in swiotlb_late_init_with_tbl()
316 mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, in swiotlb_late_init_with_tbl()
317 get_order(array_size(sizeof(*mem->slots), nslabs))); in swiotlb_late_init_with_tbl()
318 if (!mem->slots) in swiotlb_late_init_with_tbl()
319 return -ENOMEM; in swiotlb_late_init_with_tbl()
321 set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT); in swiotlb_late_init_with_tbl()
322 swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true); in swiotlb_late_init_with_tbl()
325 swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT); in swiotlb_late_init_with_tbl()
335 if (!mem->nslabs) in swiotlb_exit()
339 tbl_vaddr = (unsigned long)phys_to_virt(mem->start); in swiotlb_exit()
340 tbl_size = PAGE_ALIGN(mem->end - mem->start); in swiotlb_exit()
341 slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs)); in swiotlb_exit()
344 if (mem->late_alloc) { in swiotlb_exit()
346 free_pages((unsigned long)mem->slots, get_order(slots_size)); in swiotlb_exit()
348 memblock_free_late(mem->start, tbl_size); in swiotlb_exit()
349 memblock_free_late(__pa(mem->slots), slots_size); in swiotlb_exit()
360 return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1); in swiotlb_align_offset()
366 static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size, in swiotlb_bounce() argument
369 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_bounce()
370 int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT; in swiotlb_bounce()
371 phys_addr_t orig_addr = mem->slots[index].orig_addr; in swiotlb_bounce()
372 size_t alloc_size = mem->slots[index].alloc_size; in swiotlb_bounce()
380 tlb_offset = tlb_addr & (IO_TLB_SIZE - 1); in swiotlb_bounce()
389 tlb_offset -= orig_addr_offset; in swiotlb_bounce()
392 "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu+%u.\n", in swiotlb_bounce()
393 alloc_size, size, tlb_offset); in swiotlb_bounce()
398 alloc_size -= tlb_offset; in swiotlb_bounce()
400 if (size > alloc_size) { in swiotlb_bounce()
402 "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n", in swiotlb_bounce()
403 alloc_size, size); in swiotlb_bounce()
404 size = alloc_size; in swiotlb_bounce()
414 while (size) { in swiotlb_bounce()
415 sz = min_t(size_t, PAGE_SIZE - offset, size); in swiotlb_bounce()
426 size -= sz; in swiotlb_bounce()
432 memcpy(vaddr, phys_to_virt(orig_addr), size); in swiotlb_bounce()
434 memcpy(phys_to_virt(orig_addr), vaddr, size); in swiotlb_bounce()
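
swiotlb_bounce() has to line each copy up with the low address bits the device expects preserved (dma_get_min_align_mask) and clamp the copy length to what was actually allocated, warning rather than overrunning the slots. The sketch below models only that offset and clamp arithmetic with example numbers; the 4 KiB min-align mask, the addresses and the early "starts before the mapping" check are illustrative assumptions, and the real function of course also performs the copy (page by page on highmem).

#include <stdio.h>

#define IO_TLB_SIZE     2048UL          /* assumed slot size, as above */

enum dir { TO_DEVICE, FROM_DEVICE };

/* Model of the offset/clamp logic only; no data is actually copied here. */
static void bounce_check(unsigned long orig_addr, unsigned long tlb_addr,
                         size_t alloc_size, size_t size,
                         unsigned long min_align_mask, enum dir dir)
{
        unsigned long orig_off = orig_addr & min_align_mask & (IO_TLB_SIZE - 1);
        unsigned long tlb_off = tlb_addr & (IO_TLB_SIZE - 1);

        if (tlb_off < orig_off) {
                printf("sync starts before the mapping, refusing\n");
                return;
        }
        tlb_off -= orig_off;            /* offset of this sync into the mapping */

        if (tlb_off > alloc_size) {
                printf("buffer overflow: alloc %zu, mapping %zu+%lu\n",
                       alloc_size, size, tlb_off);
                return;
        }
        alloc_size -= tlb_off;
        if (size > alloc_size) {        /* clamp rather than overrun the slots */
                printf("buffer overflow: clamping %zu to %zu\n", size, alloc_size);
                size = alloc_size;
        }
        printf("%s bounce buffer: %zu bytes at mapping offset %lu\n",
               dir == TO_DEVICE ? "copy to" : "copy from", size, tlb_off);
}

int main(void)
{
        /* full sync of a 1536-byte mapping; device wants 4 KiB offset bits kept */
        bounce_check(0x1180, 0x20180, 1536, 1536, 0xfff, TO_DEVICE);
        /* partial sync starting 384 bytes in, asking for more than remains */
        bounce_check(0x1180, 0x20300, 1536, 1536, 0xfff, FROM_DEVICE);
        return 0;
}
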
446 return 1UL << (BITS_PER_LONG - IO_TLB_SHIFT); in get_max_slots()
452 if (index >= mem->nslabs) in wrap_index()
458  * Find a suitable number of IO TLB entries that will fit this request and in swiotlb_find_slots()
459 * allocate a buffer from that IO TLB pool.
464 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_find_slots()
467 phys_to_dma_unencrypted(dev, mem->start) & boundary_mask; in swiotlb_find_slots()
470 dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1); in swiotlb_find_slots()
472 unsigned int index, wrap, count = 0, i; in swiotlb_find_slots() local
485 stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT)); in swiotlb_find_slots()
487 spin_lock_irqsave(&mem->lock, flags); in swiotlb_find_slots()
488 if (unlikely(nslots > mem->nslabs - mem->used)) in swiotlb_find_slots()
491 index = wrap = wrap_index(mem, ALIGN(mem->index, stride)); in swiotlb_find_slots()
508 if (mem->slots[index].list >= nslots) in swiotlb_find_slots()
515 spin_unlock_irqrestore(&mem->lock, flags); in swiotlb_find_slots()
516 return -1; in swiotlb_find_slots()
519 for (i = index; i < index + nslots; i++) { in swiotlb_find_slots()
520 mem->slots[i].list = 0; in swiotlb_find_slots()
521 mem->slots[i].alloc_size = in swiotlb_find_slots()
522 alloc_size - (offset + ((i - index) << IO_TLB_SHIFT)); in swiotlb_find_slots()
524 for (i = index - 1; in swiotlb_find_slots()
525 io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && in swiotlb_find_slots()
526 mem->slots[i].list; i--) in swiotlb_find_slots()
527 mem->slots[i].list = ++count; in swiotlb_find_slots()
532 if (index + nslots < mem->nslabs) in swiotlb_find_slots()
533 mem->index = index + nslots; in swiotlb_find_slots()
535 mem->index = 0; in swiotlb_find_slots()
536 mem->used += nslots; in swiotlb_find_slots()
538 spin_unlock_irqrestore(&mem->lock, flags); in swiotlb_find_slots()
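
Before the scan, swiotlb_find_slots() turns the device's min_align_mask and the allocation size into a search stride so that it never probes slots which could not satisfy the alignment: line 485 above doubles the stride for allocations of PAGE_SIZE or more so that only page-aligned slots are considered. The sketch below shows how such a stride works out numerically; only the PAGE_SIZE doubling is taken from the excerpt, the base "one slot plus the slots spanned by the preserved alignment bits" expression is an assumption, and the helper name is illustrative.

#include <stdio.h>

#define IO_TLB_SHIFT    11                      /* assumed 2 KiB slots */
#define IO_TLB_SIZE     (1UL << IO_TLB_SHIFT)
#define PAGE_SHIFT      12                      /* assumed 4 KiB pages */
#define PAGE_SIZE       (1UL << PAGE_SHIFT)

/* How many slots to skip between candidate start positions. */
static unsigned long search_stride(unsigned long min_align_mask, size_t alloc_size)
{
        unsigned long iotlb_align_mask = min_align_mask & ~(IO_TLB_SIZE - 1);
        unsigned long stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;

        /* allocations of a page or more only start on page-aligned slots */
        if (alloc_size >= PAGE_SIZE) {
                unsigned long page_stride = stride << (PAGE_SHIFT - IO_TLB_SHIFT);
                if (page_stride > stride)
                        stride = page_stride;
        }
        return stride;
}

int main(void)
{
        printf("mask 0x0,   1 KiB -> stride %lu\n", search_stride(0x0, 1024));
        printf("mask 0x0,   8 KiB -> stride %lu\n", search_stride(0x0, 8192));
        printf("mask 0xfff, 1 KiB -> stride %lu\n", search_stride(0xfff, 1024));
        return 0;
}
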
546 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_tbl_map_single()
548 unsigned int i; in swiotlb_tbl_map_single() local
565 if (index == -1) { in swiotlb_tbl_map_single()
569 alloc_size, mem->nslabs, mem->used); in swiotlb_tbl_map_single()
578 for (i = 0; i < nr_slots(alloc_size + offset); i++) in swiotlb_tbl_map_single()
579 mem->slots[index + i].orig_addr = slot_addr(orig_addr, i); in swiotlb_tbl_map_single()
580 tlb_addr = slot_addr(mem->start, index) + offset; in swiotlb_tbl_map_single()
589 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_release_slots()
592 int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT; in swiotlb_release_slots()
593 int nslots = nr_slots(mem->slots[index].alloc_size + offset); in swiotlb_release_slots()
594 int count, i; in swiotlb_release_slots() local
602 spin_lock_irqsave(&mem->lock, flags); in swiotlb_release_slots()
604 count = mem->slots[index + nslots].list; in swiotlb_release_slots()
612 for (i = index + nslots - 1; i >= index; i--) { in swiotlb_release_slots()
613 mem->slots[i].list = ++count; in swiotlb_release_slots()
614 mem->slots[i].orig_addr = INVALID_PHYS_ADDR; in swiotlb_release_slots()
615 mem->slots[i].alloc_size = 0; in swiotlb_release_slots()
622 for (i = index - 1; in swiotlb_release_slots()
623 io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list; in swiotlb_release_slots()
624 i--) in swiotlb_release_slots()
625 mem->slots[i].list = ++count; in swiotlb_release_slots()
626 mem->used -= nslots; in swiotlb_release_slots()
627 spin_unlock_irqrestore(&mem->lock, flags); in swiotlb_release_slots()
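
Taken together, swiotlb_find_slots() and swiotlb_release_slots() implement a per-segment free-run counter: slots[i].list is the length of the free run starting at i, runs never cross a 128-slot segment boundary, and both allocation and release only touch counters inside one segment. The self-contained model below reproduces that bookkeeping on a plain array; the wrap-around search, address and boundary masks, stride and locking are left out, and all names are illustrative.

#include <stdio.h>

#define SEGSIZE         128                     /* assumed IO_TLB_SEGSIZE */
#define NSLABS          (2 * SEGSIZE)           /* a tiny two-segment pool */
#define SLOT_BYTES      2048                    /* assumed IO_TLB_SIZE */

struct slot {
        unsigned int list;      /* free slots from here to end of run (0 = in use) */
        size_t alloc_size;      /* bytes of the mapping still covered from here */
};

static struct slot slots[NSLABS];

static unsigned int seg_offset(unsigned int i)
{
        return i & (SEGSIZE - 1);
}

static void pool_init(void)
{
        for (unsigned int i = 0; i < NSLABS; i++) {
                slots[i].list = SEGSIZE - seg_offset(i);
                slots[i].alloc_size = 0;
        }
}

/* Linear-scan stand-in for the search: find nslots contiguous free slots. */
static int find_slots(unsigned int nslots, size_t alloc_size)
{
        for (unsigned int index = 0; index + nslots <= NSLABS; index++) {
                if (slots[index].list < nslots)
                        continue;

                /* claim the run; each slot records how much of the mapping remains */
                for (unsigned int i = index; i < index + nslots; i++) {
                        slots[i].list = 0;
                        slots[i].alloc_size =
                                alloc_size - (size_t)(i - index) * SLOT_BYTES;
                }
                /* free runs ending just before us now stop at index - 1 */
                unsigned int count = 0;
                for (int i = (int)index - 1;
                     i >= 0 && seg_offset(i) != SEGSIZE - 1 && slots[i].list;
                     i--)
                        slots[i].list = ++count;
                return (int)index;
        }
        return -1;
}

static void release_slots(unsigned int index, unsigned int nslots)
{
        unsigned int count = 0;

        /* merge with the free run right after us, if it is in the same segment */
        if (index + nslots < NSLABS && seg_offset(index + nslots) != 0)
                count = slots[index + nslots].list;

        /* mark our own slots free, growing the run from back to front */
        for (int i = (int)(index + nslots) - 1; i >= (int)index; i--) {
                slots[i].list = ++count;
                slots[i].alloc_size = 0;
        }
        /* and let any free run ending just before us see the new length */
        for (int i = (int)index - 1;
             i >= 0 && seg_offset(i) != SEGSIZE - 1 && slots[i].list;
             i--)
                slots[i].list = ++count;
}

int main(void)
{
        pool_init();

        int a = find_slots(3, 5000);    /* 5000 bytes -> 3 slots of 2 KiB */
        int b = find_slots(2, 4096);
        printf("a = %d, b = %d (b starts right after a's three slots)\n", a, b);

        release_slots((unsigned int)a, 3);
        printf("after freeing a: slot 0 free-run length = %u\n", slots[0].list);
        return 0;
}

Storing the run length at every free slot is what lets the search reject a candidate index with one comparison and lets release merge a freed run with its free neighbours without any separate free-list structure, which is the role the list updates in the excerpts above play.
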
648 size_t size, enum dma_data_direction dir) in swiotlb_sync_single_for_device() argument
651 swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE); in swiotlb_sync_single_for_device()
657 size_t size, enum dma_data_direction dir) in swiotlb_sync_single_for_cpu() argument
660 swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE); in swiotlb_sync_single_for_cpu()
669 dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size, in swiotlb_map() argument
675 trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size, in swiotlb_map()
678 swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, dir, in swiotlb_map()
685 if (unlikely(!dma_capable(dev, dma_addr, size, true))) { in swiotlb_map()
686 swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir, in swiotlb_map()
690 &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit); in swiotlb_map()
695 arch_sync_dma_for_device(swiotlb_addr, size, dir); in swiotlb_map()
706 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in is_swiotlb_active()
708 return mem && mem->nslabs; in is_swiotlb_active()
717 debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs); in swiotlb_create_debugfs_files()
718 debugfs_create_ulong("io_tlb_used", 0400, mem->debugfs, &mem->used); in swiotlb_create_debugfs_files()
726 if (mem->nslabs) { in swiotlb_create_default_debugfs()
727 mem->debugfs = debugfs_dir; in swiotlb_create_default_debugfs()
742 struct io_tlb_mem *mem = rmem->priv; in rmem_swiotlb_debugfs_init()
744 mem->debugfs = debugfs_create_dir(rmem->name, debugfs_dir); in rmem_swiotlb_debugfs_init()
753 struct page *swiotlb_alloc(struct device *dev, size_t size) in swiotlb_alloc() argument
755 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_alloc()
762 index = swiotlb_find_slots(dev, 0, size); in swiotlb_alloc()
763 if (index == -1) in swiotlb_alloc()
766 tlb_addr = slot_addr(mem->start, index); in swiotlb_alloc()
771 bool swiotlb_free(struct device *dev, struct page *page, size_t size) in swiotlb_free() argument
786 struct io_tlb_mem *mem = rmem->priv; in rmem_swiotlb_device_init()
787 unsigned long nslabs = rmem->size >> IO_TLB_SHIFT; in rmem_swiotlb_device_init()
797 return -ENOMEM; in rmem_swiotlb_device_init()
799 mem->slots = kzalloc(array_size(sizeof(*mem->slots), nslabs), in rmem_swiotlb_device_init()
801 if (!mem->slots) { in rmem_swiotlb_device_init()
803 return -ENOMEM; in rmem_swiotlb_device_init()
806 set_memory_decrypted((unsigned long)phys_to_virt(rmem->base), in rmem_swiotlb_device_init()
807 rmem->size >> PAGE_SHIFT); in rmem_swiotlb_device_init()
808 swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false); in rmem_swiotlb_device_init()
809 mem->force_bounce = true; in rmem_swiotlb_device_init()
810 mem->for_alloc = true; in rmem_swiotlb_device_init()
812 rmem->priv = mem; in rmem_swiotlb_device_init()
817 dev->dma_io_tlb_mem = mem; in rmem_swiotlb_device_init()
825 dev->dma_io_tlb_mem = &io_tlb_default_mem; in rmem_swiotlb_device_release()
835 unsigned long node = rmem->fdt_node; in rmem_swiotlb_setup()
838 of_get_flat_dt_prop(node, "linux,cma-default", NULL) || in rmem_swiotlb_setup()
839 of_get_flat_dt_prop(node, "linux,dma-default", NULL) || in rmem_swiotlb_setup()
840 of_get_flat_dt_prop(node, "no-map", NULL)) in rmem_swiotlb_setup()
841 return -EINVAL; in rmem_swiotlb_setup()
843 if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) { in rmem_swiotlb_setup()
845 return -EINVAL; in rmem_swiotlb_setup()
848 rmem->ops = &rmem_swiotlb_ops; in rmem_swiotlb_setup()
849 pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n", in rmem_swiotlb_setup()
850 &rmem->base, (unsigned long)rmem->size / SZ_1M); in rmem_swiotlb_setup()
854 RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);