Lines matching the full-word identifier "mem" in kernel/dma/swiotlb.c
137 struct io_tlb_mem *mem = &io_tlb_default_mem; in swiotlb_print_info() local
139 if (!mem->nslabs) { in swiotlb_print_info()
140 pr_warn("No low mem\n"); in swiotlb_print_info()
144 pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end, in swiotlb_print_info()
145 (mem->nslabs << IO_TLB_SHIFT) >> 20); in swiotlb_print_info()
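
The size in the banner is pure shift arithmetic: nslabs << IO_TLB_SHIFT converts slots to bytes, and >> 20 converts bytes to MiB. A minimal user-space sketch of the same computation, assuming the usual IO_TLB_SHIFT of 11 (2 KiB per slot):

	#include <stdio.h>

	#define IO_TLB_SHIFT 11			/* assumed: 2 KiB slots, as upstream */

	int main(void)
	{
		unsigned long nslabs = 32768;	/* the default 64 MiB pool */

		printf("mapped %lu slots (%luMB)\n",
		       nslabs, (nslabs << IO_TLB_SHIFT) >> 20);	/* prints 64 */
		return 0;
	}
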
166 struct io_tlb_mem *mem = &io_tlb_default_mem; in swiotlb_update_mem_attributes() local
170 if (!mem->nslabs || mem->late_alloc) in swiotlb_update_mem_attributes()
172 vaddr = phys_to_virt(mem->start); in swiotlb_update_mem_attributes()
173 bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT); in swiotlb_update_mem_attributes()
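
Only the guard and the address/size computation contain a full-word "mem", so the tail of this helper is not shown by the match; upstream it goes on to mark the early-allocated pool decrypted (for SEV-style memory encryption) and zero it. A hedged reconstruction of the elided lines, not part of the match output:

	/* reconstruction from upstream, hedged: share the pool with the
	 * hypervisor, then clear it */
	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
	memset(vaddr, 0, bytes);
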
178 static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start, in swiotlb_init_io_tlb_mem() argument
184 mem->nslabs = nslabs; in swiotlb_init_io_tlb_mem()
185 mem->start = start; in swiotlb_init_io_tlb_mem()
186 mem->end = mem->start + bytes; in swiotlb_init_io_tlb_mem()
187 mem->index = 0; in swiotlb_init_io_tlb_mem()
188 mem->late_alloc = late_alloc; in swiotlb_init_io_tlb_mem()
191 mem->force_bounce = true; in swiotlb_init_io_tlb_mem()
193 spin_lock_init(&mem->lock); in swiotlb_init_io_tlb_mem()
194 for (i = 0; i < mem->nslabs; i++) { in swiotlb_init_io_tlb_mem()
195 mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i); in swiotlb_init_io_tlb_mem()
196 mem->slots[i].orig_addr = INVALID_PHYS_ADDR; in swiotlb_init_io_tlb_mem()
197 mem->slots[i].alloc_size = 0; in swiotlb_init_io_tlb_mem()
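
The per-slot list value is the length of the contiguous free run from that slot to the end of its IO_TLB_SEGSIZE-aligned segment, so the allocator can later test a whole run with one comparison. A standalone sketch of the initialization, assuming the upstream constants (IO_TLB_SEGSIZE = 128, io_tlb_offset() masking the low bits):

	#include <stdio.h>

	#define IO_TLB_SEGSIZE 128		/* assumed to match upstream */

	/* mirrors io_tlb_offset(): a slot's position within its segment */
	static unsigned int io_tlb_offset(unsigned int i)
	{
		return i & (IO_TLB_SEGSIZE - 1);
	}

	int main(void)
	{
		/* free-run lengths count down within each segment and then
		 * restart: 128, 127, ..., 1, 128, 127, ... */
		for (unsigned int i = 126; i < 130; i++)
			printf("slot %u: list = %u\n",
			       i, IO_TLB_SEGSIZE - io_tlb_offset(i));
		return 0;
	}
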
204 struct io_tlb_mem *mem = &io_tlb_default_mem; in swiotlb_init_with_tbl() local
211 if (WARN_ON_ONCE(mem->nslabs)) in swiotlb_init_with_tbl()
214 alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs)); in swiotlb_init_with_tbl()
215 mem->slots = memblock_alloc(alloc_size, PAGE_SIZE); in swiotlb_init_with_tbl()
216 if (!mem->slots) in swiotlb_init_with_tbl()
220 swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false); in swiotlb_init_with_tbl()
224 swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT); in swiotlb_init_with_tbl()
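
array_size() is the kernel's overflow-checked multiply: if sizeof(*mem->slots) * nslabs would overflow, it saturates to SIZE_MAX so the memblock_alloc() call fails outright instead of returning an undersized array. A rough user-space model (the name array_size_model is mine, for illustration):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* rough model of array_size(): saturate instead of wrapping */
	static size_t array_size_model(size_t elem, size_t n)
	{
		size_t bytes;

		if (__builtin_mul_overflow(elem, n, &bytes))
			return SIZE_MAX;
		return bytes;
	}

	int main(void)
	{
		printf("%zu\n", array_size_model(24, 32768));	/* 786432 */
		printf("%zu\n", array_size_model(SIZE_MAX, 2));	/* SIZE_MAX */
		return 0;
	}
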
306 struct io_tlb_mem *mem = &io_tlb_default_mem; in swiotlb_late_init_with_tbl() local
313 if (WARN_ON_ONCE(mem->nslabs)) in swiotlb_late_init_with_tbl()
316 mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, in swiotlb_late_init_with_tbl()
317 get_order(array_size(sizeof(*mem->slots), nslabs))); in swiotlb_late_init_with_tbl()
318 if (!mem->slots) in swiotlb_late_init_with_tbl()
322 swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true); in swiotlb_late_init_with_tbl()
325 swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT); in swiotlb_late_init_with_tbl()
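
The late path cannot use memblock, so the slot array comes from the page allocator; get_order() rounds a byte count up to the smallest power-of-two number of pages. A model of that rounding, assuming 4 KiB pages:

	#include <stdio.h>

	#define PAGE_SHIFT 12	/* assuming 4 KiB pages */

	/* model of get_order(): smallest order of pages covering size */
	static int get_order_model(unsigned long size)
	{
		int order = 0;

		size = (size - 1) >> PAGE_SHIFT;
		while (size) {
			order++;
			size >>= 1;
		}
		return order;
	}

	int main(void)
	{
		printf("get_order(4096)  = %d\n", get_order_model(4096));	/* 0 */
		printf("get_order(4097)  = %d\n", get_order_model(4097));	/* 1 */
		printf("get_order(65536) = %d\n", get_order_model(65536));	/* 4 */
		return 0;
	}
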
331 struct io_tlb_mem *mem = &io_tlb_default_mem; in swiotlb_exit() local
335 if (!mem->nslabs) in swiotlb_exit()
339 tbl_vaddr = (unsigned long)phys_to_virt(mem->start); in swiotlb_exit()
340 tbl_size = PAGE_ALIGN(mem->end - mem->start); in swiotlb_exit()
341 slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs)); in swiotlb_exit()
344 if (mem->late_alloc) { in swiotlb_exit()
346 free_pages((unsigned long)mem->slots, get_order(slots_size)); in swiotlb_exit()
348 memblock_free_late(mem->start, tbl_size); in swiotlb_exit()
349 memblock_free_late(__pa(mem->slots), slots_size); in swiotlb_exit()
352 memset(mem, 0, sizeof(*mem)); in swiotlb_exit()
369 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_bounce() local
370 int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT; in swiotlb_bounce()
371 phys_addr_t orig_addr = mem->slots[index].orig_addr; in swiotlb_bounce()
372 size_t alloc_size = mem->slots[index].alloc_size; in swiotlb_bounce()
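
Bouncing recovers everything from the bounce address alone: subtract the pool base and shift by IO_TLB_SHIFT, and the resulting slot index leads to the saved orig_addr and alloc_size. A sketch of the index math, with hypothetical addresses chosen for illustration:

	#include <stdio.h>

	#define IO_TLB_SHIFT 11	/* assumed: 2 KiB slots */

	int main(void)
	{
		/* hypothetical pool placement, for illustration only */
		unsigned long long start = 0x80000000ULL;
		unsigned long long tlb_addr = 0x80001800ULL;

		/* offset into the pool divided by the slot size */
		unsigned int index = (tlb_addr - start) >> IO_TLB_SHIFT;

		printf("index = %u\n", index);	/* 0x1800 / 0x800 = 3 */
		return 0;
	}
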
450 static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index) in wrap_index() argument
452 if (index >= mem->nslabs) in wrap_index()
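
Only the comparison contains a full-word "mem"; the rest of the helper (which has no such match, so it is not listed) simply restarts the scan at slot 0 when the index runs past the pool, avoiding a modulo in the search loop. A sketch of the complete helper, with a minimal stand-in for the struct:

	/* minimal stand-in: only the field wrap_index() reads */
	struct io_tlb_mem {
		unsigned long nslabs;
	};

	/* wrap the scan index back to slot 0; cheaper than index % nslabs */
	static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index)
	{
		if (index >= mem->nslabs)
			return 0;
		return index;
	}
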
464 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_find_slots() local
467 phys_to_dma_unencrypted(dev, mem->start) & boundary_mask; in swiotlb_find_slots()
487 spin_lock_irqsave(&mem->lock, flags); in swiotlb_find_slots()
488 if (unlikely(nslots > mem->nslabs - mem->used)) in swiotlb_find_slots()
491 index = wrap = wrap_index(mem, ALIGN(mem->index, stride)); in swiotlb_find_slots()
496 index = wrap_index(mem, index + 1); in swiotlb_find_slots()
508 if (mem->slots[index].list >= nslots) in swiotlb_find_slots()
511 index = wrap_index(mem, index + stride); in swiotlb_find_slots()
515 spin_unlock_irqrestore(&mem->lock, flags); in swiotlb_find_slots()
520 mem->slots[i].list = 0; in swiotlb_find_slots()
521 mem->slots[i].alloc_size = in swiotlb_find_slots()
526 mem->slots[i].list; i--) in swiotlb_find_slots()
527 mem->slots[i].list = ++count; in swiotlb_find_slots()
532 if (index + nslots < mem->nslabs) in swiotlb_find_slots()
533 mem->index = index + nslots; in swiotlb_find_slots()
535 mem->index = 0; in swiotlb_find_slots()
536 mem->used += nslots; in swiotlb_find_slots()
538 spin_unlock_irqrestore(&mem->lock, flags); in swiotlb_find_slots()
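
Put together, the search walks candidate indices under mem->lock and accepts the first slot whose free-run counter already says nslots contiguous slots are available; claiming them zeroes their counters and shortens the runs advertised by earlier free slots in the same segment. A self-contained toy model of that bookkeeping (constants assumed from upstream; stride fixed at 1 for clarity):

	#include <stdio.h>

	#define IO_TLB_SEGSIZE	128		/* assumed upstream constants */
	#define NSLABS		256

	static unsigned int list[NSLABS];	/* free-run length per slot */

	static unsigned int io_tlb_offset(unsigned int i)
	{
		return i & (IO_TLB_SEGSIZE - 1);
	}

	/* one comparison answers "do nslots contiguous slots start here?" */
	static int find(unsigned int nslots)
	{
		for (unsigned int i = 0; i < NSLABS; i++)
			if (list[i] >= nslots)
				return (int)i;
		return -1;
	}

	/* claim nslots slots at index and fix up the preceding free run */
	static void claim(unsigned int index, unsigned int nslots)
	{
		unsigned int count = 0;

		for (unsigned int i = index; i < index + nslots; i++)
			list[i] = 0;
		for (int i = (int)index - 1;
		     i >= 0 && io_tlb_offset((unsigned int)i) != IO_TLB_SEGSIZE - 1 &&
		     list[i];
		     i--)
			list[i] = ++count;
	}

	int main(void)
	{
		for (unsigned int i = 0; i < NSLABS; i++)
			list[i] = IO_TLB_SEGSIZE - io_tlb_offset(i);

		int index = find(3);	/* slot 0 advertises a run of 128 */
		claim(index, 3);
		/* slots 0..2 are busy; a new 3-slot search must skip them */
		printf("first fit for 3 slots is now %d\n", find(3));	/* 3 */
		return 0;
	}
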
546 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_tbl_map_single() local
552 if (!mem) in swiotlb_tbl_map_single()
569 alloc_size, mem->nslabs, mem->used); in swiotlb_tbl_map_single()
579 mem->slots[index + i].orig_addr = slot_addr(orig_addr, i); in swiotlb_tbl_map_single()
580 tlb_addr = slot_addr(mem->start, index) + offset; in swiotlb_tbl_map_single()
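
The returned bounce address is the base of the first claimed slot plus the original buffer's offset, so device alignment expectations survive the bounce (simplified here: the real code only preserves the offset bits covered by the device's min_align_mask). A sketch with slot_addr() mirroring the upstream macro:

	#include <stdio.h>

	#define IO_TLB_SHIFT	11	/* assumed: 2 KiB slots */
	#define IO_TLB_SIZE	(1 << IO_TLB_SHIFT)

	/* mirrors slot_addr(): base address of slot idx inside the pool */
	static unsigned long long slot_addr(unsigned long long start,
					    unsigned int idx)
	{
		return start + ((unsigned long long)idx << IO_TLB_SHIFT);
	}

	int main(void)
	{
		unsigned long long start = 0x80000000ULL;	/* hypothetical pool base */
		unsigned long long orig_addr = 0x12345678ULL;	/* hypothetical buffer */
		unsigned int index = 3;

		/* keep the buffer's offset within a slot across the bounce */
		unsigned long long offset = orig_addr & (IO_TLB_SIZE - 1);
		unsigned long long tlb_addr = slot_addr(start, index) + offset;

		printf("tlb_addr = %#llx\n", tlb_addr);	/* 0x80001e78 */
		return 0;
	}
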
589 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_release_slots() local
592 int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT; in swiotlb_release_slots()
593 int nslots = nr_slots(mem->slots[index].alloc_size + offset); in swiotlb_release_slots()
602 spin_lock_irqsave(&mem->lock, flags); in swiotlb_release_slots()
604 count = mem->slots[index + nslots].list; in swiotlb_release_slots()
613 mem->slots[i].list = ++count; in swiotlb_release_slots()
614 mem->slots[i].orig_addr = INVALID_PHYS_ADDR; in swiotlb_release_slots()
615 mem->slots[i].alloc_size = 0; in swiotlb_release_slots()
623 io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list; in swiotlb_release_slots()
625 mem->slots[i].list = ++count; in swiotlb_release_slots()
626 mem->used -= nslots; in swiotlb_release_slots()
627 spin_unlock_irqrestore(&mem->lock, flags); in swiotlb_release_slots()
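
Release is the mirror image: walk the freed region back to front extending a free-run counter (seeded from the run that follows, if it lies in the same segment), then propagate the longer run to the free slots just before the region. A standalone model of the merge, with the same assumed constants as before:

	#include <stdio.h>

	#define IO_TLB_SEGSIZE	128		/* assumed upstream constants */
	#define NSLABS		256

	static unsigned int list[NSLABS];

	static unsigned int io_tlb_offset(unsigned int i)
	{
		return i & (IO_TLB_SEGSIZE - 1);
	}

	/* model of the merge done in swiotlb_release_slots() */
	static void release(unsigned int index, unsigned int nslots)
	{
		unsigned int seg_end = (index / IO_TLB_SEGSIZE + 1) * IO_TLB_SEGSIZE;
		unsigned int count;

		/* seed from the free run that follows, if in this segment */
		count = (index + nslots < seg_end) ? list[index + nslots] : 0;

		/* return the region, growing the run back to front */
		for (int i = (int)(index + nslots) - 1; i >= (int)index; i--)
			list[i] = ++count;

		/* preceding free slots now see the longer run */
		for (int i = (int)index - 1;
		     i >= 0 && io_tlb_offset((unsigned int)i) != IO_TLB_SEGSIZE - 1 &&
		     list[i];
		     i--)
			list[i] = ++count;
	}

	int main(void)
	{
		for (unsigned int i = 0; i < NSLABS; i++)
			list[i] = IO_TLB_SEGSIZE - io_tlb_offset(i);

		list[4] = list[5] = list[6] = 0;	/* pretend 4..6 are busy */
		for (unsigned int i = 0; i < 4; i++)	/* runs before them shrink */
			list[i] = 4 - i;

		release(4, 3);
		printf("slot 0: list = %u\n", list[0]);	/* 128 again */
		return 0;
	}
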
706 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in is_swiotlb_active() local
708 return mem && mem->nslabs; in is_swiotlb_active()
715 static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem) in swiotlb_create_debugfs_files() argument
717 debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs); in swiotlb_create_debugfs_files()
718 debugfs_create_ulong("io_tlb_used", 0400, mem->debugfs, &mem->used); in swiotlb_create_debugfs_files()
723 struct io_tlb_mem *mem = &io_tlb_default_mem; in swiotlb_create_default_debugfs() local
726 if (mem->nslabs) { in swiotlb_create_default_debugfs()
727 mem->debugfs = debugfs_dir; in swiotlb_create_default_debugfs()
728 swiotlb_create_debugfs_files(mem); in swiotlb_create_default_debugfs()
742 struct io_tlb_mem *mem = rmem->priv; in rmem_swiotlb_debugfs_init() local
744 mem->debugfs = debugfs_create_dir(rmem->name, debugfs_dir); in rmem_swiotlb_debugfs_init()
745 swiotlb_create_debugfs_files(mem); in rmem_swiotlb_debugfs_init()
755 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_alloc() local
759 if (!mem) in swiotlb_alloc()
766 tlb_addr = slot_addr(mem->start, index); in swiotlb_alloc()
786 struct io_tlb_mem *mem = rmem->priv; in rmem_swiotlb_device_init() local
794 if (!mem) { in rmem_swiotlb_device_init()
795 mem = kzalloc(sizeof(*mem), GFP_KERNEL); in rmem_swiotlb_device_init()
796 if (!mem) in rmem_swiotlb_device_init()
799 mem->slots = kzalloc(array_size(sizeof(*mem->slots), nslabs), in rmem_swiotlb_device_init()
801 if (!mem->slots) { in rmem_swiotlb_device_init()
802 kfree(mem); in rmem_swiotlb_device_init()
808 swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false); in rmem_swiotlb_device_init()
809 mem->force_bounce = true; in rmem_swiotlb_device_init()
810 mem->for_alloc = true; in rmem_swiotlb_device_init()
812 rmem->priv = mem; in rmem_swiotlb_device_init()
817 dev->dma_io_tlb_mem = mem; in rmem_swiotlb_device_init()
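
A restricted pool is created lazily: the first device that attaches to the reserved-memory region allocates and initializes the io_tlb_mem and stashes it in rmem->priv, and every later device simply inherits that pointer. A toy model of the first-attach-wins pattern (all names here are illustrative, not the kernel API):

	#include <stdio.h>
	#include <stdlib.h>

	struct pool { unsigned long nslabs; };

	struct rmem_model {
		struct pool *priv;		/* shared pool, created lazily */
		unsigned long size;
	};

	struct dev_model {
		struct pool *dma_io_tlb_mem;
	};

	static int device_init(struct rmem_model *rmem, struct dev_model *dev)
	{
		struct pool *mem = rmem->priv;

		if (!mem) {			/* first attach creates the pool */
			mem = calloc(1, sizeof(*mem));
			if (!mem)
				return -1;
			mem->nslabs = rmem->size >> 11;	/* 2 KiB slots, assumed */
			rmem->priv = mem;
		}
		dev->dma_io_tlb_mem = mem;	/* later devices share it */
		return 0;
	}

	int main(void)
	{
		struct rmem_model rmem = { .priv = NULL, .size = 8 << 20 };
		struct dev_model a, b;

		device_init(&rmem, &a);
		device_init(&rmem, &b);
		printf("shared: %s, nslabs = %lu\n",
		       a.dma_io_tlb_mem == b.dma_io_tlb_mem ? "yes" : "no",
		       a.dma_io_tlb_mem->nslabs);	/* 4096 */
		return 0;
	}
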