Lines matching full:mem in kernel/dma/swiotlb.c (Linux SWIOTLB bounce-buffer pool)
191 struct io_tlb_mem *mem = &io_tlb_default_mem; in swiotlb_print_info() local
193 if (!mem->nslabs) { in swiotlb_print_info()
194 pr_warn("No low mem\n"); in swiotlb_print_info()
198 pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end, in swiotlb_print_info()
199 (mem->nslabs << IO_TLB_SHIFT) >> 20); in swiotlb_print_info()
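
The matches above come from swiotlb_print_info(). The size report converts the slab count to mebibytes: each IO TLB slab is 1 << IO_TLB_SHIFT bytes (IO_TLB_SHIFT is 11 in the mainline headers, i.e. 2 KiB per slab), so shifting left by IO_TLB_SHIFT gives bytes and shifting right by 20 gives MB. A minimal user-space sketch of the same arithmetic, assuming the default pool size:

#include <stdio.h>

#define IO_TLB_SHIFT 11	/* one slab = 2 KiB, per include/linux/swiotlb.h */

int main(void)
{
	unsigned long nslabs = 32768;	/* the default 64 MiB pool */

	/* slabs -> bytes -> MiB, mirroring swiotlb_print_info() */
	printf("%lu slabs = %lu MB\n", nslabs,
	       (nslabs << IO_TLB_SHIFT) >> 20);
	return 0;
}

This prints "32768 slabs = 64 MB", matching the default swiotlb pool.
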
218 static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes) in swiotlb_mem_remap() argument
223 phys_addr_t paddr = mem->start + swiotlb_unencrypted_base; in swiotlb_mem_remap()
234 static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes) in swiotlb_mem_remap() argument
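
Lines 218 and 234 are the two configuration-dependent definitions of swiotlb_mem_remap(): a real remap for platforms that place the pool at an unencrypted alias (e.g. Hyper-V isolation VMs using swiotlb_unencrypted_base), and a stub otherwise. A reconstruction from the matched fragments, assuming the mainline CONFIG_HAS_IOMEM split:

#ifdef CONFIG_HAS_IOMEM
static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
{
	void *vaddr = NULL;

	if (swiotlb_unencrypted_base) {
		/* Map the pool through its unencrypted physical alias. */
		phys_addr_t paddr = mem->start + swiotlb_unencrypted_base;

		vaddr = memremap(paddr, bytes, MEMREMAP_WB);
		if (!vaddr)
			pr_err("Failed to map the unencrypted memory %pa size %lx.\n",
			       &paddr, bytes);
	}

	return vaddr;
}
#else
static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
{
	return NULL;	/* no remapping needed or possible */
}
#endif
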
248 struct io_tlb_mem *mem = &io_tlb_default_mem; in swiotlb_update_mem_attributes() local
252 if (!mem->nslabs || mem->late_alloc) in swiotlb_update_mem_attributes()
254 vaddr = phys_to_virt(mem->start); in swiotlb_update_mem_attributes()
255 bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT); in swiotlb_update_mem_attributes()
258 mem->vaddr = swiotlb_mem_remap(mem, bytes); in swiotlb_update_mem_attributes()
259 if (!mem->vaddr) in swiotlb_update_mem_attributes()
260 mem->vaddr = vaddr; in swiotlb_update_mem_attributes()
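
Pieced together from the matched lines, swiotlb_update_mem_attributes() reads roughly as follows; the set_memory_decrypted() call does not appear among the matches and is assumed from the mainline source:

void __init swiotlb_update_mem_attributes(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long bytes;
	void *vaddr;

	/* No pool, or a late allocation that was already mapped with
	 * the right attributes: nothing to do. */
	if (!mem->nslabs || mem->late_alloc)
		return;

	vaddr = phys_to_virt(mem->start);
	bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);

	/* Assumed from the mainline source: share/decrypt the pool for
	 * memory-encryption setups before remapping it. */
	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);

	/* Use the remapped address if one is required, else fall back
	 * to the direct mapping. */
	mem->vaddr = swiotlb_mem_remap(mem, bytes);
	if (!mem->vaddr)
		mem->vaddr = vaddr;
}
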
263 static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start, in swiotlb_init_io_tlb_mem() argument
270 mem->nslabs = nslabs; in swiotlb_init_io_tlb_mem()
271 mem->start = start; in swiotlb_init_io_tlb_mem()
272 mem->end = mem->start + bytes; in swiotlb_init_io_tlb_mem()
273 mem->late_alloc = late_alloc; in swiotlb_init_io_tlb_mem()
274 mem->nareas = nareas; in swiotlb_init_io_tlb_mem()
275 mem->area_nslabs = nslabs / mem->nareas; in swiotlb_init_io_tlb_mem()
277 mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE); in swiotlb_init_io_tlb_mem()
279 for (i = 0; i < mem->nareas; i++) { in swiotlb_init_io_tlb_mem()
280 spin_lock_init(&mem->areas[i].lock); in swiotlb_init_io_tlb_mem()
281 mem->areas[i].index = 0; in swiotlb_init_io_tlb_mem()
282 mem->areas[i].used = 0; in swiotlb_init_io_tlb_mem()
285 for (i = 0; i < mem->nslabs; i++) { in swiotlb_init_io_tlb_mem()
286 mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i); in swiotlb_init_io_tlb_mem()
287 mem->slots[i].orig_addr = INVALID_PHYS_ADDR; in swiotlb_init_io_tlb_mem()
288 mem->slots[i].alloc_size = 0; in swiotlb_init_io_tlb_mem()
299 mem->vaddr = vaddr; in swiotlb_init_io_tlb_mem()
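
The fragments from swiotlb_init_io_tlb_mem() are enough to sketch the whole initializer, assembled here against the v6.0-era mainline source. The second loop is the key piece: it seeds the segment free lists that the allocator searches later.

static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
		unsigned long nslabs, unsigned int flags,
		bool late_alloc, unsigned int nareas)
{
	void *vaddr = phys_to_virt(start);
	unsigned long bytes = nslabs << IO_TLB_SHIFT, i;

	mem->nslabs = nslabs;
	mem->start = start;
	mem->end = mem->start + bytes;
	mem->late_alloc = late_alloc;
	mem->nareas = nareas;
	mem->area_nslabs = nslabs / mem->nareas;

	mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE);

	for (i = 0; i < mem->nareas; i++) {
		spin_lock_init(&mem->areas[i].lock);
		mem->areas[i].index = 0;
		mem->areas[i].used = 0;
	}

	/* Seed the free lists: each slot counts the free slots from
	 * itself to the end of its IO_TLB_SEGSIZE (128-slot, 256 KiB)
	 * segment, the largest contiguous bounce allocation. */
	for (i = 0; i < mem->nslabs; i++) {
		mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
		mem->slots[i].alloc_size = 0;
	}

	/* When swiotlb_unencrypted_base is set, the pool is remapped
	 * and cleared later in swiotlb_update_mem_attributes(). */
	if (swiotlb_unencrypted_base)
		return;

	memset(vaddr, 0, bytes);
	mem->vaddr = vaddr;
}
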
310 struct io_tlb_mem *mem = &io_tlb_default_mem; in swiotlb_init_remap() local
356 alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs)); in swiotlb_init_remap()
357 mem->slots = memblock_alloc(alloc_size, PAGE_SIZE); in swiotlb_init_remap()
358 if (!mem->slots) { in swiotlb_init_remap()
364 mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area), in swiotlb_init_remap()
366 if (!mem->areas) { in swiotlb_init_remap()
367 pr_warn("%s: Failed to allocate mem->areas.\n", __func__); in swiotlb_init_remap()
371 swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false, in swiotlb_init_remap()
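
The boot-time path in swiotlb_init_remap() draws its metadata from memblock. A condensed sketch with the error messages as matched; the default_nareas variable and SMP_CACHE_BYTES alignment are assumed from the surrounding mainline source:

	/* Early (boot-time) path: slot and area metadata from memblock. */
	alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
	mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
	if (!mem->slots) {
		pr_warn("%s: Failed to allocate %zu bytes align=0x%lx\n",
			__func__, alloc_size, PAGE_SIZE);
		return;
	}

	mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
		default_nareas), SMP_CACHE_BYTES);
	if (!mem->areas) {
		pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
		return;
	}

	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false,
				default_nareas);
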
391 struct io_tlb_mem *mem = &io_tlb_default_mem; in swiotlb_init_late() local
438 area_order = get_order(array_size(sizeof(*mem->areas), in swiotlb_init_late()
440 mem->areas = (struct io_tlb_area *) in swiotlb_init_late()
442 if (!mem->areas) in swiotlb_init_late()
445 mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, in swiotlb_init_late()
446 get_order(array_size(sizeof(*mem->slots), nslabs))); in swiotlb_init_late()
447 if (!mem->slots) in swiotlb_init_late()
452 swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true, in swiotlb_init_late()
459 free_pages((unsigned long)mem->areas, area_order); in swiotlb_init_late()
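
swiotlb_init_late() is the post-boot twin of the above: the same metadata, but from the page allocator, with an unwind path on failure. A condensed sketch; variable names such as vstart, order, and default_nareas are assumed from the mainline source:

	/* Late (post-boot) path: metadata from the buddy allocator. */
	area_order = get_order(array_size(sizeof(*mem->areas),
		default_nareas));
	mem->areas = (struct io_tlb_area *)
		__get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
	if (!mem->areas)
		goto error_area;

	mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
		get_order(array_size(sizeof(*mem->slots), nslabs)));
	if (!mem->slots)
		goto error_slots;

	set_memory_decrypted((unsigned long)vstart,
			     (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
	swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true,
				default_nareas);

	swiotlb_print_info();
	return 0;

error_slots:
	free_pages((unsigned long)mem->areas, area_order);
error_area:
	free_pages((unsigned long)vstart, order);
	return -ENOMEM;
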
467 struct io_tlb_mem *mem = &io_tlb_default_mem; in swiotlb_exit() local
475 if (!mem->nslabs) in swiotlb_exit()
479 tbl_vaddr = (unsigned long)phys_to_virt(mem->start); in swiotlb_exit()
480 tbl_size = PAGE_ALIGN(mem->end - mem->start); in swiotlb_exit()
481 slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs)); in swiotlb_exit()
484 if (mem->late_alloc) { in swiotlb_exit()
485 area_order = get_order(array_size(sizeof(*mem->areas), in swiotlb_exit()
486 mem->nareas)); in swiotlb_exit()
487 free_pages((unsigned long)mem->areas, area_order); in swiotlb_exit()
489 free_pages((unsigned long)mem->slots, get_order(slots_size)); in swiotlb_exit()
491 memblock_free_late(__pa(mem->areas), in swiotlb_exit()
492 array_size(sizeof(*mem->areas), mem->nareas)); in swiotlb_exit()
493 memblock_free_late(mem->start, tbl_size); in swiotlb_exit()
494 memblock_free_late(__pa(mem->slots), slots_size); in swiotlb_exit()
497 memset(mem, 0, sizeof(*mem)); in swiotlb_exit()
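
The matched lines cover nearly all of swiotlb_exit(); reassembled, the teardown mirrors whichever allocator built the pool:

void __init swiotlb_exit(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long tbl_vaddr;
	size_t tbl_size, slots_size;
	unsigned int area_order;

	if (!mem->nslabs)
		return;

	tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
	tbl_size = PAGE_ALIGN(mem->end - mem->start);
	slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));

	/* Not among the matches; assumed: re-encrypt before freeing. */
	set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);

	if (mem->late_alloc) {
		/* Page-allocator pool: return everything to the buddy. */
		area_order = get_order(array_size(sizeof(*mem->areas),
						  mem->nareas));
		free_pages((unsigned long)mem->areas, area_order);
		free_pages(tbl_vaddr, get_order(tbl_size));
		free_pages((unsigned long)mem->slots, get_order(slots_size));
	} else {
		/* memblock pool: hand the ranges back late. */
		memblock_free_late(__pa(mem->areas),
				   array_size(sizeof(*mem->areas), mem->nareas));
		memblock_free_late(mem->start, tbl_size);
		memblock_free_late(__pa(mem->slots), slots_size);
	}

	memset(mem, 0, sizeof(*mem));
}
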
514 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_bounce() local
515 int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT; in swiotlb_bounce()
516 phys_addr_t orig_addr = mem->slots[index].orig_addr; in swiotlb_bounce()
517 size_t alloc_size = mem->slots[index].alloc_size; in swiotlb_bounce()
519 unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start; in swiotlb_bounce()
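
swiotlb_bounce() translates a bounce (tlb) address two ways: the offset into the pool, shifted down by IO_TLB_SHIFT, picks the slot metadata (orig_addr, alloc_size), and the same byte offset into mem->vaddr picks the data to copy. A standalone check of that arithmetic, with a hypothetical pool base:

#include <assert.h>

#define IO_TLB_SHIFT 11	/* as in include/linux/swiotlb.h */

int main(void)
{
	/* Hypothetical pool: physical base 0x80000000. */
	unsigned long long start = 0x80000000ULL;
	unsigned long long tlb_addr =
		start + 5 * (1ULL << IO_TLB_SHIFT) + 100;

	/* Same arithmetic as swiotlb_bounce(): which slot the address
	 * falls in, and how far into the pool's mapping the data sits. */
	int index = (int)((tlb_addr - start) >> IO_TLB_SHIFT);
	unsigned long long vaddr_off = tlb_addr - start;

	assert(index == 5);
	assert(vaddr_off == 5 * 2048 + 100);
	return 0;
}
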
596 static unsigned int wrap_area_index(struct io_tlb_mem *mem, unsigned int index) in wrap_area_index() argument
598 if (index >= mem->area_nslabs) in wrap_area_index()
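
Only the comparison line of wrap_area_index() is matched; the remaining two lines are trivial and, in the mainline source, the helper reads:

static unsigned int wrap_area_index(struct io_tlb_mem *mem, unsigned int index)
{
	/* Indices are relative to one area, so wrap to its start. */
	if (index >= mem->area_nslabs)
		return 0;
	return index;
}
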
611 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_do_find_slots() local
612 struct io_tlb_area *area = mem->areas + area_index; in swiotlb_do_find_slots()
615 phys_to_dma_unencrypted(dev, mem->start) & boundary_mask; in swiotlb_do_find_slots()
627 BUG_ON(area_index >= mem->nareas); in swiotlb_do_find_slots()
640 if (unlikely(nslots > mem->area_nslabs - area->used)) in swiotlb_do_find_slots()
643 slot_base = area_index * mem->area_nslabs; in swiotlb_do_find_slots()
644 index = wrap = wrap_area_index(mem, ALIGN(area->index, stride)); in swiotlb_do_find_slots()
652 index = wrap_area_index(mem, index + 1); in swiotlb_do_find_slots()
664 if (mem->slots[slot_index].list >= nslots) in swiotlb_do_find_slots()
667 index = wrap_area_index(mem, index + stride); in swiotlb_do_find_slots()
676 mem->slots[i].list = 0; in swiotlb_do_find_slots()
677 mem->slots[i].alloc_size = alloc_size - (offset + in swiotlb_do_find_slots()
682 mem->slots[i].list; i--) in swiotlb_do_find_slots()
683 mem->slots[i].list = ++count; in swiotlb_do_find_slots()
688 if (index + nslots < mem->area_nslabs) in swiotlb_do_find_slots()
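
Those fragments outline the heart of swiotlb_do_find_slots(): a strided, wrapping scan of one area's slots. A condensed, hedged sketch; the alignment/stride setup and the device boundary-mask checks are elided to a placeholder:

	/* Condensed sketch of the per-area search loop. */
	slot_base = area_index * mem->area_nslabs;
	index = wrap = wrap_area_index(mem, ALIGN(area->index, stride));

	do {
		slot_index = slot_base + index;

		if (!fits_dev_constraints) {	/* placeholder for the
						 * boundary-mask and
						 * alignment tests */
			index = wrap_area_index(mem, index + 1);
			continue;
		}

		/* slots[i].list counts contiguous free slots from i to
		 * the end of its IO_TLB_SEGSIZE segment, so >= nslots
		 * means the whole request fits starting here. */
		if (mem->slots[slot_index].list >= nslots)
			goto found;

		index = wrap_area_index(mem, index + stride);
	} while (index != wrap);

	return -1;	/* this area is full for this request */
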
700 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_find_slots() local
701 int start = raw_smp_processor_id() & (mem->nareas - 1); in swiotlb_find_slots()
709 if (++i >= mem->nareas) in swiotlb_find_slots()
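
swiotlb_find_slots() simply round-robins that per-area search, starting from an area hashed from the current CPU to spread lock contention (nareas is a power of two, so the mask is a cheap modulo). Reassembled from the matches, with the argument list assumed from the mainline source:

static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
		size_t alloc_size, unsigned int alloc_align_mask)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	int start = raw_smp_processor_id() & (mem->nareas - 1);
	int i = start, index;

	do {
		index = swiotlb_do_find_slots(dev, i, orig_addr, alloc_size,
					      alloc_align_mask);
		if (index >= 0)
			return index;
		if (++i >= mem->nareas)
			i = 0;
	} while (i != start);

	return -1;
}
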
716 static unsigned long mem_used(struct io_tlb_mem *mem) in mem_used() argument
721 for (i = 0; i < mem->nareas; i++) in mem_used()
722 used += mem->areas[i].used; in mem_used()
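
mem_used() is fully recoverable from the matches; it sums the per-area counters (read without the area locks, so it is a diagnostic statistic, not a synchronized value):

static unsigned long mem_used(struct io_tlb_mem *mem)
{
	unsigned long used = 0;
	int i;

	for (i = 0; i < mem->nareas; i++)
		used += mem->areas[i].used;
	return used;
}
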
731 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_tbl_map_single() local
737 if (!mem || !mem->nslabs) { in swiotlb_tbl_map_single()
758 alloc_size, mem->nslabs, mem_used(mem)); in swiotlb_tbl_map_single()
768 mem->slots[index + i].orig_addr = slot_addr(orig_addr, i); in swiotlb_tbl_map_single()
769 tlb_addr = slot_addr(mem->start, index) + offset; in swiotlb_tbl_map_single()
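
slot_addr() is the index-to-physical-address mapping used in both matched lines above; in the mainline file it is a one-line macro:

/* One slot per 1 << IO_TLB_SHIFT bytes, so index <-> address is a shift. */
#define slot_addr(start, idx)	((start) + ((idx) << IO_TLB_SHIFT))

swiotlb_tbl_map_single() records slot_addr(orig_addr, i) in each slot so a later sync can locate the original buffer, and returns slot_addr(mem->start, index) + offset as the DMA-visible bounce address.
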
783 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_release_slots() local
786 int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT; in swiotlb_release_slots()
787 int nslots = nr_slots(mem->slots[index].alloc_size + offset); in swiotlb_release_slots()
788 int aindex = index / mem->area_nslabs; in swiotlb_release_slots()
789 struct io_tlb_area *area = &mem->areas[aindex]; in swiotlb_release_slots()
798 BUG_ON(aindex >= mem->nareas); in swiotlb_release_slots()
802 count = mem->slots[index + nslots].list; in swiotlb_release_slots()
811 mem->slots[i].list = ++count; in swiotlb_release_slots()
812 mem->slots[i].orig_addr = INVALID_PHYS_ADDR; in swiotlb_release_slots()
813 mem->slots[i].alloc_size = 0; in swiotlb_release_slots()
821 io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list; in swiotlb_release_slots()
823 mem->slots[i].list = ++count; in swiotlb_release_slots()
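
The release loops above rebuild the contiguous-free counters. Annotated, and with the segment-boundary guard (assumed from the mainline source) made explicit:

	/* If the freed block does not reach the end of its IO_TLB_SEGSIZE
	 * segment, continue the count of the free run that follows it;
	 * otherwise start from zero. */
	if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
		count = mem->slots[index + nslots].list;
	else
		count = 0;

	/* Walk the freed block backwards so each slot records how many
	 * free slots follow it, itself included. */
	for (i = index + nslots - 1; i >= index; i--) {
		mem->slots[i].list = ++count;
		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
		mem->slots[i].alloc_size = 0;
	}

	/* Extend the run into any free slots immediately before the
	 * block, stopping at a segment boundary or an allocated slot. */
	for (i = index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
	     i--)
		mem->slots[i].list = ++count;

	area->used -= nslots;
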
914 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in is_swiotlb_active() local
916 return mem && mem->nslabs; in is_swiotlb_active()
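
is_swiotlb_active() is recoverable almost verbatim from its two matched lines:

bool is_swiotlb_active(struct device *dev)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	return mem && mem->nslabs;
}
EXPORT_SYMBOL_GPL(is_swiotlb_active);
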
927 static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem, in swiotlb_create_debugfs_files() argument
930 mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs); in swiotlb_create_debugfs_files()
931 if (!mem->nslabs) in swiotlb_create_debugfs_files()
934 debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs); in swiotlb_create_debugfs_files()
935 debugfs_create_file("io_tlb_used", 0400, mem->debugfs, NULL, in swiotlb_create_debugfs_files()
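
The debugfs helper creates one directory per pool under the default pool's directory and exposes the slab count and usage. Reassembled from the matches; the fops_io_tlb_used name is assumed from the mainline source:

static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
					 const char *dirname)
{
	mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs);
	if (!mem->nslabs)
		return;

	debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
	debugfs_create_file("io_tlb_used", 0400, mem->debugfs, NULL,
			    &fops_io_tlb_used);
}
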
953 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_alloc() local
957 if (!mem) in swiotlb_alloc()
964 tlb_addr = slot_addr(mem->start, index); in swiotlb_alloc()
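
swiotlb_alloc(), the pool-backed page allocator for restricted DMA pools, reassembles to a few lines; the swiotlb_find_slots() argument list is assumed from the mainline source:

struct page *swiotlb_alloc(struct device *dev, size_t size)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	phys_addr_t tlb_addr;
	int index;

	if (!mem)
		return NULL;

	/* No original buffer to bounce from: orig_addr is 0. */
	index = swiotlb_find_slots(dev, 0, size, 0);
	if (index == -1)
		return NULL;

	tlb_addr = slot_addr(mem->start, index);

	return pfn_to_page(PFN_DOWN(tlb_addr));
}
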
984 struct io_tlb_mem *mem = rmem->priv; in rmem_swiotlb_device_init() local
995 if (!mem) { in rmem_swiotlb_device_init()
996 mem = kzalloc(sizeof(*mem), GFP_KERNEL); in rmem_swiotlb_device_init()
997 if (!mem) in rmem_swiotlb_device_init()
1000 mem->slots = kcalloc(nslabs, sizeof(*mem->slots), GFP_KERNEL); in rmem_swiotlb_device_init()
1001 if (!mem->slots) { in rmem_swiotlb_device_init()
1002 kfree(mem); in rmem_swiotlb_device_init()
1006 mem->areas = kcalloc(nareas, sizeof(*mem->areas), in rmem_swiotlb_device_init()
1008 if (!mem->areas) { in rmem_swiotlb_device_init()
1009 kfree(mem->slots); in rmem_swiotlb_device_init()
1010 kfree(mem); in rmem_swiotlb_device_init()
1016 swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, SWIOTLB_FORCE, in rmem_swiotlb_device_init()
1018 mem->for_alloc = true; in rmem_swiotlb_device_init()
1020 rmem->priv = mem; in rmem_swiotlb_device_init()
1022 swiotlb_create_debugfs_files(mem, rmem->name); in rmem_swiotlb_device_init()
1025 dev->dma_io_tlb_mem = mem; in rmem_swiotlb_device_init()
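
Finally, rmem_swiotlb_device_init() wires a "restricted-dma-pool" reserved-memory region to a device. The first device attached builds the shared io_tlb_mem and caches it in rmem->priv; later devices reuse it. A condensed sketch of that path, with error handling as matched and the decryption step assumed from the mainline source:

	/* First attach: allocate and initialize the shared pool. */
	if (!mem) {
		mem = kzalloc(sizeof(*mem), GFP_KERNEL);
		if (!mem)
			return -ENOMEM;

		mem->slots = kcalloc(nslabs, sizeof(*mem->slots), GFP_KERNEL);
		if (!mem->slots) {
			kfree(mem);
			return -ENOMEM;
		}

		mem->areas = kcalloc(nareas, sizeof(*mem->areas), GFP_KERNEL);
		if (!mem->areas) {
			kfree(mem->slots);
			kfree(mem);
			return -ENOMEM;
		}

		/* Assumed: the reserved region must be shared/decrypted
		 * before it can serve as a bounce pool. */
		set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
				     rmem->size >> PAGE_SHIFT);
		swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, SWIOTLB_FORCE,
					false, nareas);
		mem->for_alloc = true;

		rmem->priv = mem;

		swiotlb_create_debugfs_files(mem, rmem->name);
	}

	dev->dma_io_tlb_mem = mem;
	return 0;
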