Lines Matching full:slots
102 * contiguous slots tracking: free slots are treated as contiguous no matter
286 mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i); in swiotlb_init_io_tlb_mem()
287 mem->slots[i].orig_addr = INVALID_PHYS_ADDR; in swiotlb_init_io_tlb_mem()
288 mem->slots[i].alloc_size = 0; in swiotlb_init_io_tlb_mem()
356 alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs)); in swiotlb_init_remap()
357 mem->slots = memblock_alloc(alloc_size, PAGE_SIZE); in swiotlb_init_remap()
358 if (!mem->slots) { in swiotlb_init_remap()
445 mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, in swiotlb_init_late()
446 get_order(array_size(sizeof(*mem->slots), nslabs))); in swiotlb_init_late()
447 if (!mem->slots) in swiotlb_init_late()
481 slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs)); in swiotlb_exit()
489 free_pages((unsigned long)mem->slots, get_order(slots_size)); in swiotlb_exit()
494 memblock_free_late(__pa(mem->slots), slots_size); in swiotlb_exit()
516 phys_addr_t orig_addr = mem->slots[index].orig_addr; in swiotlb_bounce()
517 size_t alloc_size = mem->slots[index].alloc_size; in swiotlb_bounce()
631 * unaligned slots once we found an aligned one. For allocations of in swiotlb_do_find_slots()
664 if (mem->slots[slot_index].list >= nslots) in swiotlb_do_find_slots()
676 mem->slots[i].list = 0; in swiotlb_do_find_slots()
677 mem->slots[i].alloc_size = alloc_size - (offset + in swiotlb_do_find_slots()
682 mem->slots[i].list; i--) in swiotlb_do_find_slots()
683 mem->slots[i].list = ++count; in swiotlb_do_find_slots()
757 "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n", in swiotlb_tbl_map_single()
768 mem->slots[index + i].orig_addr = slot_addr(orig_addr, i); in swiotlb_tbl_map_single()
787 int nslots = nr_slots(mem->slots[index].alloc_size + offset); in swiotlb_release_slots()
796 * with slots below and above the pool being returned. in swiotlb_release_slots()
802 count = mem->slots[index + nslots].list; in swiotlb_release_slots()
807 * Step 1: return the slots to the free list, merging the slots with in swiotlb_release_slots()
808 * superseding slots in swiotlb_release_slots()
811 mem->slots[i].list = ++count; in swiotlb_release_slots()
812 mem->slots[i].orig_addr = INVALID_PHYS_ADDR; in swiotlb_release_slots()
813 mem->slots[i].alloc_size = 0; in swiotlb_release_slots()
817 * Step 2: merge the returned slots with the preceding slots, if in swiotlb_release_slots()
821 io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list; in swiotlb_release_slots()
823 mem->slots[i].list = ++count; in swiotlb_release_slots()
902 * swiotlb_find_slots() skips slots according to in swiotlb_max_mapping_size()
1000 mem->slots = kcalloc(nslabs, sizeof(*mem->slots), GFP_KERNEL); in rmem_swiotlb_device_init()
1001 if (!mem->slots) { in rmem_swiotlb_device_init()
1009 kfree(mem->slots); in rmem_swiotlb_device_init()