Lines matching refs: va_block

19 struct hl_vm_va_block *va_block, *tmp; in cb_map_mem() local
52 va_block = kzalloc(sizeof(*va_block), GFP_KERNEL); in cb_map_mem()
53 if (!va_block) { in cb_map_mem()
59 va_block->start = virt_addr; in cb_map_mem()
60 va_block->end = virt_addr + page_size; in cb_map_mem()
61 va_block->size = page_size; in cb_map_mem()
62 list_add_tail(&va_block->node, &cb->va_block_list); in cb_map_mem()
69 list_for_each_entry(va_block, &cb->va_block_list, node) { in cb_map_mem()
70 rc = hl_mmu_map_page(ctx, va_block->start, bus_addr, in cb_map_mem()
71 va_block->size, list_is_last(&va_block->node, in cb_map_mem()
75 va_block->start); in cb_map_mem()
79 bus_addr += va_block->size; in cb_map_mem()
80 offset += va_block->size; in cb_map_mem()
92 list_for_each_entry(va_block, &cb->va_block_list, node) { in cb_map_mem()
95 hl_mmu_unmap_page(ctx, va_block->start, va_block->size, in cb_map_mem()
96 offset <= va_block->size); in cb_map_mem()
97 offset -= va_block->size; in cb_map_mem()
105 list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) { in cb_map_mem()
106 gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size); in cb_map_mem()
107 list_del(&va_block->node); in cb_map_mem()
108 kfree(va_block); in cb_map_mem()
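
Taken together, the cb_map_mem() fragments above carve the command buffer into page-sized hl_vm_va_block entries chained on cb->va_block_list, map each block with hl_mmu_map_page() (flushing the PTE cache only for the last entry), and unwind on failure by unmapping whatever was already mapped and returning the VA ranges to ctx->cb_va_pool. The following is a minimal sketch of how those fragments fit together; the locals (hdev, page_size, offset), the error labels, and the omission of locking, MMU-cache invalidation and capability checks are assumptions filled in around the matched lines, not the driver verbatim.

static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm_va_block *va_block, *tmp;
	u32 page_size = hdev->asic_prop.pmmu.page_size;	/* assumed page-size source */
	dma_addr_t bus_addr;
	u64 virt_addr;
	s32 offset;
	int rc;

	INIT_LIST_HEAD(&cb->va_block_list);

	/* One page-sized VA block per page of the CB, chained on cb->va_block_list */
	for (bus_addr = cb->bus_address;
	     bus_addr < cb->bus_address + cb->size;
	     bus_addr += page_size) {
		virt_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, page_size);
		if (!virt_addr) {
			rc = -ENOMEM;
			goto err_va_pool_free;
		}

		va_block = kzalloc(sizeof(*va_block), GFP_KERNEL);
		if (!va_block) {
			gen_pool_free(ctx->cb_va_pool, virt_addr, page_size);
			rc = -ENOMEM;
			goto err_va_pool_free;
		}

		va_block->start = virt_addr;
		va_block->end = virt_addr + page_size;
		va_block->size = page_size;
		list_add_tail(&va_block->node, &cb->va_block_list);
	}

	/* Map every block; flush the PTE cache only for the last entry */
	bus_addr = cb->bus_address;
	offset = 0;
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		rc = hl_mmu_map_page(ctx, va_block->start, bus_addr,
				va_block->size, list_is_last(&va_block->node,
						&cb->va_block_list));
		if (rc) {
			dev_err(hdev->dev, "Failed to map VA %#llx to CB\n",
				va_block->start);
			goto err_va_umap;
		}

		bus_addr += va_block->size;
		offset += va_block->size;
	}

	return 0;

err_va_umap:
	/* Undo only the mappings that succeeded; offset tracks how far we got */
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		if (offset <= 0)
			break;
		hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
				offset <= va_block->size);
		offset -= va_block->size;
	}

err_va_pool_free:
	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
		list_del(&va_block->node);
		kfree(va_block);
	}

	return rc;
}
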
117 struct hl_vm_va_block *va_block, *tmp; in cb_unmap_mem() local
121 list_for_each_entry(va_block, &cb->va_block_list, node) in cb_unmap_mem()
122 if (hl_mmu_unmap_page(ctx, va_block->start, va_block->size, in cb_unmap_mem()
123 list_is_last(&va_block->node, in cb_unmap_mem()
127 va_block->start); in cb_unmap_mem()
133 list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) { in cb_unmap_mem()
134 gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size); in cb_unmap_mem()
135 list_del(&va_block->node); in cb_unmap_mem()
136 kfree(va_block); in cb_unmap_mem()
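
The cb_unmap_mem() fragments mirror the teardown path: each block is unmapped (again flushing the PTE cache only on the last entry), then its VA range is returned to the pool and the descriptor freed. A minimal sketch follows; the ratelimited warning around the excerpted va_block->start and the omission of locking and MMU-cache invalidation are assumptions, not the driver verbatim.

static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_vm_va_block *va_block, *tmp;

	/* Unmap every block; flush the PTE cache only for the last entry */
	list_for_each_entry(va_block, &cb->va_block_list, node)
		if (hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
				list_is_last(&va_block->node,
						&cb->va_block_list)))
			dev_warn_ratelimited(ctx->hdev->dev,	/* assumed log call */
					"Failed to unmap CB's VA %#llx\n",
					va_block->start);

	/* Return the VA ranges to the pool and free the block descriptors */
	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
		list_del(&va_block->node);
		kfree(va_block);
	}
}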