Lines Matching refs: phys_pg_pack

57 struct hl_vm_phys_pg_pack *phys_pg_pack; in alloc_device_memory() local
86 phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL); in alloc_device_memory()
87 if (!phys_pg_pack) { in alloc_device_memory()
92 phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK; in alloc_device_memory()
93 phys_pg_pack->asid = ctx->asid; in alloc_device_memory()
94 phys_pg_pack->npages = num_pgs; in alloc_device_memory()
95 phys_pg_pack->page_size = page_size; in alloc_device_memory()
96 phys_pg_pack->total_size = total_size; in alloc_device_memory()
97 phys_pg_pack->flags = args->flags; in alloc_device_memory()
98 phys_pg_pack->contiguous = contiguous; in alloc_device_memory()
100 phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL); in alloc_device_memory()
101 if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) { in alloc_device_memory()
106 if (phys_pg_pack->contiguous) { in alloc_device_memory()
108 phys_pg_pack->pages[i] = paddr + i * page_size; in alloc_device_memory()
111 phys_pg_pack->pages[i] = (u64) gen_pool_alloc( in alloc_device_memory()
114 if (!phys_pg_pack->pages[i]) { in alloc_device_memory()
126 handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0, in alloc_device_memory()
139 phys_pg_pack->handle = handle; in alloc_device_memory()
141 atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem); in alloc_device_memory()
142 atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem); in alloc_device_memory()
150 if (!phys_pg_pack->contiguous) in alloc_device_memory()
152 gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i], in alloc_device_memory()
155 kvfree(phys_pg_pack->pages); in alloc_device_memory()
157 kfree(phys_pg_pack); in alloc_device_memory()
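
Read top to bottom, the alloc_device_memory() matches (this is the Habana Labs driver's device-memory allocator) trace the birth of a pack: allocate and describe the descriptor, build the pages[] array either from one contiguous gen_pool chunk or page by page, publish it in the phys_pg_pack_handles IDR, and account its size against the context and the device. A condensed sketch reconstructed from the matched lines; the elided local declarations (num_pgs, paddr, contiguous, num_curr_pgs), the IDR locking, the GFP_ATOMIC flag, and the label names are assumptions:

        phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
        if (!phys_pg_pack)
                return -ENOMEM;

        /* describe the pack: type, owner ASID, geometry, allocation flags */
        phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK;
        phys_pg_pack->asid = ctx->asid;
        phys_pg_pack->npages = num_pgs;
        phys_pg_pack->page_size = page_size;
        phys_pg_pack->total_size = total_size;
        phys_pg_pack->flags = args->flags;
        phys_pg_pack->contiguous = contiguous;

        phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
        if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
                rc = -ENOMEM;
                goto pages_arr_err;             /* label name assumed */
        }

        if (phys_pg_pack->contiguous) {
                /* paddr is one gen_pool chunk covering the whole pack */
                for (i = 0 ; i < num_pgs ; i++)
                        phys_pg_pack->pages[i] = paddr + i * page_size;
        } else {
                /* allocate every physical page separately from the DRAM pool */
                for (i = 0 ; i < num_pgs ; i++) {
                        phys_pg_pack->pages[i] = (u64) gen_pool_alloc(
                                        vm->dram_pg_pool, page_size);
                        if (!phys_pg_pack->pages[i]) {
                                rc = -ENOMEM;
                                goto page_err;  /* label name assumed */
                        }
                        num_curr_pgs++;
                }
        }

        /* publish the pack; userspace refers to it by this handle */
        handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
                                GFP_ATOMIC);
        if (handle < 0) {
                rc = -EFAULT;
                goto page_err;
        }

        phys_pg_pack->handle = handle;

        /* account the allocation per context and per device */
        atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
        atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);

        return 0;

page_err:
        if (!phys_pg_pack->contiguous)
                for (i = 0 ; i < num_curr_pgs ; i++)
                        gen_pool_free(vm->dram_pg_pool,
                                        phys_pg_pack->pages[i], page_size);
        kvfree(phys_pg_pack->pages);
pages_arr_err:
        kfree(phys_pg_pack);
        return rc;

Per the matches at source lines 150-152, only the non-contiguous case frees page by page on error; the contiguous chunk is evidently released elsewhere, outside these matches.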
268 struct hl_vm_phys_pg_pack *phys_pg_pack) in free_phys_pg_pack() argument
273 if (!phys_pg_pack->created_from_userptr) { in free_phys_pg_pack()
274 if (phys_pg_pack->contiguous) { in free_phys_pg_pack()
275 gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0], in free_phys_pg_pack()
276 phys_pg_pack->total_size); in free_phys_pg_pack()
278 for (i = 0; i < phys_pg_pack->npages ; i++) in free_phys_pg_pack()
282 for (i = 0 ; i < phys_pg_pack->npages ; i++) { in free_phys_pg_pack()
284 phys_pg_pack->pages[i], in free_phys_pg_pack()
285 phys_pg_pack->page_size); in free_phys_pg_pack()
292 kvfree(phys_pg_pack->pages); in free_phys_pg_pack()
293 kfree(phys_pg_pack); in free_phys_pg_pack()
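
free_phys_pg_pack() is the inverse: a pack created from a userptr never owns device pages (its pages[] entries are DMA addresses of pinned host memory), so only the descriptor and the array are freed; a device-allocated pack first returns its pages to the DRAM gen_pool, as one region when contiguous or page by page otherwise. The matched loops at source lines 278 and 282 also drop a per-page reference on the DRAM pool, omitted below for brevity. A sketch:

        static void free_phys_pg_pack(struct hl_device *hdev,
                        struct hl_vm_phys_pg_pack *phys_pg_pack)
        {
                struct hl_vm *vm = &hdev->vm;
                u64 i;

                if (!phys_pg_pack->created_from_userptr) {
                        if (phys_pg_pack->contiguous) {
                                /* pages[0] is the base of the single chunk */
                                gen_pool_free(vm->dram_pg_pool,
                                                phys_pg_pack->pages[0],
                                                phys_pg_pack->total_size);
                        } else {
                                for (i = 0 ; i < phys_pg_pack->npages ; i++)
                                        gen_pool_free(vm->dram_pg_pool,
                                                phys_pg_pack->pages[i],
                                                phys_pg_pack->page_size);
                        }
                }

                kvfree(phys_pg_pack->pages);
                kfree(phys_pg_pack);
        }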
309 struct hl_vm_phys_pg_pack *phys_pg_pack; in free_device_memory() local
312 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle); in free_device_memory()
313 if (phys_pg_pack) { in free_device_memory()
314 if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) { in free_device_memory()
329 atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem); in free_device_memory()
330 atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem); in free_device_memory()
332 free_phys_pg_pack(hdev, phys_pg_pack); in free_device_memory()
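
free_device_memory() resolves the user-supplied handle back to a pack through the IDR and refuses to free while mapping_cnt is non-zero, i.e. while any device VA still points at these pages; otherwise it reverses the accounting done at allocation and releases the pack. Condensed sketch; the idr_lock spinlock and the idr_remove() call follow the usual IDR pattern and are assumptions here:

        spin_lock(&vm->idr_lock);               /* lock name assumed */
        phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
        if (!phys_pg_pack) {
                spin_unlock(&vm->idr_lock);
                return -EINVAL;
        }

        if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
                /* still mapped somewhere; the caller must unmap first */
                spin_unlock(&vm->idr_lock);
                return -EINVAL;
        }

        idr_remove(&vm->phys_pg_pack_handles, handle);  /* assumed */
        spin_unlock(&vm->idr_lock);

        atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
        atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);

        free_phys_pg_pack(hdev, phys_pg_pack);
        return 0;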
637 struct hl_vm_phys_pg_pack *phys_pg_pack; in init_phys_pg_pack_from_userptr() local
647 phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL); in init_phys_pg_pack_from_userptr()
648 if (!phys_pg_pack) in init_phys_pg_pack_from_userptr()
651 phys_pg_pack->vm_type = userptr->vm_type; in init_phys_pg_pack_from_userptr()
652 phys_pg_pack->created_from_userptr = true; in init_phys_pg_pack_from_userptr()
653 phys_pg_pack->asid = ctx->asid; in init_phys_pg_pack_from_userptr()
654 atomic_set(&phys_pg_pack->mapping_cnt, 1); in init_phys_pg_pack_from_userptr()
680 phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64), in init_phys_pg_pack_from_userptr()
682 if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) { in init_phys_pg_pack_from_userptr()
687 phys_pg_pack->npages = total_npages; in init_phys_pg_pack_from_userptr()
688 phys_pg_pack->page_size = page_size; in init_phys_pg_pack_from_userptr()
689 phys_pg_pack->total_size = total_npages * page_size; in init_phys_pg_pack_from_userptr()
698 phys_pg_pack->offset = dma_addr & (page_size - 1); in init_phys_pg_pack_from_userptr()
703 phys_pg_pack->pages[j++] = dma_addr; in init_phys_pg_pack_from_userptr()
713 *pphys_pg_pack = phys_pg_pack; in init_phys_pg_pack_from_userptr()
718 kfree(phys_pg_pack); in init_phys_pg_pack_from_userptr()
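
init_phys_pg_pack_from_userptr() builds a pack that wraps pinned host memory instead of device DRAM: it marks the pack created_from_userptr, starts mapping_cnt at 1, sizes pages[] from the scatter-gather total, records the sub-page offset of the first DMA address, and flattens every sg entry into page-sized steps. Excerpt sketch, with declarations elided; the get_sg_info() helper, the sgt field names, and the label are assumptions, and the one-page-per-step loop ignores the larger host page size that the comparisons at source lines 890 and 1074 suggest the real code also supports:

        phys_pg_pack->vm_type = userptr->vm_type;
        phys_pg_pack->created_from_userptr = true;
        phys_pg_pack->asid = ctx->asid;
        atomic_set(&phys_pg_pack->mapping_cnt, 1);      /* born mapped once */

        phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
                                                GFP_KERNEL);
        if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
                rc = -ENOMEM;
                goto free_pack;                 /* label name assumed */
        }

        phys_pg_pack->npages = total_npages;
        phys_pg_pack->page_size = page_size;
        phys_pg_pack->total_size = total_npages * page_size;

        j = 0;
        first = true;
        for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
                npages = get_sg_info(sg, &dma_addr);    /* helper assumed */

                if (first) {
                        first = false;
                        /* keep the buffer's offset within its first page */
                        phys_pg_pack->offset = dma_addr & (page_size - 1);
                        dma_addr &= ~((u64) page_size - 1);
                }

                /* flatten the sg entry into page_size steps */
                while (npages--) {
                        phys_pg_pack->pages[j++] = dma_addr;
                        dma_addr += page_size;
                }
        }

        *pphys_pg_pack = phys_pg_pack;
        return 0;

free_pack:
        kfree(phys_pg_pack);
        return rc;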
735 struct hl_vm_phys_pg_pack *phys_pg_pack) in map_phys_pg_pack() argument
739 u32 page_size = phys_pg_pack->page_size; in map_phys_pg_pack()
742 for (i = 0 ; i < phys_pg_pack->npages ; i++) { in map_phys_pg_pack()
743 paddr = phys_pg_pack->pages[i]; in map_phys_pg_pack()
746 (i + 1) == phys_pg_pack->npages); in map_phys_pg_pack()
750 phys_pg_pack->handle, phys_pg_pack->npages, in map_phys_pg_pack()
768 phys_pg_pack->handle, next_vaddr, in map_phys_pg_pack()
769 phys_pg_pack->pages[i], page_size); in map_phys_pg_pack()
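
map_phys_pg_pack() walks pages[] and installs one MMU translation per page at consecutive device VAs; the (i + 1) == phys_pg_pack->npages argument marks the pack's final page so the MMU layer can flush once. On failure it logs the handle and progress, then unwinds the translations already installed. Sketch; hl_mmu_map()/hl_mmu_unmap() do not appear among the matches and are assumptions, while the last-page flag and the log arguments are taken from the matched lines:

        static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
                        struct hl_vm_phys_pg_pack *phys_pg_pack)
        {
                struct hl_device *hdev = ctx->hdev;
                u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
                u32 page_size = phys_pg_pack->page_size;
                int rc = 0;

                for (i = 0 ; i < phys_pg_pack->npages ; i++) {
                        paddr = phys_pg_pack->pages[i];

                        /* flush only after the pack's last page */
                        rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size,
                                        (i + 1) == phys_pg_pack->npages);
                        if (rc) {
                                dev_err(hdev->dev,
                                        "map failed for handle %u, npages: %llu, mapped: %llu\n",
                                        phys_pg_pack->handle,
                                        phys_pg_pack->npages, mapped_pg_cnt);
                                goto err;
                        }

                        mapped_pg_cnt++;
                        next_vaddr += page_size;
                }

                return 0;

        err:
                /* unwind the translations installed before the failure */
                next_vaddr = vaddr;
                for (i = 0 ; i < mapped_pg_cnt ; i++, next_vaddr += page_size)
                        if (hl_mmu_unmap(ctx, next_vaddr, page_size,
                                        (i + 1) == mapped_pg_cnt))
                                dev_warn_ratelimited(hdev->dev,
                                        "failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
                                        phys_pg_pack->handle, next_vaddr,
                                        phys_pg_pack->pages[i], page_size);

                return rc;
        }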
784 struct hl_vm_phys_pg_pack *phys_pg_pack) in unmap_phys_pg_pack() argument
790 page_size = phys_pg_pack->page_size; in unmap_phys_pg_pack()
793 for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) { in unmap_phys_pg_pack()
795 (i + 1) == phys_pg_pack->npages)) in unmap_phys_pg_pack()
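
unmap_phys_pg_pack() is the mirror image: one MMU unmap per page at consecutive VAs, again flagging the pack's final page. Minimal sketch, with the MMU helper name assumed as above:

        static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
                        struct hl_vm_phys_pg_pack *phys_pg_pack)
        {
                struct hl_device *hdev = ctx->hdev;
                u64 next_vaddr = vaddr, i;
                u32 page_size = phys_pg_pack->page_size;

                for (i = 0 ; i < phys_pg_pack->npages ; i++,
                                        next_vaddr += page_size) {
                        /* again, flush only after the final page */
                        if (hl_mmu_unmap(ctx, next_vaddr, page_size,
                                        (i + 1) == phys_pg_pack->npages))
                                dev_warn_ratelimited(hdev->dev,
                                        "unmap failed for vaddr: 0x%llx\n",
                                        next_vaddr);
                }
        }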
813 struct hl_vm_phys_pg_pack *phys_pg_pack; in get_paddr_from_handle() local
818 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle); in get_paddr_from_handle()
819 if (!phys_pg_pack) { in get_paddr_from_handle()
825 *paddr = phys_pg_pack->pages[0]; in get_paddr_from_handle()
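
get_paddr_from_handle() only needs a pack's base physical address, so it looks the handle up in the IDR and returns pages[0], which covers the whole region when the pack is contiguous (presumably the case this helper is meant for). Sketch, with the IDR locking elided and the message text assumed:

        phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
        if (!phys_pg_pack) {
                dev_err(hdev->dev, "no match for handle %u\n", handle);
                return -EINVAL;
        }

        /* base device physical address of the pack */
        *paddr = phys_pg_pack->pages[0];
        return 0;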
851 struct hl_vm_phys_pg_pack *phys_pg_pack; in map_device_va() local
877 &phys_pg_pack); in map_device_va()
887 handle = phys_pg_pack->handle; in map_device_va()
890 if (phys_pg_pack->page_size == page_size) { in map_device_va()
913 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle); in map_device_va()
914 if (!phys_pg_pack) { in map_device_va()
922 atomic_inc(&phys_pg_pack->mapping_cnt); in map_device_va()
926 vm_type = (enum vm_type_t *) phys_pg_pack; in map_device_va()
939 if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) && in map_device_va()
940 phys_pg_pack->asid != ctx->asid) { in map_device_va()
954 ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size, in map_device_va()
965 rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack); in map_device_va()
984 ret_vaddr += phys_pg_pack->offset; in map_device_va()
996 free_phys_pg_pack(hdev, phys_pg_pack); in map_device_va()
1002 ret_vaddr + phys_pg_pack->total_size - 1)) in map_device_va()
1011 atomic_dec(&phys_pg_pack->mapping_cnt); in map_device_va()
1013 free_phys_pg_pack(hdev, phys_pg_pack); in map_device_va()
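
map_device_va() ties it all together for the map ioctl. A userptr is wrapped in a transient pack via init_phys_pg_pack_from_userptr(); device memory is looked up by handle, pinned by bumping mapping_cnt, and checked against the sharing rule (non-shared packs may only be mapped by the owning ASID). Then a VA block sized to total_size is reserved, the pack is mapped, and the caller gets the VA plus the sub-page offset. Trimmed sketch with most error handling dropped; the host_va_range field, the trailing get_va_block() arguments, and the label names are assumptions:

        if (is_userptr) {
                /* wrap the pinned host pages in a transient pack */
                rc = init_phys_pg_pack_from_userptr(ctx, userptr,
                                                        &phys_pg_pack);
                if (rc)
                        return rc;

                handle = phys_pg_pack->handle;

                /* choose the host VA range matching the pack's page size */
                if (phys_pg_pack->page_size == page_size)
                        va_range = ctx->host_va_range;  /* field assumed */
        } else {
                phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
                if (!phys_pg_pack)
                        return -EINVAL;

                /* pin the pack so free_device_memory() cannot free it */
                atomic_inc(&phys_pg_pack->mapping_cnt);

                /* the VA node records the mapping type via this pointer */
                vm_type = (enum vm_type_t *) phys_pg_pack;
        }

        /* device memory is private unless allocated with HL_MEM_SHARED */
        if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) &&
                        phys_pg_pack->asid != ctx->asid) {
                rc = -EPERM;
                goto map_err;                   /* label name assumed */
        }

        ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size,
                                        hint_addr, va_block_align);

        rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);

        /* return the VA at the caller's original sub-page offset */
        ret_vaddr += phys_pg_pack->offset;

        /* a userptr pack is transient: drop it once the MMU holds the
         * translations; a device pack instead stays in the IDR with its
         * raised mapping_cnt
         */
        if (is_userptr)
                free_phys_pg_pack(hdev, phys_pg_pack);

On failure the matches show the reverse steps: the VA block apparently handed back (source line 1002), mapping_cnt dropped (1011), and the transient pack freed (1013).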
1035 struct hl_vm_phys_pg_pack *phys_pg_pack = NULL; in unmap_device_va() local
1066 &phys_pg_pack); in unmap_device_va()
1074 if (phys_pg_pack->page_size == in unmap_device_va()
1082 phys_pg_pack = hnode->ptr; in unmap_device_va()
1091 if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) { in unmap_device_va()
1097 vaddr &= ~(((u64) phys_pg_pack->page_size) - 1); in unmap_device_va()
1101 unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack); in unmap_device_va()
1129 vaddr + phys_pg_pack->total_size - 1); in unmap_device_va()
1139 atomic_dec(&phys_pg_pack->mapping_cnt); in unmap_device_va()
1143 free_phys_pg_pack(hdev, phys_pg_pack); in unmap_device_va()
1151 free_phys_pg_pack(hdev, phys_pg_pack); in unmap_device_va()
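
unmap_device_va() starts with phys_pg_pack = NULL (source line 1035) because only one branch produces a pack: a host mapping rebuilds a transient one from the userptr (with the page_size comparison at line 1074 apparently selecting the matching host VA range), while a device mapping takes the pack from the VA node. It then refuses to unmap a pack with mapping_cnt of zero, masks the sub-page offset off the VA, unmaps, returns the VA block, and drops the reference. Trimmed sketch; the hnode lookup, add_va_block() helper, and error handling are assumptions:

        if (is_userptr) {
                /* rebuild a transient pack from the pinned host pages */
                rc = init_phys_pg_pack_from_userptr(ctx, userptr,
                                                        &phys_pg_pack);
                if (rc)
                        return rc;
        } else {
                /* device memory: the VA node already holds the pack */
                phys_pg_pack = hnode->ptr;      /* node lookup assumed */
        }

        if (atomic_read(&phys_pg_pack->mapping_cnt) == 0)
                return -EINVAL;         /* nothing mapped at this VA */

        /* the stored VA carries the sub-page offset; align back down */
        vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);

        unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);

        /* hand [vaddr, vaddr + total_size - 1] back to the VA range */
        add_va_block(hdev, va_range, vaddr,
                        vaddr + phys_pg_pack->total_size - 1);

        atomic_dec(&phys_pg_pack->mapping_cnt);

        /* the transient userptr pack is no longer needed */
        if (is_userptr)
                free_phys_pg_pack(hdev, phys_pg_pack);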