Lines matching refs: phys_pg_pack. Each entry gives the line number in the source file, the matching line, and the enclosing function.
58 struct hl_vm_phys_pg_pack *phys_pg_pack; in alloc_device_memory() local
86 phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL); in alloc_device_memory()
87 if (!phys_pg_pack) { in alloc_device_memory()
92 phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK; in alloc_device_memory()
93 phys_pg_pack->asid = ctx->asid; in alloc_device_memory()
94 phys_pg_pack->npages = num_pgs; in alloc_device_memory()
95 phys_pg_pack->page_size = page_size; in alloc_device_memory()
96 phys_pg_pack->total_size = total_size; in alloc_device_memory()
97 phys_pg_pack->flags = args->flags; in alloc_device_memory()
98 phys_pg_pack->contiguous = contiguous; in alloc_device_memory()
100 phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL); in alloc_device_memory()
101 if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) { in alloc_device_memory()
106 if (phys_pg_pack->contiguous) { in alloc_device_memory()
108 phys_pg_pack->pages[i] = paddr + i * page_size; in alloc_device_memory()
111 phys_pg_pack->pages[i] = (u64) gen_pool_alloc( in alloc_device_memory()
114 if (!phys_pg_pack->pages[i]) { in alloc_device_memory()
126 handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0, in alloc_device_memory()
139 phys_pg_pack->handle = handle; in alloc_device_memory()
141 atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem); in alloc_device_memory()
142 atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem); in alloc_device_memory()
150 if (!phys_pg_pack->contiguous) in alloc_device_memory()
152 gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i], in alloc_device_memory()
155 kvfree(phys_pg_pack->pages); in alloc_device_memory()
157 kfree(phys_pg_pack); in alloc_device_memory()
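
The matches above cover the whole allocation path in alloc_device_memory(): the pack is zero-allocated, its metadata (vm_type, asid, npages, page_size, total_size, flags, contiguous) is filled in, and pages[] is populated either from one contiguous base address or page by page from the DRAM pool; the error path at 150-157 unwinds in reverse. A minimal userspace sketch of the pages[] fill and its unwind, with pool_alloc()/pool_free() as hypothetical stand-ins for the driver's gen_pool calls:

    #include <stdint.h>

    /* Hypothetical stand-ins for the driver's DRAM gen_pool; not kernel APIs. */
    uint64_t pool_alloc(uint64_t size);           /* returns 0 when exhausted */
    void pool_free(uint64_t addr, uint64_t size);

    /*
     * Fill pages[] for npages pages of page_size bytes, mirroring the two
     * branches at 106-114: a contiguous pack derives every entry from one
     * base address, a scattered pack allocates each page from the pool and
     * returns everything it already got if one allocation fails.
     */
    int fill_pages(uint64_t *pages, uint64_t npages, uint64_t page_size,
                   int contiguous, uint64_t base_paddr)
    {
        uint64_t i, j;

        for (i = 0; i < npages; i++) {
            if (contiguous) {
                pages[i] = base_paddr + i * page_size;
                continue;
            }
            pages[i] = pool_alloc(page_size);
            if (!pages[i]) {
                for (j = 0; j < i; j++)
                    pool_free(pages[j], page_size);
                return -1;          /* the driver returns -ENOMEM here */
            }
        }
        return 0;
    }

On success the pack is published through idr_alloc() with handles starting at 1 (line 126), and total_size is charged to both the per-context and per-device DRAM counters (141-142).
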
271 struct hl_vm_phys_pg_pack *phys_pg_pack) in free_phys_pg_pack() argument
277 if (phys_pg_pack->created_from_userptr) in free_phys_pg_pack()
280 if (phys_pg_pack->contiguous) { in free_phys_pg_pack()
283 phys_pg_pack->pages[0], in free_phys_pg_pack()
284 phys_pg_pack->total_size); in free_phys_pg_pack()
290 gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0], in free_phys_pg_pack()
291 phys_pg_pack->total_size); in free_phys_pg_pack()
293 for (i = 0; i < phys_pg_pack->npages ; i++) in free_phys_pg_pack()
297 for (i = 0 ; i < phys_pg_pack->npages ; i++) { in free_phys_pg_pack()
301 phys_pg_pack->pages[i], in free_phys_pg_pack()
302 phys_pg_pack->page_size); in free_phys_pg_pack()
308 phys_pg_pack->pages[i], in free_phys_pg_pack()
309 phys_pg_pack->page_size); in free_phys_pg_pack()
319 kvfree(phys_pg_pack->pages); in free_phys_pg_pack()
320 kfree(phys_pg_pack); in free_phys_pg_pack()
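
free_phys_pg_pack() is the mirror image: packs created from a userptr own no pool memory, so only the metadata is released; contiguous packs return one run of total_size bytes starting at pages[0]; scattered packs return each page individually. A simplified model, with pool_free() again a hypothetical stand-in for gen_pool_free():

    #include <stdint.h>
    #include <stdlib.h>

    void pool_free(uint64_t addr, uint64_t size);   /* gen_pool_free stand-in */

    struct pack {                   /* simplified hl_vm_phys_pg_pack */
        uint64_t *pages;
        uint64_t npages, page_size, total_size;
        int contiguous, created_from_userptr;
    };

    /* Userptr packs free only metadata; pool-backed packs return their
     * DRAM first, as one run or page by page. */
    void free_pack(struct pack *p)
    {
        uint64_t i;

        if (!p->created_from_userptr) {
            if (p->contiguous)
                pool_free(p->pages[0], p->total_size);
            else
                for (i = 0; i < p->npages; i++)
                    pool_free(p->pages[i], p->page_size);
        }
        free(p->pages);             /* kvfree()/kfree() in the driver */
        free(p);
    }
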
337 struct hl_vm_phys_pg_pack *phys_pg_pack; in free_device_memory() local
341 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle); in free_device_memory()
342 if (phys_pg_pack) { in free_device_memory()
343 if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) { in free_device_memory()
358 atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem); in free_device_memory()
359 atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem); in free_device_memory()
361 return free_phys_pg_pack(hdev, phys_pg_pack); in free_device_memory()
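
free_device_memory() resolves the user's handle through the IDR and refuses to free a pack that is still mapped, which is what the mapping_cnt test at 343 guards; only then are the DRAM counters rolled back and the pack freed. A sketch of the guard, with C11 atomics modeling the kernel's atomic_t and the errno values chosen for illustration:

    #include <stdatomic.h>
    #include <errno.h>
    #include <stddef.h>

    struct pack {
        atomic_int mapping_cnt;     /* models the kernel's atomic_t */
        /* sizes and pages elided */
    };

    struct pack *lookup(unsigned long handle);  /* stand-in for idr_find() */
    void free_pack(struct pack *p);             /* see the sketch above */

    int free_by_handle(unsigned long handle)
    {
        struct pack *p = lookup(handle);

        if (!p)
            return -EINVAL;         /* no such handle */
        if (atomic_load(&p->mapping_cnt) > 0)
            return -EINVAL;         /* still mapped; cannot free yet */
        /* roll back dram_phys_mem/dram_used_mem by total_size here */
        free_pack(p);
        return 0;
    }
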
823 struct hl_vm_phys_pg_pack *phys_pg_pack; in init_phys_pg_pack_from_userptr() local
830 phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL); in init_phys_pg_pack_from_userptr()
831 if (!phys_pg_pack) in init_phys_pg_pack_from_userptr()
834 phys_pg_pack->vm_type = userptr->vm_type; in init_phys_pg_pack_from_userptr()
835 phys_pg_pack->created_from_userptr = true; in init_phys_pg_pack_from_userptr()
836 phys_pg_pack->asid = ctx->asid; in init_phys_pg_pack_from_userptr()
837 atomic_set(&phys_pg_pack->mapping_cnt, 1); in init_phys_pg_pack_from_userptr()
865 phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64), in init_phys_pg_pack_from_userptr()
867 if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) { in init_phys_pg_pack_from_userptr()
872 phys_pg_pack->npages = total_npages; in init_phys_pg_pack_from_userptr()
873 phys_pg_pack->page_size = page_size; in init_phys_pg_pack_from_userptr()
874 phys_pg_pack->total_size = total_npages * page_size; in init_phys_pg_pack_from_userptr()
883 phys_pg_pack->offset = dma_addr & (page_size - 1); in init_phys_pg_pack_from_userptr()
888 phys_pg_pack->pages[j++] = dma_addr; in init_phys_pg_pack_from_userptr()
898 *pphys_pg_pack = phys_pg_pack; in init_phys_pg_pack_from_userptr()
903 kfree(phys_pg_pack); in init_phys_pg_pack_from_userptr()
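
init_phys_pg_pack_from_userptr() builds a pack from already-pinned user memory instead of the DRAM pool: it marks the pack created_from_userptr, starts mapping_cnt at 1 so the transient pack cannot be freed mid-map, records the sub-page offset of the first DMA address (line 883), and expands each DMA segment into page-aligned entries of pages[]. A simplified model of that expansion, with the page-size selection at 865-874 elided and a plain array standing in for the sg_table:

    #include <stdint.h>

    struct seg { uint64_t dma_addr, len; };     /* one pinned DMA segment */

    uint64_t expand_segs(const struct seg *segs, unsigned int nsegs,
                         uint64_t page_size, uint64_t *pages,
                         uint64_t *offset)
    {
        uint64_t j = 0;
        unsigned int s;

        /* keep the first segment's in-page offset for the caller (883) */
        *offset = segs[0].dma_addr & (page_size - 1);

        for (s = 0; s < nsegs; s++) {
            uint64_t base = segs[s].dma_addr & ~(page_size - 1);
            uint64_t span = segs[s].len + (segs[s].dma_addr - base);
            uint64_t n = (span + page_size - 1) / page_size;
            uint64_t p;

            for (p = 0; p < n; p++)
                pages[j++] = base + p * page_size;  /* page-aligned */
        }
        return j;   /* total_npages; total_size = total_npages * page_size */
    }
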
920 struct hl_vm_phys_pg_pack *phys_pg_pack) in map_phys_pg_pack() argument
924 u32 page_size = phys_pg_pack->page_size; in map_phys_pg_pack()
928 for (i = 0 ; i < phys_pg_pack->npages ; i++) { in map_phys_pg_pack()
929 paddr = phys_pg_pack->pages[i]; in map_phys_pg_pack()
932 (i + 1) == phys_pg_pack->npages); in map_phys_pg_pack()
936 phys_pg_pack->handle, phys_pg_pack->npages, in map_phys_pg_pack()
956 phys_pg_pack->handle, next_vaddr, in map_phys_pg_pack()
957 phys_pg_pack->pages[i], page_size); in map_phys_pg_pack()
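
map_phys_pg_pack() then walks pages[] and maps each physical page at consecutive device VAs, passing a flush-on-last flag via the (i + 1) == npages comparison at 932; the message at 956-957 belongs to the unwind path that tears down whatever was already mapped. A sketch under those assumptions, with mmu_map()/mmu_unmap() as hypothetical stand-ins for the driver's MMU calls:

    #include <stdint.h>

    /* Hypothetical MMU stubs; the last argument requests a cache flush. */
    int mmu_map(uint64_t vaddr, uint64_t paddr, uint32_t size, int flush);
    void mmu_unmap(uint64_t vaddr, uint32_t size, int flush);

    /* Map every page at consecutive VAs, flushing only on the last page;
     * on failure, unwind the pages mapped so far the same way. */
    int map_pack(uint64_t vaddr, const uint64_t *pages, uint64_t npages,
                 uint32_t page_size)
    {
        uint64_t i, mapped;

        for (i = 0; i < npages; i++) {
            if (mmu_map(vaddr + i * page_size, pages[i], page_size,
                        (i + 1) == npages))
                goto unwind;
        }
        return 0;

    unwind:
        for (mapped = i, i = 0; i < mapped; i++)
            mmu_unmap(vaddr + i * page_size, page_size, (i + 1) == mapped);
        return -1;                  /* an -ENOMEM-style error in the driver */
    }
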
983 struct hl_vm_phys_pg_pack *phys_pg_pack) in unmap_phys_pg_pack() argument
991 page_size = phys_pg_pack->page_size; in unmap_phys_pg_pack()
994 for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) { in unmap_phys_pg_pack()
996 (i + 1) == phys_pg_pack->npages)) in unmap_phys_pg_pack()
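
unmap_phys_pg_pack() is the same walk in reverse: npages unmaps starting at vaddr, stepping by page_size, again flushing only on the last page. A matching sketch, with the same hypothetical stub:

    #include <stdint.h>

    void mmu_unmap(uint64_t vaddr, uint32_t size, int flush);   /* stub */

    void unmap_pack(uint64_t vaddr, uint64_t npages, uint32_t page_size)
    {
        uint64_t i, next_vaddr;

        for (i = 0, next_vaddr = vaddr; i < npages;
             i++, next_vaddr += page_size)
            mmu_unmap(next_vaddr, page_size, (i + 1) == npages);
    }
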
1018 struct hl_vm_phys_pg_pack *phys_pg_pack; in get_paddr_from_handle() local
1023 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle); in get_paddr_from_handle()
1024 if (!phys_pg_pack) { in get_paddr_from_handle()
1030 *paddr = phys_pg_pack->pages[0]; in get_paddr_from_handle()
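
get_paddr_from_handle() only ever reads pages[0], which is the base of the whole allocation for a contiguous pack. A sketch with the IDR modeled as a lookup function:

    #include <stdint.h>
    #include <stddef.h>

    struct pack { uint64_t *pages; };           /* only the field used here */

    struct pack *lookup(unsigned long handle);  /* stand-in for idr_find() */

    int paddr_from_handle(unsigned long handle, uint64_t *paddr)
    {
        struct pack *p = lookup(handle);

        if (!p)
            return -1;              /* bad handle; exact errno per the driver */
        *paddr = p->pages[0];       /* base of a contiguous allocation */
        return 0;
    }
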
1055 struct hl_vm_phys_pg_pack *phys_pg_pack; in map_device_va() local
1082 &phys_pg_pack, false); in map_device_va()
1092 handle = phys_pg_pack->handle; in map_device_va()
1095 if (phys_pg_pack->page_size == page_size) { in map_device_va()
1119 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle); in map_device_va()
1120 if (!phys_pg_pack) { in map_device_va()
1128 atomic_inc(&phys_pg_pack->mapping_cnt); in map_device_va()
1132 vm_type = (enum vm_type *) phys_pg_pack; in map_device_va()
1146 if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) && in map_device_va()
1147 phys_pg_pack->asid != ctx->asid) { in map_device_va()
1161 if (hint_addr && phys_pg_pack->offset) { in map_device_va()
1166 hint_addr, phys_pg_pack->offset); in map_device_va()
1172 hint_addr, phys_pg_pack->offset); in map_device_va()
1175 ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size, in map_device_va()
1187 rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack); in map_device_va()
1196 *vm_type, ctx->asid, ret_vaddr, phys_pg_pack->total_size); in map_device_va()
1207 ret_vaddr += phys_pg_pack->offset; in map_device_va()
1219 rc = free_phys_pg_pack(hdev, phys_pg_pack); in map_device_va()
1225 ret_vaddr + phys_pg_pack->total_size - 1)) in map_device_va()
1234 atomic_dec(&phys_pg_pack->mapping_cnt); in map_device_va()
1236 free_phys_pg_pack(hdev, phys_pg_pack); in map_device_va()
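
map_device_va() ties the pieces together: a userptr is turned into a transient pack, while a device-memory handle is looked up and its mapping_cnt bumped (line 1128); a non-shared allocation belonging to another ASID is rejected (1146-1147); a VA block of total_size is reserved, with the hint address refused or dropped when the pack has a sub-page offset (1161-1172); and the address returned to the user is the block base plus that offset (1207). A sketch of the tail of this path; force_hint is a hypothetical name for the "hint must be honored" case, and the stubs and errno values are illustrative:

    #include <stdint.h>
    #include <errno.h>

    struct pack { uint64_t total_size, offset; /* other fields elided */ };

    /* Illustrative stubs: VA reservation and the mapping loop above. */
    uint64_t get_block(uint64_t size, uint64_t hint);   /* 0 when exhausted */
    int map_pack_stub(uint64_t vaddr, struct pack *p);

    int map_tail(struct pack *p, uint64_t hint, int force_hint,
                 uint64_t *device_addr)
    {
        uint64_t vaddr;

        if (hint && p->offset) {
            if (force_hint)
                return -EINVAL;     /* hint cannot be honored exactly */
            hint = 0;               /* model "hint will be ignored" */
        }
        vaddr = get_block(p->total_size, hint);
        if (!vaddr)
            return -ENOMEM;
        if (map_pack_stub(vaddr, p))
            return -ENOMEM;
        *device_addr = vaddr + p->offset;   /* restore sub-page alignment */
        return 0;
    }
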
1257 struct hl_vm_phys_pg_pack *phys_pg_pack = NULL; in unmap_device_va() local
1293 rc = init_phys_pg_pack_from_userptr(ctx, userptr, &phys_pg_pack, in unmap_device_va()
1302 if (phys_pg_pack->page_size == in unmap_device_va()
1310 phys_pg_pack = hnode->ptr; in unmap_device_va()
1319 if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) { in unmap_device_va()
1325 if (!is_userptr && !is_power_of_2(phys_pg_pack->page_size)) in unmap_device_va()
1328 phys_pg_pack->page_size) * in unmap_device_va()
1329 phys_pg_pack->page_size; in unmap_device_va()
1331 vaddr &= ~(((u64) phys_pg_pack->page_size) - 1); in unmap_device_va()
1335 unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack); in unmap_device_va()
1345 phys_pg_pack->total_size); in unmap_device_va()
1364 vaddr + phys_pg_pack->total_size - 1); in unmap_device_va()
1374 atomic_dec(&phys_pg_pack->mapping_cnt); in unmap_device_va()
1378 free_phys_pg_pack(hdev, phys_pg_pack); in unmap_device_va()
1386 free_phys_pg_pack(hdev, phys_pg_pack); in unmap_device_va()
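
unmap_device_va() has to round the user's VA down to the pack's page boundary before unmapping, and because this driver's page size need not be a power of two (the is_power_of_2() test at 1325), it divides and multiplies in the general case and masks only in the power-of-two case (1325-1331). A self-contained version of that arithmetic:

    #include <stdint.h>

    /* Round a device VA down to its pack's page boundary; plain masking
     * is only valid when page_size is a power of two. */
    uint64_t round_down_to_page(uint64_t vaddr, uint64_t page_size)
    {
        if (page_size & (page_size - 1))    /* not a power of two */
            return (vaddr / page_size) * page_size;
        return vaddr & ~(page_size - 1);
    }

A transient pack built from a userptr at 1293 is freed again at 1378/1386 once the unmap completes, since it was only needed to recover page_size and total_size.
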