// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

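/*
 * cb_map_mem() - map the CB's DMA memory into the device virtual address
 *                space of the given context.
 *
 * Allocates device VA blocks from the context's CB VA pool, one per PMMU
 * page, maps them to the CB's bus address under the context's MMU lock and
 * invalidates the MMU cache. On failure, every mapping and VA block that was
 * already created is rolled back.
 */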
static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_vm_va_block *va_block, *tmp;
	dma_addr_t bus_addr;
	u64 virt_addr;
	u32 page_size = prop->pmmu.page_size;
	s32 offset;
	int rc;

	if (!hdev->supports_cb_mapping) {
		dev_err_ratelimited(hdev->dev,
				"Cannot map CB because no VA range is allocated for CB mapping\n");
		return -EINVAL;
	}

	if (!hdev->mmu_enable) {
		dev_err_ratelimited(hdev->dev,
				"Cannot map CB because MMU is disabled\n");
		return -EINVAL;
	}

	INIT_LIST_HEAD(&cb->va_block_list);

	for (bus_addr = cb->bus_address;
			bus_addr < cb->bus_address + cb->size;
			bus_addr += page_size) {

		virt_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, page_size);
		if (!virt_addr) {
			dev_err(hdev->dev,
				"Failed to allocate device virtual address for CB\n");
			rc = -ENOMEM;
			goto err_va_pool_free;
		}

		va_block = kzalloc(sizeof(*va_block), GFP_KERNEL);
		if (!va_block) {
			rc = -ENOMEM;
			gen_pool_free(ctx->cb_va_pool, virt_addr, page_size);
			goto err_va_pool_free;
		}

		va_block->start = virt_addr;
		va_block->end = virt_addr + page_size;
		va_block->size = page_size;
		list_add_tail(&va_block->node, &cb->va_block_list);
	}

	mutex_lock(&ctx->mmu_lock);

	bus_addr = cb->bus_address;
	offset = 0;
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		rc = hl_mmu_map_page(ctx, va_block->start, bus_addr,
				va_block->size, list_is_last(&va_block->node,
						&cb->va_block_list));
		if (rc) {
			dev_err(hdev->dev, "Failed to map VA %#llx to CB\n",
				va_block->start);
			goto err_va_umap;
		}

		bus_addr += va_block->size;
		offset += va_block->size;
	}

	hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

	cb->is_mmu_mapped = true;

	return 0;

err_va_umap:
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		if (offset <= 0)
			break;
		hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
				offset <= va_block->size);
		offset -= va_block->size;
	}

	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

err_va_pool_free:
	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
		list_del(&va_block->node);
		kfree(va_block);
	}

	return rc;
}

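/*
 * cb_unmap_mem() - unmap the CB from the device virtual address space.
 *
 * Reverses cb_map_mem(): unmaps every VA block under the context's MMU lock,
 * invalidates the MMU cache and returns the VA blocks to the context's CB
 * VA pool.
 */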
static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm_va_block *va_block, *tmp;

	mutex_lock(&ctx->mmu_lock);

	list_for_each_entry(va_block, &cb->va_block_list, node)
		if (hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
				list_is_last(&va_block->node,
						&cb->va_block_list)))
			dev_warn_ratelimited(hdev->dev,
					"Failed to unmap CB's va 0x%llx\n",
					va_block->start);

	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
		list_del(&va_block->node);
		kfree(va_block);
	}
}

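/*
 * cb_fini() - free the CB's memory and its descriptor.
 *
 * Internal CBs are returned to the device's internal CB pool; other CBs
 * release their DMA coherent allocation.
 */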
static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_internal)
		gen_pool_free(hdev->internal_cb_pool,
				(uintptr_t)cb->kernel_address, cb->size);
	else
		hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
				cb->kernel_address, cb->bus_address);

	kfree(cb);
}

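/*
 * cb_do_release() - return a pool CB to the device's CB pool, or free a
 *                   regular CB entirely via cb_fini().
 */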
static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_pool) {
		spin_lock(&hdev->cb_pool_lock);
		list_add(&cb->pool_list, &hdev->cb_pool);
		spin_unlock(&hdev->cb_pool_lock);
	} else {
		cb_fini(hdev, cb);
	}
}

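/*
 * cb_release() - release callback of the CB's kref.
 *
 * Called when the last reference to the CB is dropped: removes the CB from
 * debugfs, unmaps it if it was MMU-mapped, releases the context reference
 * and finally releases the CB itself.
 */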
static void cb_release(struct kref *ref)
{
	struct hl_device *hdev;
	struct hl_cb *cb;

	cb = container_of(ref, struct hl_cb, refcount);
	hdev = cb->hdev;

	hl_debugfs_remove_cb(cb);

	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);

	hl_ctx_put(cb->ctx);

	cb_do_release(hdev, cb);
}

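/*
 * hl_cb_alloc() - allocate a CB descriptor and its backing memory.
 *
 * Internal CBs are carved out of the device's internal CB pool, kernel
 * context CBs use DMA coherent memory (GFP_ATOMIC first, falling back to
 * GFP_KERNEL), and user CBs use zeroed DMA coherent memory.
 * Returns the new CB or NULL on failure.
 */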
static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
					int ctx_id, bool internal_cb)
{
	struct hl_cb *cb = NULL;
	u32 cb_offset;
	void *p;

	/*
	 * We use GFP_ATOMIC here because this function can be called from
	 * the latency-sensitive code path for command submission. Due to H/W
	 * limitations in some of the ASICs, the kernel must copy the user CB
	 * that is designated for an external queue and actually enqueue
	 * the kernel's copy. Hence, we must never sleep in this code section
	 * and must use GFP_ATOMIC for all memory allocations.
	 */
	if (ctx_id == HL_KERNEL_ASID_ID && !hdev->disabled)
		cb = kzalloc(sizeof(*cb), GFP_ATOMIC);

	if (!cb)
		cb = kzalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb)
		return NULL;

	if (internal_cb) {
		p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
		if (!p) {
			kfree(cb);
			return NULL;
		}

		cb_offset = p - hdev->internal_cb_pool_virt_addr;
		cb->is_internal = true;
		cb->bus_address = hdev->internal_cb_va_base + cb_offset;
	} else if (ctx_id == HL_KERNEL_ASID_ID) {
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
						&cb->bus_address, GFP_ATOMIC);
		if (!p)
			p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
					cb_size, &cb->bus_address, GFP_KERNEL);
	} else {
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
						&cb->bus_address,
						GFP_USER | __GFP_ZERO);
	}

	if (!p) {
		dev_err(hdev->dev,
			"failed to allocate %d bytes of DMA memory for CB\n",
			cb_size);
		kfree(cb);
		return NULL;
	}

	cb->kernel_address = p;
	cb->size = cb_size;

	return cb;
}

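/*
 * hl_cb_create() - create a new command buffer and return its handle.
 *
 * For the kernel context, a CB may be taken from the device's CB pool
 * instead of being allocated. The CB is optionally mapped to the device's
 * MMU (user contexts only), registered in the manager's IDR, and its
 * mmap-able handle is returned through @handle.
 */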
int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
			bool map_cb, u64 *handle)
{
	struct hl_cb *cb;
	bool alloc_new_cb = true;
	int rc, ctx_id = ctx->asid;

	/*
	 * Can't use generic function to check this because of special case
	 * where we create a CB as part of the reset process
	 */
	if ((hdev->disabled) || ((atomic_read(&hdev->in_reset)) &&
					(ctx_id != HL_KERNEL_ASID_ID))) {
		dev_warn_ratelimited(hdev->dev,
			"Device is disabled or in reset. Can't create new CBs\n");
		rc = -EBUSY;
		goto out_err;
	}

	if (cb_size > SZ_2M) {
		dev_err(hdev->dev, "CB size %d must be smaller than or equal to %d\n",
			cb_size, SZ_2M);
		rc = -EINVAL;
		goto out_err;
	}

	if (!internal_cb) {
		/* Minimum allocation must be PAGE SIZE */
		if (cb_size < PAGE_SIZE)
			cb_size = PAGE_SIZE;

		if (ctx_id == HL_KERNEL_ASID_ID &&
				cb_size <= hdev->asic_prop.cb_pool_cb_size) {

			spin_lock(&hdev->cb_pool_lock);
			if (!list_empty(&hdev->cb_pool)) {
				cb = list_first_entry(&hdev->cb_pool,
						typeof(*cb), pool_list);
				list_del(&cb->pool_list);
				spin_unlock(&hdev->cb_pool_lock);
				alloc_new_cb = false;
			} else {
				spin_unlock(&hdev->cb_pool_lock);
				dev_dbg(hdev->dev, "CB pool is empty\n");
			}
		}
	}

	if (alloc_new_cb) {
		cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb);
		if (!cb) {
			rc = -ENOMEM;
			goto out_err;
		}
	}

	cb->hdev = hdev;
	cb->ctx = ctx;
	hl_ctx_get(hdev, cb->ctx);

	if (map_cb) {
		if (ctx_id == HL_KERNEL_ASID_ID) {
			dev_err(hdev->dev,
				"CB mapping is not supported for kernel context\n");
			rc = -EINVAL;
			goto release_cb;
		}

		rc = cb_map_mem(ctx, cb);
		if (rc)
			goto release_cb;
	}

	spin_lock(&mgr->cb_lock);
	rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
	spin_unlock(&mgr->cb_lock);

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
		goto unmap_mem;
	}

	cb->id = (u64) rc;

	kref_init(&cb->refcount);
	spin_lock_init(&cb->lock);

	/*
	 * idr is 32-bit so we can safely OR it with a mask that is above
	 * 32 bit
	 */
	*handle = cb->id | HL_MMAP_TYPE_CB;
	*handle <<= PAGE_SHIFT;

	hl_debugfs_add_cb(cb);

	return 0;

unmap_mem:
	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);
release_cb:
	hl_ctx_put(cb->ctx);
	cb_do_release(hdev, cb);
out_err:
	*handle = 0;

	return rc;
}

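/*
 * hl_cb_destroy() - remove a CB handle from the manager and drop the
 *                   handle's reference.
 *
 * The CB itself is freed only when its last reference is released.
 */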
int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle)
{
	struct hl_cb *cb;
	u32 handle;
	int rc = 0;

	/* The CB handle was given to the user for mmap, so shift it back to
	 * the value that was allocated by the IDR module.
	 */
	cb_handle >>= PAGE_SHIFT;
	handle = (u32) cb_handle;

	spin_lock(&mgr->cb_lock);

	cb = idr_find(&mgr->cb_handles, handle);
	if (cb) {
		idr_remove(&mgr->cb_handles, handle);
		spin_unlock(&mgr->cb_lock);
		kref_put(&cb->refcount, cb_release);
	} else {
		spin_unlock(&mgr->cb_lock);
		dev_err(hdev->dev,
			"CB destroy failed, no match to handle 0x%x\n", handle);
		rc = -EINVAL;
	}

	return rc;
}

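/*
 * hl_cb_info() - retrieve the usage count (cs_cnt) of the CB that matches
 *                the given handle.
 */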
static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			u64 cb_handle, u32 *usage_cnt)
{
	struct hl_cb *cb;
	u32 handle;
	int rc = 0;

	/* The CB handle was given to user to do mmap, so need to shift it back
	 * to the value which was allocated by the IDR module.
	 */
	cb_handle >>= PAGE_SHIFT;
	handle = (u32) cb_handle;

	spin_lock(&mgr->cb_lock);

	cb = idr_find(&mgr->cb_handles, handle);
	if (!cb) {
		dev_err(hdev->dev,
			"CB info failed, no match to handle 0x%x\n", handle);
		rc = -EINVAL;
		goto out;
	}

	*usage_cnt = atomic_read(&cb->cs_cnt);

out:
	spin_unlock(&mgr->cb_lock);
	return rc;
}

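/*
 * hl_cb_ioctl() - handler of the command buffer IOCTL.
 *
 * Dispatches the CREATE, DESTROY and INFO operations. CREATE returns the
 * new CB handle to the user, INFO returns the CB's usage count.
 */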
int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
{
	union hl_cb_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	enum hl_device_status status;
	u64 handle = 0;
	u32 usage_cnt = 0;
	int rc;

	if (!hl_device_operational(hdev, &status)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't execute CB IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->in.op) {
	case HL_CB_OP_CREATE:
		if (args->in.cb_size > HL_MAX_CB_SIZE) {
			dev_err(hdev->dev,
				"User requested CB size %d must be smaller than or equal to %d\n",
				args->in.cb_size, HL_MAX_CB_SIZE);
			rc = -EINVAL;
		} else {
			rc = hl_cb_create(hdev, &hpriv->cb_mgr, hpriv->ctx,
					args->in.cb_size, false,
					!!(args->in.flags & HL_CB_FLAGS_MAP),
					&handle);
		}

		memset(args, 0, sizeof(*args));
		args->out.cb_handle = handle;
		break;

	case HL_CB_OP_DESTROY:
		rc = hl_cb_destroy(hdev, &hpriv->cb_mgr,
					args->in.cb_handle);
		break;

	case HL_CB_OP_INFO:
		rc = hl_cb_info(hdev, &hpriv->cb_mgr, args->in.cb_handle,
				&usage_cnt);
		memset(args, 0, sizeof(*args));
		args->out.usage_cnt = usage_cnt;
		break;

	default:
		rc = -ENOTTY;
		break;
	}

	return rc;
}

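/*
 * cb_vm_close() - VMA close callback for a user-mapped CB.
 *
 * Tracks partial unmaps via mmap_size; once the entire mapping is gone,
 * the CB is marked as not mmapped and the mmap reference is dropped.
 */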
static void cb_vm_close(struct vm_area_struct *vma)
{
	struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data;
	long new_mmap_size;

	new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start);

	if (new_mmap_size > 0) {
		cb->mmap_size = new_mmap_size;
		return;
	}

	spin_lock(&cb->lock);
	cb->mmap = false;
	spin_unlock(&cb->lock);

	hl_cb_put(cb);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct cb_vm_ops = {
	.close = cb_vm_close
};

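/*
 * hl_cb_mmap() - map a CB to user-space.
 *
 * The CB handle is passed in vm_pgoff. The mapping size must match the
 * page-aligned CB size and a CB can only be mmapped once at a time. The
 * reference taken by hl_cb_get() is transferred to the VMA and is dropped
 * in cb_vm_close() when the mapping is fully unmapped.
 */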
int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cb *cb;
	u32 handle, user_cb_size;
	int rc;

	/* We use the page offset to hold the IDR handle and thus we need to
	 * clear it before doing the mmap itself
	 */
	handle = vma->vm_pgoff;
	vma->vm_pgoff = 0;

	/* reference was taken here */
	cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
	if (!cb) {
		dev_err(hdev->dev,
			"CB mmap failed, no match to handle 0x%x\n", handle);
		return -EINVAL;
	}

	/* Validation check */
	user_cb_size = vma->vm_end - vma->vm_start;
	if (user_cb_size != ALIGN(cb->size, PAGE_SIZE)) {
		dev_err(hdev->dev,
			"CB mmap failed, mmap size 0x%lx != 0x%x cb size\n",
			vma->vm_end - vma->vm_start, cb->size);
		rc = -EINVAL;
		goto put_cb;
	}

	if (!access_ok((void __user *) (uintptr_t) vma->vm_start,
							user_cb_size)) {
		dev_err(hdev->dev,
			"user pointer is invalid - 0x%lx\n",
			vma->vm_start);

		rc = -EINVAL;
		goto put_cb;
	}

	spin_lock(&cb->lock);

	if (cb->mmap) {
		dev_err(hdev->dev,
			"CB mmap failed, CB is already mmapped to user\n");
		rc = -EINVAL;
		goto release_lock;
	}

	cb->mmap = true;

	spin_unlock(&cb->lock);

	vma->vm_ops = &cb_vm_ops;

	/*
	 * Note: We're transferring the cb reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = cb;

	rc = hdev->asic_funcs->mmap(hdev, vma, cb->kernel_address,
					cb->bus_address, cb->size);
	if (rc) {
		spin_lock(&cb->lock);
		cb->mmap = false;
		goto release_lock;
	}

	cb->mmap_size = cb->size;
	vma->vm_pgoff = handle;

	return 0;

release_lock:
	spin_unlock(&cb->lock);
put_cb:
	hl_cb_put(cb);
	return rc;
}

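/*
 * hl_cb_get() - look up a CB by handle and take a reference on it.
 *
 * The caller must drop the reference with hl_cb_put() when done.
 */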
struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			u32 handle)
{
	struct hl_cb *cb;

	spin_lock(&mgr->cb_lock);
	cb = idr_find(&mgr->cb_handles, handle);

	if (!cb) {
		spin_unlock(&mgr->cb_lock);
		dev_warn(hdev->dev,
			"CB get failed, no match to handle 0x%x\n", handle);
		return NULL;
	}

	kref_get(&cb->refcount);

	spin_unlock(&mgr->cb_lock);

	return cb;
}

void hl_cb_put(struct hl_cb *cb)
{
	kref_put(&cb->refcount, cb_release);
}

void hl_cb_mgr_init(struct hl_cb_mgr *mgr)
{
	spin_lock_init(&mgr->cb_lock);
	idr_init(&mgr->cb_handles);
}

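/*
 * hl_cb_mgr_fini() - destroy a CB manager.
 *
 * Drops the handle reference of every CB that is still registered in the
 * IDR, reports CBs that remain alive after that, and destroys the IDR.
 */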
void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
{
	struct hl_cb *cb;
	struct idr *idp;
	u32 id;

	idp = &mgr->cb_handles;

	idr_for_each_entry(idp, cb, id) {
		if (kref_put(&cb->refcount, cb_release) != 1)
			dev_err(hdev->dev,
				"CB %d for CTX ID %d is still alive\n",
				id, cb->ctx->asid);
	}

	idr_destroy(&mgr->cb_handles);
}

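/*
 * hl_cb_kernel_create() - create a CB on behalf of the kernel driver.
 *
 * Creates the CB in the kernel context's CB manager and returns a
 * referenced pointer to it, or NULL on failure.
 */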
struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
					bool internal_cb)
{
	u64 cb_handle;
	struct hl_cb *cb;
	int rc;

	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, cb_size,
				internal_cb, false, &cb_handle);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to allocate CB for the kernel driver %d\n", rc);
		return NULL;
	}

	cb_handle >>= PAGE_SHIFT;
	cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle);
	/* hl_cb_get should never fail here */
	if (!cb) {
		dev_crit(hdev->dev, "Kernel CB handle invalid 0x%x\n",
				(u32) cb_handle);
		goto destroy_cb;
	}

	return cb;

destroy_cb:
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT);

	return NULL;
}

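/*
 * hl_cb_pool_init() - pre-allocate the device's pool of kernel CBs.
 *
 * Allocates cb_pool_cb_cnt CBs of cb_pool_cb_size bytes each. On any
 * allocation failure the pool is torn down and -ENOMEM is returned.
 */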
int hl_cb_pool_init(struct hl_device *hdev)
{
	struct hl_cb *cb;
	int i;

	INIT_LIST_HEAD(&hdev->cb_pool);
	spin_lock_init(&hdev->cb_pool_lock);

	for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
		cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
				HL_KERNEL_ASID_ID, false);
		if (cb) {
			cb->is_pool = true;
			list_add(&cb->pool_list, &hdev->cb_pool);
		} else {
			hl_cb_pool_fini(hdev);
			return -ENOMEM;
		}
	}

	return 0;
}

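/*
 * hl_cb_pool_fini() - free all CBs that are still in the device's CB pool.
 */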
int hl_cb_pool_fini(struct hl_device *hdev)
{
	struct hl_cb *cb, *tmp;

	list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
		list_del(&cb->pool_list);
		cb_fini(hdev, cb);
	}

	return 0;
}

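/*
 * hl_cb_va_pool_init() - create the context's device VA pool for CB mapping.
 *
 * Only relevant when the ASIC supports CB mapping; the pool covers the CB
 * VA range defined in the ASIC's fixed properties, with the PMMU page size
 * as the allocation granularity.
 */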
int hl_cb_va_pool_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	if (!hdev->supports_cb_mapping)
		return 0;

	ctx->cb_va_pool = gen_pool_create(__ffs(prop->pmmu.page_size), -1);
	if (!ctx->cb_va_pool) {
		dev_err(hdev->dev,
			"Failed to create VA gen pool for CB mapping\n");
		return -ENOMEM;
	}

	rc = gen_pool_add(ctx->cb_va_pool, prop->cb_va_start_addr,
			prop->cb_va_end_addr - prop->cb_va_start_addr, -1);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to VA gen pool for CB mapping\n");
		goto err_pool_destroy;
	}

	return 0;

err_pool_destroy:
	gen_pool_destroy(ctx->cb_va_pool);

	return rc;
}

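/*
 * hl_cb_va_pool_fini() - destroy the context's CB VA pool.
 */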
void hl_cb_va_pool_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (!hdev->supports_cb_mapping)
		return;

	gen_pool_destroy(ctx->cb_va_pool);
}