// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "../habanalabs.h"
#include "../../include/hw_ip/mmu/mmu_general.h"

#include <linux/slab.h>

#define MMU_V1_MAX_HOPS	(MMU_HOP4 + 1)

static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);

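/*
 * get_pgt_info - look up the pgt_info of a shadow hop address in the
 * per-context shadow hash. Returns NULL if no matching hop is found.
 */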
static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
{
	struct pgt_info *pgt_info = NULL;

	hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node,
				(unsigned long) hop_addr)
		if (hop_addr == pgt_info->shadow_addr)
			break;

	return pgt_info;
}

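/*
 * _free_hop - return the hop's physical page to the device page-table pool
 * and free its host shadow copy together with the tracking pgt_info.
 */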
static void _free_hop(struct hl_ctx *ctx, struct pgt_info *pgt_info)
{
	struct hl_device *hdev = ctx->hdev;

	gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, pgt_info->phys_addr,
			hdev->asic_prop.mmu_hop_table_size);
	hash_del(&pgt_info->node);
	kfree((u64 *) (uintptr_t) pgt_info->shadow_addr);
	kfree(pgt_info);
}

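/* free_hop - look up the pgt_info of the given shadow hop address and free it */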
static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
{
	struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);

	_free_hop(ctx, pgt_info);
}

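/*
 * alloc_hop - allocate a new hop: a physical page from the device page-table
 * pool plus a zeroed host shadow copy, tracked by a pgt_info entry in the
 * shadow hash. Returns the shadow address of the hop, or ULLONG_MAX on
 * failure.
 */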
static u64 alloc_hop(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct pgt_info *pgt_info;
	u64 phys_addr, shadow_addr;

	pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
	if (!pgt_info)
		return ULLONG_MAX;

	phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.dr.mmu_pgt_pool,
					prop->mmu_hop_table_size);
	if (!phys_addr) {
		dev_err(hdev->dev, "failed to allocate page\n");
		goto pool_add_err;
	}

	shadow_addr = (u64) (uintptr_t) kzalloc(prop->mmu_hop_table_size,
						GFP_KERNEL);
	if (!shadow_addr)
		goto shadow_err;

	pgt_info->phys_addr = phys_addr;
	pgt_info->shadow_addr = shadow_addr;
	pgt_info->ctx = ctx;
	pgt_info->num_of_ptes = 0;
	hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr);

	return shadow_addr;

shadow_err:
	gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, phys_addr,
			prop->mmu_hop_table_size);
pool_add_err:
	kfree(pgt_info);

	return ULLONG_MAX;
}

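/* get_phys_hop0_addr - physical (device) address of the context's hop 0 table */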
static inline u64 get_phys_hop0_addr(struct hl_ctx *ctx)
{
	return ctx->hdev->asic_prop.mmu_pgt_addr +
			(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}

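/* get_hop0_addr - host shadow address of the context's hop 0 table */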
static inline u64 get_hop0_addr(struct hl_ctx *ctx)
{
	return (u64) (uintptr_t) ctx->hdev->mmu_priv.dr.mmu_shadow_hop0 +
			(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}

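/*
 * flush - make sure the previous PTE writes have reached the device by
 * issuing a memory barrier followed by a read-back from hop 0.
 */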
static void flush(struct hl_ctx *ctx)
{
	/* flush all writes from all cores to reach PCI */
	mb();
	ctx->hdev->asic_funcs->read_pte(ctx->hdev, get_phys_hop0_addr(ctx));
}

/* transform the value to physical address when writing to H/W */
static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
{
	/*
	 * The value to write is actually the address of the next shadow hop +
	 * flags at the 12 LSBs.
	 * Hence in order to get the value to write to the physical PTE, we
	 * clear the 12 LSBs and translate the shadow hop to its associated
	 * physical hop, and add back the original 12 LSBs.
	 */
	u64 phys_val = get_phys_addr(ctx, val & HOP_PHYS_ADDR_MASK) |
				(val & FLAGS_MASK);

	ctx->hdev->asic_funcs->write_pte(ctx->hdev,
					get_phys_addr(ctx, shadow_pte_addr),
					phys_val);

	*(u64 *) (uintptr_t) shadow_pte_addr = val;
}

/* do not transform the value to physical address when writing to H/W */
static inline void write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr,
					u64 val)
{
	ctx->hdev->asic_funcs->write_pte(ctx->hdev,
					get_phys_addr(ctx, shadow_pte_addr),
					val);
	*(u64 *) (uintptr_t) shadow_pte_addr = val;
}

/* clear the last and present bits */
static inline void clear_pte(struct hl_ctx *ctx, u64 pte_addr)
{
	/* no need to transform the value to physical address */
	write_final_pte(ctx, pte_addr, 0);
}

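/* get_pte - increment the number of valid PTEs tracked for the given hop */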
static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr)
{
	get_pgt_info(ctx, hop_addr)->num_of_ptes++;
}

/*
 * put_pte - decrement the num of ptes and free the hop if possible
 *
 * @ctx: pointer to the context structure
 * @hop_addr: addr of the hop
 *
 * This function returns the number of ptes left on this hop. If the number is
 * 0, it means the hop was freed.
 */
static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
{
	struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
	int num_of_ptes_left;

	pgt_info->num_of_ptes--;

	/*
	 * Need to save the number of ptes left because free_hop might free
	 * the pgt_info
	 */
	num_of_ptes_left = pgt_info->num_of_ptes;
	if (!num_of_ptes_left)
		_free_hop(ctx, pgt_info);

	return num_of_ptes_left;
}

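/*
 * get_hop_pte_addr - return the shadow address of the PTE within the given
 * hop that translates virt_addr, using the hop's mask and shift from the MMU
 * properties.
 */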
static inline u64 get_hop_pte_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop,
					u64 *hop_addr_arr, u64 virt_addr, enum mmu_hop_num hop_idx)
{
	u64 mask, shift;

	mask = mmu_prop->hop_masks[hop_idx];
	shift = mmu_prop->hop_shifts[hop_idx];
	return hop_addr_arr[hop_idx] +
			ctx->hdev->asic_prop.mmu_pte_size * ((virt_addr & mask) >> shift);
}

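/*
 * get_alloc_next_hop_addr - return the next hop pointed to by curr_pte. If
 * the PTE does not point to a hop yet, a new hop is allocated and *is_new_hop
 * reflects whether that allocation succeeded.
 */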
static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
						bool *is_new_hop)
{
	u64 hop_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);

	if (hop_addr == ULLONG_MAX) {
		hop_addr = alloc_hop(ctx);
		*is_new_hop = (hop_addr != ULLONG_MAX);
	}

	return hop_addr;
}

/* translates shadow address inside hop to a physical address */
static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
{
	u64 page_mask = (ctx->hdev->asic_prop.mmu_hop_table_size - 1);
	u64 shadow_hop_addr = shadow_addr & ~page_mask;
	u64 pte_offset = shadow_addr & page_mask;
	u64 phys_hop_addr;

	if (shadow_hop_addr != get_hop0_addr(ctx))
		phys_hop_addr = get_pgt_info(ctx, shadow_hop_addr)->phys_addr;
	else
		phys_hop_addr = get_phys_hop0_addr(ctx);

	return phys_hop_addr + pte_offset;
}

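/*
 * dram_default_mapping_init - map the context's default-mapping DRAM range to
 * the default DRAM page. Builds hop 1, hop 2 and all required hop 3 tables
 * and points every hop 3 PTE at the default page. Relevant only when the
 * ASIC supports DRAM virtual memory and default page mapping is enabled.
 */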
static int dram_default_mapping_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
		hop2_pte_addr, hop3_pte_addr, pte_val;
	int rc, i, j, hop3_allocated = 0;

	if ((!prop->dram_supports_virtual_memory) ||
			(!hdev->dram_default_page_mapping) ||
			(ctx->asid == HL_KERNEL_ASID_ID))
		return 0;

	num_of_hop3 = prop->dram_size_for_default_page_mapping;
	do_div(num_of_hop3, prop->dram_page_size);
	do_div(num_of_hop3, HOP_PTE_ENTRIES_512);

	/* add hop1 and hop2 */
	total_hops = num_of_hop3 + 2;

	ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL);
	if (!ctx->dram_default_hops)
		return -ENOMEM;

	hop0_addr = get_hop0_addr(ctx);

	hop1_addr = alloc_hop(ctx);
	if (hop1_addr == ULLONG_MAX) {
		dev_err(hdev->dev, "failed to alloc hop 1\n");
		rc = -ENOMEM;
		goto hop1_err;
	}

	ctx->dram_default_hops[total_hops - 1] = hop1_addr;

	hop2_addr = alloc_hop(ctx);
	if (hop2_addr == ULLONG_MAX) {
		dev_err(hdev->dev, "failed to alloc hop 2\n");
		rc = -ENOMEM;
		goto hop2_err;
	}

	ctx->dram_default_hops[total_hops - 2] = hop2_addr;

	for (i = 0 ; i < num_of_hop3 ; i++) {
		ctx->dram_default_hops[i] = alloc_hop(ctx);
		if (ctx->dram_default_hops[i] == ULLONG_MAX) {
			dev_err(hdev->dev, "failed to alloc hop 3, i: %d\n", i);
			rc = -ENOMEM;
			goto hop3_err;
		}
		hop3_allocated++;
	}

	/* need only pte 0 in hops 0 and 1 */
	pte_val = (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
	write_pte(ctx, hop0_addr, pte_val);

	pte_val = (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
	write_pte(ctx, hop1_addr, pte_val);
	get_pte(ctx, hop1_addr);

	hop2_pte_addr = hop2_addr;
	for (i = 0 ; i < num_of_hop3 ; i++) {
		pte_val = (ctx->dram_default_hops[i] & HOP_PHYS_ADDR_MASK) |
				PAGE_PRESENT_MASK;
		write_pte(ctx, hop2_pte_addr, pte_val);
		get_pte(ctx, hop2_addr);
		hop2_pte_addr += HL_PTE_SIZE;
	}

	pte_val = (prop->mmu_dram_default_page_addr & HOP_PHYS_ADDR_MASK) |
			LAST_MASK | PAGE_PRESENT_MASK;

	for (i = 0 ; i < num_of_hop3 ; i++) {
		hop3_pte_addr = ctx->dram_default_hops[i];
		for (j = 0 ; j < HOP_PTE_ENTRIES_512 ; j++) {
			write_final_pte(ctx, hop3_pte_addr, pte_val);
			get_pte(ctx, ctx->dram_default_hops[i]);
			hop3_pte_addr += HL_PTE_SIZE;
		}
	}

	flush(ctx);

	return 0;

hop3_err:
	for (i = 0 ; i < hop3_allocated ; i++)
		free_hop(ctx, ctx->dram_default_hops[i]);

	free_hop(ctx, hop2_addr);
hop2_err:
	free_hop(ctx, hop1_addr);
hop1_err:
	kfree(ctx->dram_default_hops);

	return rc;
}

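/*
 * dram_default_mapping_fini - tear down the default DRAM mapping created by
 * dram_default_mapping_init(), releasing the hop 3, hop 2 and hop 1 tables
 * and clearing the hop 0 PTE.
 */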
static void dram_default_mapping_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
		hop2_pte_addr, hop3_pte_addr;
	int i, j;

	if ((!prop->dram_supports_virtual_memory) ||
			(!hdev->dram_default_page_mapping) ||
			(ctx->asid == HL_KERNEL_ASID_ID))
		return;

	num_of_hop3 = prop->dram_size_for_default_page_mapping;
	do_div(num_of_hop3, prop->dram_page_size);
	do_div(num_of_hop3, HOP_PTE_ENTRIES_512);

	hop0_addr = get_hop0_addr(ctx);
	/* add hop1 and hop2 */
	total_hops = num_of_hop3 + 2;
	hop1_addr = ctx->dram_default_hops[total_hops - 1];
	hop2_addr = ctx->dram_default_hops[total_hops - 2];

	for (i = 0 ; i < num_of_hop3 ; i++) {
		hop3_pte_addr = ctx->dram_default_hops[i];
		for (j = 0 ; j < HOP_PTE_ENTRIES_512 ; j++) {
			clear_pte(ctx, hop3_pte_addr);
			put_pte(ctx, ctx->dram_default_hops[i]);
			hop3_pte_addr += HL_PTE_SIZE;
		}
	}

	hop2_pte_addr = hop2_addr;
	for (i = 0 ; i < num_of_hop3 ; i++) {
		clear_pte(ctx, hop2_pte_addr);
		put_pte(ctx, hop2_addr);
		hop2_pte_addr += HL_PTE_SIZE;
	}

	clear_pte(ctx, hop1_addr);
	put_pte(ctx, hop1_addr);
	clear_pte(ctx, hop0_addr);

	kfree(ctx->dram_default_hops);

	flush(ctx);
}

/**
 * hl_mmu_v1_init() - initialize the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Create a pool of pages for pgt_infos.
 * - Create a shadow table for pgt
 *
 * Return: 0 for success, non-zero for failure.
 */
static int hl_mmu_v1_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	hdev->mmu_priv.dr.mmu_pgt_pool =
			gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);

	if (!hdev->mmu_priv.dr.mmu_pgt_pool) {
		dev_err(hdev->dev, "Failed to create page gen pool\n");
		return -ENOMEM;
	}

	rc = gen_pool_add(hdev->mmu_priv.dr.mmu_pgt_pool, prop->mmu_pgt_addr +
			prop->mmu_hop0_tables_total_size,
			prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
			-1);
	if (rc) {
		dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
		goto err_pool_add;
	}

	hdev->mmu_priv.dr.mmu_shadow_hop0 = kvcalloc(prop->max_asid, prop->mmu_hop_table_size,
							GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
		rc = -ENOMEM;
		goto err_pool_add;
	}

	/* MMU H/W init will be done in device hw_init() */

	return 0;

err_pool_add:
	gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);

	return rc;
}

/**
 * hl_mmu_v1_fini() - release the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Disable MMU in H/W.
 * - Free the pgt_infos pool.
 *
 * All contexts should be freed before calling this function.
 */
static void hl_mmu_v1_fini(struct hl_device *hdev)
{
	/* MMU H/W fini was already done in device hw_fini() */

	if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
		kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
		gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);

		/* Make sure that if we arrive here again without init having
		 * been called we won't cause a kernel panic. This can happen,
		 * for example, if we fail during the hard reset code at
		 * certain points
		 */
		hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
	}
}

/**
 * hl_mmu_v1_ctx_init() - initialize a context for using the MMU module.
 * @ctx: pointer to the context structure to initialize.
 *
 * Initialize a hash to hold all the page table hops related to this context,
 * and create the DRAM default page mapping if needed.
 * Return: 0 on success, non-zero otherwise.
 */
static int hl_mmu_v1_ctx_init(struct hl_ctx *ctx)
{
	hash_init(ctx->mmu_shadow_hash);
	return dram_default_mapping_init(ctx);
}

/*
 * hl_mmu_v1_ctx_fini - disable a ctx from using the mmu module
 *
 * @ctx: pointer to the context structure
 *
 * This function does the following:
 * - Free any pgts which were not freed yet
 * - Free DRAM default page mapping hops
 */
static void hl_mmu_v1_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct pgt_info *pgt_info;
	struct hlist_node *tmp;
	int i;

	dram_default_mapping_fini(ctx);

	if (!hash_empty(ctx->mmu_shadow_hash))
		dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n",
			ctx->asid);

	hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
		dev_err_ratelimited(hdev->dev,
			"pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
			pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
		_free_hop(ctx, pgt_info);
	}
}

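/*
 * hl_mmu_v1_unmap - unmap the page that translates virt_addr by walking the
 * shadow hop tables, clearing the final PTE (or restoring the default DRAM
 * PTE) and releasing any hop that is left without valid PTEs. The PTE cache
 * flush is done separately via the flush() callback.
 */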
static int hl_mmu_v1_unmap(struct hl_ctx *ctx,
				u64 virt_addr, bool is_dram_addr)
{
	u64 hop_addr[MMU_V1_MAX_HOPS] = {0}, hop_pte_addr[MMU_V1_MAX_HOPS] = {0}, curr_pte = 0;
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	bool is_huge, clear_hop3 = true;
	int hop_idx;

	/* shifts and masks are the same in PMMU and HPMMU, use one of them */
	mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;

	for (hop_idx = MMU_HOP0; hop_idx < MMU_HOP4; hop_idx++) {
		if (hop_idx == MMU_HOP0) {
			hop_addr[hop_idx] = get_hop0_addr(ctx);
		} else {
			hop_addr[hop_idx] = hl_mmu_get_next_hop_addr(ctx, curr_pte);
			if (hop_addr[hop_idx] == ULLONG_MAX)
				goto not_mapped;
		}

		hop_pte_addr[hop_idx] =
				get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx);

		curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx];
	}

	is_huge = curr_pte & mmu_prop->last_mask;

	if (is_dram_addr && !is_huge) {
		dev_err(hdev->dev, "DRAM unmapping should use huge pages only\n");
		return -EFAULT;
	}

	if (!is_huge) {
		hop_idx = MMU_HOP4;
		hop_addr[hop_idx] = hl_mmu_get_next_hop_addr(ctx, curr_pte);
		if (hop_addr[hop_idx] == ULLONG_MAX)
			goto not_mapped;

		hop_pte_addr[hop_idx] =
				get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx);
		curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx];
		clear_hop3 = false;
	}

	if (hdev->dram_default_page_mapping && is_dram_addr) {
		u64 default_pte = (prop->mmu_dram_default_page_addr &
				HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask |
					PAGE_PRESENT_MASK;
		if (curr_pte == default_pte) {
			dev_err(hdev->dev,
				"DRAM: hop3 PTE points to zero page, can't unmap, va: 0x%llx\n",
					virt_addr);
			goto not_mapped;
		}

		if (!(curr_pte & PAGE_PRESENT_MASK)) {
			dev_err(hdev->dev,
				"DRAM: hop3 PTE is cleared! can't unmap, va: 0x%llx\n",
					virt_addr);
			goto not_mapped;
		}

		hop_idx = MMU_HOP3;
		write_final_pte(ctx, hop_pte_addr[hop_idx], default_pte);
		put_pte(ctx, hop_addr[hop_idx]);
	} else {
		if (!(curr_pte & PAGE_PRESENT_MASK))
			goto not_mapped;

		if (hop_addr[MMU_HOP4])
			clear_pte(ctx, hop_pte_addr[MMU_HOP4]);
		else
			clear_pte(ctx, hop_pte_addr[MMU_HOP3]);

		if (hop_addr[MMU_HOP4] && !put_pte(ctx, hop_addr[MMU_HOP4]))
			clear_hop3 = true;

		if (!clear_hop3)
			goto mapped;

		for (hop_idx = MMU_HOP3; hop_idx >= 0; hop_idx--) {
			clear_pte(ctx, hop_pte_addr[hop_idx]);

			if (hop_idx == MMU_HOP0)
				break;

			if (put_pte(ctx, hop_addr[hop_idx]))
				goto mapped;
		}
	}

mapped:
	return 0;

not_mapped:
	dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
		virt_addr);

	return -EINVAL;
}

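/*
 * hl_mmu_v1_map - map virt_addr to phys_addr by walking the shadow hop
 * tables, allocating missing hops as needed, and programming the matching
 * PTEs in device memory. The PTE cache flush is done separately via the
 * flush() callback.
 */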
static int hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
			u32 page_size, bool is_dram_addr)
{
	u64 hop_addr[MMU_V1_MAX_HOPS] = {0}, hop_pte_addr[MMU_V1_MAX_HOPS] = {0}, curr_pte = 0;
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	bool is_huge, hop_new[MMU_V1_MAX_HOPS] = {false};
	int num_hops, hop_idx, prev_hop, rc = -ENOMEM;

	/*
	 * This mapping function can map a page or a huge page. For a huge
	 * page there are only 3 hops rather than 4. Currently the DRAM
	 * allocation uses huge pages only but user memory could have been
	 * allocated with either of the two page sizes. Since this is common
	 * code for all three cases, we need this huge page check.
	 */
	if (is_dram_addr) {
		mmu_prop = &prop->dmmu;
		is_huge = true;
	} else if (page_size == prop->pmmu_huge.page_size) {
		mmu_prop = &prop->pmmu_huge;
		is_huge = true;
	} else {
		mmu_prop = &prop->pmmu;
		is_huge = false;
	}

	num_hops = is_huge ? (MMU_V1_MAX_HOPS - 1) : MMU_V1_MAX_HOPS;

	for (hop_idx = MMU_HOP0; hop_idx < num_hops; hop_idx++) {
		if (hop_idx == MMU_HOP0) {
			hop_addr[hop_idx] = get_hop0_addr(ctx);
		} else {
			hop_addr[hop_idx] =
					get_alloc_next_hop_addr(ctx, curr_pte, &hop_new[hop_idx]);
			if (hop_addr[hop_idx] == ULLONG_MAX)
				goto err;
		}

		hop_pte_addr[hop_idx] =
				get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx);
		curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx];
	}

	if (hdev->dram_default_page_mapping && is_dram_addr) {
		u64 default_pte = (prop->mmu_dram_default_page_addr &
					HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask |
						PAGE_PRESENT_MASK;

		if (curr_pte != default_pte) {
			dev_err(hdev->dev,
				"DRAM: mapping already exists for virt_addr 0x%llx\n",
					virt_addr);
			rc = -EINVAL;
			goto err;
		}

		for (hop_idx = MMU_HOP1; hop_idx < num_hops; hop_idx++) {
			if (hop_new[hop_idx]) {
				dev_err(hdev->dev, "DRAM mapping should not allocate more hops\n");
				rc = -EFAULT;
				goto err;
			}
		}
	} else if (curr_pte & PAGE_PRESENT_MASK) {
		dev_err(hdev->dev,
			"mapping already exists for virt_addr 0x%llx\n",
				virt_addr);

		for (hop_idx = MMU_HOP0; hop_idx < num_hops; hop_idx++)
			dev_dbg(hdev->dev, "hop%d pte: 0x%llx (0x%llx)\n", hop_idx,
				*(u64 *) (uintptr_t) hop_pte_addr[hop_idx],
				hop_pte_addr[hop_idx]);

		rc = -EINVAL;
		goto err;
	}

	curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask
			| PAGE_PRESENT_MASK;

	write_final_pte(ctx, hop_pte_addr[num_hops - 1], curr_pte);

	for (hop_idx = MMU_HOP1; hop_idx < num_hops; hop_idx++) {
		prev_hop = hop_idx - 1;

		if (hop_new[hop_idx]) {
			curr_pte = (hop_addr[hop_idx] & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
			write_pte(ctx, hop_pte_addr[prev_hop], curr_pte);
			if (hop_idx != MMU_HOP1)
				get_pte(ctx, hop_addr[prev_hop]);
		}
	}

	get_pte(ctx, hop_addr[num_hops - 1]);

	return 0;

err:
	for (hop_idx = num_hops - 1; hop_idx > MMU_HOP0; hop_idx--) {
		if (hop_new[hop_idx])
			free_hop(ctx, hop_addr[hop_idx]);
	}

	return rc;
}

/*
 * hl_mmu_v1_swap_out - marks all mappings of the given ctx as swapped out
 *
 * @ctx: pointer to the context structure
 *
 */
static void hl_mmu_v1_swap_out(struct hl_ctx *ctx)
{

}

/*
 * hl_mmu_v1_swap_in - marks all mappings of the given ctx as swapped in
 *
 * @ctx: pointer to the context structure
 *
 */
static void hl_mmu_v1_swap_in(struct hl_ctx *ctx)
{

}

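/*
 * hl_mmu_v1_get_tlb_info - walk the page tables in device memory for
 * virt_addr and fill @hops with the hop addresses, PTE addresses and PTE
 * values of the translation.
 */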
static int hl_mmu_v1_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
				struct hl_mmu_hop_info *hops)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	bool is_dram_addr, is_pmmu_addr, is_pmmu_h_addr, is_huge;
	int i, used_hops;

	is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
						prop->dmmu.start_addr,
						prop->dmmu.end_addr);
	is_pmmu_addr = hl_mem_area_inside_range(virt_addr, prop->pmmu.page_size,
						prop->pmmu.start_addr,
						prop->pmmu.end_addr);
	is_pmmu_h_addr = hl_mem_area_inside_range(virt_addr,
						prop->pmmu_huge.page_size,
						prop->pmmu_huge.start_addr,
						prop->pmmu_huge.end_addr);
	if (is_dram_addr) {
		mmu_prop = &prop->dmmu;
		is_huge = true;
	} else if (is_pmmu_addr) {
		mmu_prop = &prop->pmmu;
		is_huge = false;
	} else if (is_pmmu_h_addr) {
		mmu_prop = &prop->pmmu_huge;
		is_huge = true;
	} else {
		return -EINVAL;
	}

	used_hops = mmu_prop->num_hops;

	/* huge pages use one less hop */
	if (is_huge)
		used_hops--;

	hops->hop_info[0].hop_addr = get_phys_hop0_addr(ctx);
	hops->hop_info[0].hop_pte_addr =
			hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, 0,
					hops->hop_info[0].hop_addr, virt_addr);
	hops->hop_info[0].hop_pte_val =
			hdev->asic_funcs->read_pte(hdev,
						hops->hop_info[0].hop_pte_addr);

	for (i = 1 ; i < used_hops ; i++) {
		hops->hop_info[i].hop_addr =
			hl_mmu_get_next_hop_addr(ctx,
					hops->hop_info[i - 1].hop_pte_val);
		if (hops->hop_info[i].hop_addr == ULLONG_MAX)
			return -EFAULT;

		hops->hop_info[i].hop_pte_addr =
				hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
						hops->hop_info[i].hop_addr,
						virt_addr);
		hops->hop_info[i].hop_pte_val =
				hdev->asic_funcs->read_pte(hdev,
						hops->hop_info[i].hop_pte_addr);

		if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
			return -EFAULT;

		if (hops->hop_info[i].hop_pte_val & mmu_prop->last_mask)
			break;
	}

	/* if passed over all hops then no last hop was found */
	if (i == mmu_prop->num_hops)
		return -EFAULT;

	if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
		return -EFAULT;

	hops->used_hops = i + 1;

	return 0;
}

/*
 * hl_mmu_v1_set_funcs - set the MMU functions to match MMU v1
 *
 * @hdev: pointer to the device structure
 * @mmu: pointer to the MMU functions structure to populate
 */
void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)
{
	mmu->init = hl_mmu_v1_init;
	mmu->fini = hl_mmu_v1_fini;
	mmu->ctx_init = hl_mmu_v1_ctx_init;
	mmu->ctx_fini = hl_mmu_v1_ctx_fini;
	mmu->map = hl_mmu_v1_map;
	mmu->unmap = hl_mmu_v1_unmap;
	mmu->flush = flush;
	mmu->swap_out = hl_mmu_v1_swap_out;
	mmu->swap_in = hl_mmu_v1_swap_in;
	mmu->get_tlb_info = hl_mmu_v1_get_tlb_info;
}