1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/dma-fence-array.h>
29 #include <linux/interval_tree_generic.h>
30 #include <linux/idr.h>
31 #include <drm/drmP.h>
32 #include <drm/amdgpu_drm.h>
33 #include "amdgpu.h"
34 #include "amdgpu_trace.h"
35 #include "amdgpu_amdkfd.h"
36 #include "amdgpu_gmc.h"
37 
38 /**
39  * DOC: GPUVM
40  *
41  * GPUVM is similar to the legacy gart on older asics, however
42  * rather than there being a single global gart table
43  * for the entire GPU, there are multiple VM page tables active
44  * at any given time.  The VM page tables can contain a mix of
45  * vram pages and system memory pages, and system memory pages
46  * can be mapped as snooped (cached system pages) or unsnooped
47  * (uncached system pages).
48  * Each VM has an ID associated with it and there is a page table
49  * associated with each VMID.  When executing a command buffer,
50  * the kernel tells the ring what VMID to use for that command
51  * buffer.  VMIDs are allocated dynamically as commands are submitted.
52  * The userspace drivers maintain their own address space and the kernel
53  * sets up their page tables accordingly when they submit their
54  * command buffers and a VMID is assigned.
55  * Cayman/Trinity support up to 8 active VMs at any given time;
56  * SI supports 16.
57  */
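/*
 * Illustrative flow (editor's sketch, not literal driver code): to map a
 * BO at a userspace-managed virtual address, the kernel roughly does
 *
 *	amdgpu_vm_alloc_pts(adev, vm, saddr, size);	allocate PDs/PTs
 *	amdgpu_vm_bo_update(adev, bo_va, false);	write the PTEs
 *
 * and at command submission a VMID is assigned and the page table base
 * is flushed to the hardware via amdgpu_vm_flush().  The variable names
 * and exact call sites above are assumptions for illustration only.
 */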
58 
59 #define START(node) ((node)->start)
60 #define LAST(node) ((node)->last)
61 
62 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
63 		     START, LAST, static, amdgpu_vm_it)
64 
65 #undef START
66 #undef LAST
67 
68 /**
69  * struct amdgpu_pte_update_params - Local structure
70  *
71  * Encapsulate some VM table update parameters to reduce
72  * the number of function parameters
73  *
74  */
75 struct amdgpu_pte_update_params {
76 
77 	/**
78 	 * @adev: amdgpu device we do this update for
79 	 */
80 	struct amdgpu_device *adev;
81 
82 	/**
83 	 * @vm: optional amdgpu_vm we do this update for
84 	 */
85 	struct amdgpu_vm *vm;
86 
87 	/**
88 	 * @src: address where to copy page table entries from
89 	 */
90 	uint64_t src;
91 
92 	/**
93 	 * @ib: indirect buffer to fill with commands
94 	 */
95 	struct amdgpu_ib *ib;
96 
97 	/**
98 	 * @func: Function which actually does the update
99 	 */
100 	void (*func)(struct amdgpu_pte_update_params *params,
101 		     struct amdgpu_bo *bo, uint64_t pe,
102 		     uint64_t addr, unsigned count, uint32_t incr,
103 		     uint64_t flags);
104 	/**
105 	 * @pages_addr:
106 	 *
107 	 * DMA addresses to use for mapping, used during VM update by CPU
108 	 */
109 	dma_addr_t *pages_addr;
110 
111 	/**
112 	 * @kptr:
113 	 *
114 	 * Kernel pointer of PD/PT BO that needs to be updated,
115 	 * used during VM update by CPU
116 	 */
117 	void *kptr;
118 };
119 
120 /**
121  * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
122  */
123 struct amdgpu_prt_cb {
124 
125 	/**
126 	 * @adev: amdgpu device
127 	 */
128 	struct amdgpu_device *adev;
129 
130 	/**
131 	 * @cb: callback
132 	 */
133 	struct dma_fence_cb cb;
134 };
135 
136 /**
137  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
138  *
139  * @base: base structure for tracking BO usage in a VM
140  * @vm: vm to which bo is to be added
141  * @bo: amdgpu buffer object
142  *
143  * Initialize a bo_va_base structure and add it to the appropriate lists
144  *
145  */
146 static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
147 				   struct amdgpu_vm *vm,
148 				   struct amdgpu_bo *bo)
149 {
150 	base->vm = vm;
151 	base->bo = bo;
152 	INIT_LIST_HEAD(&base->bo_list);
153 	INIT_LIST_HEAD(&base->vm_status);
154 
155 	if (!bo)
156 		return;
157 	list_add_tail(&base->bo_list, &bo->va);
158 
159 	if (bo->tbo.type == ttm_bo_type_kernel)
160 		list_move(&base->vm_status, &vm->relocated);
161 
162 	if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
163 		return;
164 
165 	if (bo->preferred_domains &
166 	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
167 		return;
168 
169 	/*
170 	 * we checked all the prerequisites, but it looks like this per vm bo
171 	 * is currently evicted. add the bo to the evicted list to make sure it
172 	 * is validated on next vm use to avoid fault.
173 	 */
174 	list_move_tail(&base->vm_status, &vm->evicted);
175 	base->moved = true;
176 }
177 
178 /**
179  * amdgpu_vm_level_shift - return the addr shift for each level
180  *
181  * @adev: amdgpu_device pointer
182  * @level: VMPT level
183  *
184  * Returns:
185  * The number of bits the pfn needs to be right shifted for a level.
186  */
187 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
188 				      unsigned level)
189 {
190 	unsigned shift = 0xff;
191 
192 	switch (level) {
193 	case AMDGPU_VM_PDB2:
194 	case AMDGPU_VM_PDB1:
195 	case AMDGPU_VM_PDB0:
196 		shift = 9 * (AMDGPU_VM_PDB0 - level) +
197 			adev->vm_manager.block_size;
198 		break;
199 	case AMDGPU_VM_PTB:
200 		shift = 0;
201 		break;
202 	default:
203 		dev_err(adev->dev, "the level%d isn't supported.\n", level);
204 	}
205 
206 	return shift;
207 }
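/*
 * Worked example (illustrative, assuming the common 9 bit block_size):
 * the shifts returned above are
 *
 *	AMDGPU_VM_PDB2: 9 * 2 + 9 = 27
 *	AMDGPU_VM_PDB1: 9 * 1 + 9 = 18
 *	AMDGPU_VM_PDB0: 9 * 0 + 9 =  9
 *	AMDGPU_VM_PTB:  0
 *
 * so a GPU pfn is shifted right by these amounts to form the index into
 * the corresponding level.
 */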
208 
209 /**
210  * amdgpu_vm_num_entries - return the number of entries in a PD/PT
211  *
212  * @adev: amdgpu_device pointer
213  * @level: VMPT level
214  *
215  * Returns:
216  * The number of entries in a page directory or page table.
217  */
218 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
219 				      unsigned level)
220 {
221 	unsigned shift = amdgpu_vm_level_shift(adev,
222 					       adev->vm_manager.root_level);
223 
224 	if (level == adev->vm_manager.root_level)
225 		/* For the root directory */
226 		return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
227 	else if (level != AMDGPU_VM_PTB)
228 		/* Everything in between */
229 		return 512;
230 	else
231 		/* For the page tables on the leaves */
232 		return AMDGPU_VM_PTE_COUNT(adev);
233 }
234 
235 /**
236  * amdgpu_vm_bo_size - returns the size of the BOs in bytes
237  *
238  * @adev: amdgpu_device pointer
239  * @level: VMPT level
240  *
241  * Returns:
242  * The size of the BO for a page directory or page table in bytes.
243  */
244 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
245 {
246 	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
247 }
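/*
 * Example sizes (illustrative, assuming a 9 bit block_size and a 48 bit
 * VA space): interior directories hold 512 entries, a leaf PTB holds
 * AMDGPU_VM_PTE_COUNT(adev) == 512 entries and the root PD holds
 * round_up(max_pfn, 1 << 27) >> 27 == 512 entries, so each PD/PT BO is
 * 512 * 8 = 4096 bytes in this configuration.
 */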
248 
249 /**
250  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
251  *
252  * @vm: vm providing the BOs
253  * @validated: head of validation list
254  * @entry: entry to add
255  *
256  * Add the page directory to the list of BOs to
257  * validate for command submission.
258  */
259 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
260 			 struct list_head *validated,
261 			 struct amdgpu_bo_list_entry *entry)
262 {
263 	entry->robj = vm->root.base.bo;
264 	entry->priority = 0;
265 	entry->tv.bo = &entry->robj->tbo;
266 	entry->tv.shared = true;
267 	entry->user_pages = NULL;
268 	list_add(&entry->tv.head, validated);
269 }
270 
271 /**
272  * amdgpu_vm_validate_pt_bos - validate the page table BOs
273  *
274  * @adev: amdgpu device pointer
275  * @vm: vm providing the BOs
276  * @validate: callback to do the validation
277  * @param: parameter for the validation callback
278  *
279  * Validate the page table BOs on command submission if necessary.
280  *
281  * Returns:
282  * Validation result.
283  */
284 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
285 			      int (*validate)(void *p, struct amdgpu_bo *bo),
286 			      void *param)
287 {
288 	struct ttm_bo_global *glob = adev->mman.bdev.glob;
289 	struct amdgpu_vm_bo_base *bo_base, *tmp;
290 	int r = 0;
291 
292 	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
293 		struct amdgpu_bo *bo = bo_base->bo;
294 
295 		if (bo->parent) {
296 			r = validate(param, bo);
297 			if (r)
298 				break;
299 
300 			spin_lock(&glob->lru_lock);
301 			ttm_bo_move_to_lru_tail(&bo->tbo);
302 			if (bo->shadow)
303 				ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
304 			spin_unlock(&glob->lru_lock);
305 		}
306 
307 		if (bo->tbo.type != ttm_bo_type_kernel) {
308 			spin_lock(&vm->moved_lock);
309 			list_move(&bo_base->vm_status, &vm->moved);
310 			spin_unlock(&vm->moved_lock);
311 		} else {
312 			list_move(&bo_base->vm_status, &vm->relocated);
313 		}
314 	}
315 
316 	spin_lock(&glob->lru_lock);
317 	list_for_each_entry(bo_base, &vm->idle, vm_status) {
318 		struct amdgpu_bo *bo = bo_base->bo;
319 
320 		if (!bo->parent)
321 			continue;
322 
323 		ttm_bo_move_to_lru_tail(&bo->tbo);
324 		if (bo->shadow)
325 			ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
326 	}
327 	spin_unlock(&glob->lru_lock);
328 
329 	return r;
330 }
331 
332 /**
333  * amdgpu_vm_ready - check VM is ready for updates
334  *
335  * @vm: VM to check
336  *
337  * Check if all VM PDs/PTs are ready for updates
338  *
339  * Returns:
340  * True if eviction list is empty.
341  */
342 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
343 {
344 	return list_empty(&vm->evicted);
345 }
346 
347 /**
348  * amdgpu_vm_clear_bo - initially clear the PDs/PTs
349  *
350  * @adev: amdgpu_device pointer
351  * @vm: VM to clear BO from
352  * @bo: BO to clear
353  * @level: level this BO is at
354  * @pte_support_ats: indicate ATS support from PTE
355  *
356  * Root PD needs to be reserved when calling this.
357  *
358  * Returns:
359  * 0 on success, errno otherwise.
360  */
361 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
362 			      struct amdgpu_vm *vm, struct amdgpu_bo *bo,
363 			      unsigned level, bool pte_support_ats)
364 {
365 	struct ttm_operation_ctx ctx = { true, false };
366 	struct dma_fence *fence = NULL;
367 	unsigned entries, ats_entries;
368 	struct amdgpu_ring *ring;
369 	struct amdgpu_job *job;
370 	uint64_t addr;
371 	int r;
372 
373 	entries = amdgpu_bo_size(bo) / 8;
374 
375 	if (pte_support_ats) {
376 		if (level == adev->vm_manager.root_level) {
377 			ats_entries = amdgpu_vm_level_shift(adev, level);
378 			ats_entries += AMDGPU_GPU_PAGE_SHIFT;
379 			ats_entries = AMDGPU_VA_HOLE_START >> ats_entries;
380 			ats_entries = min(ats_entries, entries);
381 			entries -= ats_entries;
382 		} else {
383 			ats_entries = entries;
384 			entries = 0;
385 		}
386 	} else {
387 		ats_entries = 0;
388 	}
389 
390 	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
391 
392 	r = reservation_object_reserve_shared(bo->tbo.resv);
393 	if (r)
394 		return r;
395 
396 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
397 	if (r)
398 		goto error;
399 
400 	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
401 	if (r)
402 		goto error;
403 
404 	addr = amdgpu_bo_gpu_offset(bo);
405 	if (ats_entries) {
406 		uint64_t ats_value;
407 
408 		ats_value = AMDGPU_PTE_DEFAULT_ATC;
409 		if (level != AMDGPU_VM_PTB)
410 			ats_value |= AMDGPU_PDE_PTE;
411 
412 		amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
413 				      ats_entries, 0, ats_value);
414 		addr += ats_entries * 8;
415 	}
416 
417 	if (entries)
418 		amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
419 				      entries, 0, 0);
420 
421 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
422 
423 	WARN_ON(job->ibs[0].length_dw > 64);
424 	r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
425 			     AMDGPU_FENCE_OWNER_UNDEFINED, false);
426 	if (r)
427 		goto error_free;
428 
429 	r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
430 			      &fence);
431 	if (r)
432 		goto error_free;
433 
434 	amdgpu_bo_fence(bo, fence, true);
435 	dma_fence_put(fence);
436 
437 	if (bo->shadow)
438 		return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
439 					  level, pte_support_ats);
440 
441 	return 0;
442 
443 error_free:
444 	amdgpu_job_free(job);
445 
446 error:
447 	return r;
448 }
449 
450 /**
451  * amdgpu_vm_alloc_levels - allocate the PD/PT levels
452  *
453  * @adev: amdgpu_device pointer
454  * @vm: requested vm
455  * @parent: parent PT
456  * @saddr: start of the address range
457  * @eaddr: end of the address range
458  * @level: VMPT level
459  * @ats: indicate ATS support from PTE
460  *
461  * Make sure the page directories and page tables are allocated
462  *
463  * Returns:
464  * 0 on success, errno otherwise.
465  */
466 static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
467 				  struct amdgpu_vm *vm,
468 				  struct amdgpu_vm_pt *parent,
469 				  uint64_t saddr, uint64_t eaddr,
470 				  unsigned level, bool ats)
471 {
472 	unsigned shift = amdgpu_vm_level_shift(adev, level);
473 	unsigned pt_idx, from, to;
474 	u64 flags;
475 	int r;
476 
477 	if (!parent->entries) {
478 		unsigned num_entries = amdgpu_vm_num_entries(adev, level);
479 
480 		parent->entries = kvmalloc_array(num_entries,
481 						   sizeof(struct amdgpu_vm_pt),
482 						   GFP_KERNEL | __GFP_ZERO);
483 		if (!parent->entries)
484 			return -ENOMEM;
485 		memset(parent->entries, 0, sizeof(struct amdgpu_vm_pt));
486 	}
487 
488 	from = saddr >> shift;
489 	to = eaddr >> shift;
490 	if (from >= amdgpu_vm_num_entries(adev, level) ||
491 	    to >= amdgpu_vm_num_entries(adev, level))
492 		return -EINVAL;
493 
494 	++level;
495 	saddr = saddr & ((1 << shift) - 1);
496 	eaddr = eaddr & ((1 << shift) - 1);
497 
498 	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
499 	if (vm->root.base.bo->shadow)
500 		flags |= AMDGPU_GEM_CREATE_SHADOW;
501 	if (vm->use_cpu_for_update)
502 		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
503 	else
504 		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
505 
506 	/* walk over the address space and allocate the page tables */
507 	for (pt_idx = from; pt_idx <= to; ++pt_idx) {
508 		struct reservation_object *resv = vm->root.base.bo->tbo.resv;
509 		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
510 		struct amdgpu_bo *pt;
511 
512 		if (!entry->base.bo) {
513 			struct amdgpu_bo_param bp;
514 
515 			memset(&bp, 0, sizeof(bp));
516 			bp.size = amdgpu_vm_bo_size(adev, level);
517 			bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
518 			bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
519 			bp.flags = flags;
520 			bp.type = ttm_bo_type_kernel;
521 			bp.resv = resv;
522 			r = amdgpu_bo_create(adev, &bp, &pt);
523 			if (r)
524 				return r;
525 
526 			r = amdgpu_vm_clear_bo(adev, vm, pt, level, ats);
527 			if (r) {
528 				amdgpu_bo_unref(&pt->shadow);
529 				amdgpu_bo_unref(&pt);
530 				return r;
531 			}
532 
533 			if (vm->use_cpu_for_update) {
534 				r = amdgpu_bo_kmap(pt, NULL);
535 				if (r) {
536 					amdgpu_bo_unref(&pt->shadow);
537 					amdgpu_bo_unref(&pt);
538 					return r;
539 				}
540 			}
541 
542 			/* Keep a reference to the root directory to avoid
543 			 * freeing them up in the wrong order.
544 			 */
545 			pt->parent = amdgpu_bo_ref(parent->base.bo);
546 
547 			amdgpu_vm_bo_base_init(&entry->base, vm, pt);
548 		}
549 
550 		if (level < AMDGPU_VM_PTB) {
551 			uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
552 			uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
553 				((1 << shift) - 1);
554 			r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
555 						   sub_eaddr, level, ats);
556 			if (r)
557 				return r;
558 		}
559 	}
560 
561 	return 0;
562 }
563 
564 /**
565  * amdgpu_vm_alloc_pts - Allocate page tables.
566  *
567  * @adev: amdgpu_device pointer
568  * @vm: VM to allocate page tables for
569  * @saddr: Start address which needs to be allocated
570  * @size: Size from start address we need.
571  *
572  * Make sure the page tables are allocated.
573  *
574  * Returns:
575  * 0 on success, errno otherwise.
576  */
577 int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
578 			struct amdgpu_vm *vm,
579 			uint64_t saddr, uint64_t size)
580 {
581 	uint64_t eaddr;
582 	bool ats = false;
583 
584 	/* validate the parameters */
585 	if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
586 		return -EINVAL;
587 
588 	eaddr = saddr + size - 1;
589 
590 	if (vm->pte_support_ats)
591 		ats = saddr < AMDGPU_VA_HOLE_START;
592 
593 	saddr /= AMDGPU_GPU_PAGE_SIZE;
594 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
595 
596 	if (eaddr >= adev->vm_manager.max_pfn) {
597 		dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
598 			eaddr, adev->vm_manager.max_pfn);
599 		return -EINVAL;
600 	}
601 
602 	return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
603 				      adev->vm_manager.root_level, ats);
604 }
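/*
 * Usage sketch (editor's illustration; the address and size are made up
 * and must be AMDGPU_GPU_PAGE_SIZE aligned, otherwise -EINVAL is
 * returned):
 *
 *	uint64_t saddr = 2ULL << 20;	hypothetical VA, 2 MiB aligned
 *	uint64_t size = 2ULL << 20;	back 2 MiB of address space
 *	int r = amdgpu_vm_alloc_pts(adev, vm, saddr, size);
 *	if (r)
 *		return r;
 */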
605 
606 /**
607  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
608  *
609  * @adev: amdgpu_device pointer
610  */
611 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
612 {
613 	const struct amdgpu_ip_block *ip_block;
614 	bool has_compute_vm_bug;
615 	struct amdgpu_ring *ring;
616 	int i;
617 
618 	has_compute_vm_bug = false;
619 
620 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
621 	if (ip_block) {
622 		/* Compute has a VM bug for GFX version < 7.
623 		   Compute has a VM bug for GFX 8 MEC firmware version < 673.*/
624 		if (ip_block->version->major <= 7)
625 			has_compute_vm_bug = true;
626 		else if (ip_block->version->major == 8)
627 			if (adev->gfx.mec_fw_version < 673)
628 				has_compute_vm_bug = true;
629 	}
630 
631 	for (i = 0; i < adev->num_rings; i++) {
632 		ring = adev->rings[i];
633 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
634 			/* only compute rings */
635 			ring->has_compute_vm_bug = has_compute_vm_bug;
636 		else
637 			ring->has_compute_vm_bug = false;
638 	}
639 }
640 
641 /**
642  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
643  *
644  * @ring: ring on which the job will be submitted
645  * @job: job to submit
646  *
647  * Returns:
648  * True if sync is needed.
649  */
650 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
651 				  struct amdgpu_job *job)
652 {
653 	struct amdgpu_device *adev = ring->adev;
654 	unsigned vmhub = ring->funcs->vmhub;
655 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
656 	struct amdgpu_vmid *id;
657 	bool gds_switch_needed;
658 	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
659 
660 	if (job->vmid == 0)
661 		return false;
662 	id = &id_mgr->ids[job->vmid];
663 	gds_switch_needed = ring->funcs->emit_gds_switch && (
664 		id->gds_base != job->gds_base ||
665 		id->gds_size != job->gds_size ||
666 		id->gws_base != job->gws_base ||
667 		id->gws_size != job->gws_size ||
668 		id->oa_base != job->oa_base ||
669 		id->oa_size != job->oa_size);
670 
671 	if (amdgpu_vmid_had_gpu_reset(adev, id))
672 		return true;
673 
674 	return vm_flush_needed || gds_switch_needed;
675 }
676 
677 /**
678  * amdgpu_vm_flush - hardware flush the vm
679  *
680  * @ring: ring to use for flush
681  * @job:  related job
682  * @need_pipe_sync: is pipe sync needed
683  *
684  * Emit a VM flush when it is necessary.
685  *
686  * Returns:
687  * 0 on success, errno otherwise.
688  */
689 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
690 {
691 	struct amdgpu_device *adev = ring->adev;
692 	unsigned vmhub = ring->funcs->vmhub;
693 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
694 	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
695 	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
696 		id->gds_base != job->gds_base ||
697 		id->gds_size != job->gds_size ||
698 		id->gws_base != job->gws_base ||
699 		id->gws_size != job->gws_size ||
700 		id->oa_base != job->oa_base ||
701 		id->oa_size != job->oa_size);
702 	bool vm_flush_needed = job->vm_needs_flush;
703 	bool pasid_mapping_needed = id->pasid != job->pasid ||
704 		!id->pasid_mapping ||
705 		!dma_fence_is_signaled(id->pasid_mapping);
706 	struct dma_fence *fence = NULL;
707 	unsigned patch_offset = 0;
708 	int r;
709 
710 	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
711 		gds_switch_needed = true;
712 		vm_flush_needed = true;
713 		pasid_mapping_needed = true;
714 	}
715 
716 	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
717 	vm_flush_needed &= !!ring->funcs->emit_vm_flush;
718 	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
719 		ring->funcs->emit_wreg;
720 
721 	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
722 		return 0;
723 
724 	if (ring->funcs->init_cond_exec)
725 		patch_offset = amdgpu_ring_init_cond_exec(ring);
726 
727 	if (need_pipe_sync)
728 		amdgpu_ring_emit_pipeline_sync(ring);
729 
730 	if (vm_flush_needed) {
731 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
732 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
733 	}
734 
735 	if (pasid_mapping_needed)
736 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
737 
738 	if (vm_flush_needed || pasid_mapping_needed) {
739 		r = amdgpu_fence_emit(ring, &fence, 0);
740 		if (r)
741 			return r;
742 	}
743 
744 	if (vm_flush_needed) {
745 		mutex_lock(&id_mgr->lock);
746 		dma_fence_put(id->last_flush);
747 		id->last_flush = dma_fence_get(fence);
748 		id->current_gpu_reset_count =
749 			atomic_read(&adev->gpu_reset_counter);
750 		mutex_unlock(&id_mgr->lock);
751 	}
752 
753 	if (pasid_mapping_needed) {
754 		id->pasid = job->pasid;
755 		dma_fence_put(id->pasid_mapping);
756 		id->pasid_mapping = dma_fence_get(fence);
757 	}
758 	dma_fence_put(fence);
759 
760 	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
761 		id->gds_base = job->gds_base;
762 		id->gds_size = job->gds_size;
763 		id->gws_base = job->gws_base;
764 		id->gws_size = job->gws_size;
765 		id->oa_base = job->oa_base;
766 		id->oa_size = job->oa_size;
767 		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
768 					    job->gds_size, job->gws_base,
769 					    job->gws_size, job->oa_base,
770 					    job->oa_size);
771 	}
772 
773 	if (ring->funcs->patch_cond_exec)
774 		amdgpu_ring_patch_cond_exec(ring, patch_offset);
775 
776 	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
777 	if (ring->funcs->emit_switch_buffer) {
778 		amdgpu_ring_emit_switch_buffer(ring);
779 		amdgpu_ring_emit_switch_buffer(ring);
780 	}
781 	return 0;
782 }
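/*
 * Sketch of how a submission path might combine the helpers above
 * (editor's assumption, not necessarily the exact call site):
 *
 *	bool need_pipe = amdgpu_vm_need_pipeline_sync(ring, job);
 *	r = amdgpu_vm_flush(ring, job, need_pipe);
 *
 * i.e. the pipeline sync decision is made per job and the flush is then
 * emitted (or skipped) on the ring that executes the IB.
 */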
783 
784 /**
785  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
786  *
787  * @vm: requested vm
788  * @bo: requested buffer object
789  *
790  * Find @bo inside the requested vm.
791  * Search inside the @bos vm list for the requested vm
792  * Returns the found bo_va or NULL if none is found
793  *
794  * Object has to be reserved!
795  *
796  * Returns:
797  * Found bo_va or NULL.
798  */
799 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
800 				       struct amdgpu_bo *bo)
801 {
802 	struct amdgpu_bo_va *bo_va;
803 
804 	list_for_each_entry(bo_va, &bo->va, base.bo_list) {
805 		if (bo_va->base.vm == vm) {
806 			return bo_va;
807 		}
808 	}
809 	return NULL;
810 }
811 
812 /**
813  * amdgpu_vm_do_set_ptes - helper to call the right asic function
814  *
815  * @params: see amdgpu_pte_update_params definition
816  * @bo: PD/PT to update
817  * @pe: addr of the page entry
818  * @addr: dst addr to write into pe
819  * @count: number of page entries to update
820  * @incr: increase next addr by incr bytes
821  * @flags: hw access flags
822  *
823  * Traces the parameters and calls the right asic functions
824  * to setup the page table using the DMA.
825  */
826 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
827 				  struct amdgpu_bo *bo,
828 				  uint64_t pe, uint64_t addr,
829 				  unsigned count, uint32_t incr,
830 				  uint64_t flags)
831 {
832 	pe += amdgpu_bo_gpu_offset(bo);
833 	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
834 
835 	if (count < 3) {
836 		amdgpu_vm_write_pte(params->adev, params->ib, pe,
837 				    addr | flags, count, incr);
838 
839 	} else {
840 		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
841 				      count, incr, flags);
842 	}
843 }
844 
845 /**
846  * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
847  *
848  * @params: see amdgpu_pte_update_params definition
849  * @bo: PD/PT to update
850  * @pe: addr of the page entry
851  * @addr: dst addr to write into pe
852  * @count: number of page entries to update
853  * @incr: increase next addr by incr bytes
854  * @flags: hw access flags
855  *
856  * Traces the parameters and calls the DMA function to copy the PTEs.
857  */
858 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
859 				   struct amdgpu_bo *bo,
860 				   uint64_t pe, uint64_t addr,
861 				   unsigned count, uint32_t incr,
862 				   uint64_t flags)
863 {
864 	uint64_t src = (params->src + (addr >> 12) * 8);
865 
866 	pe += amdgpu_bo_gpu_offset(bo);
867 	trace_amdgpu_vm_copy_ptes(pe, src, count);
868 
869 	amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
870 }
871 
872 /**
873  * amdgpu_vm_map_gart - Resolve gart mapping of addr
874  *
875  * @pages_addr: optional DMA address to use for lookup
876  * @addr: the unmapped addr
877  *
878  * Look up the physical address of the page that the pte resolves
879  * to.
880  *
881  * Returns:
882  * The pointer for the page table entry.
883  */
884 static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
885 {
886 	uint64_t result;
887 
888 	/* page table offset */
889 	result = pages_addr[addr >> PAGE_SHIFT];
890 
891 	/* in case cpu page size != gpu page size*/
892 	result |= addr & (~PAGE_MASK);
893 
894 	result &= 0xFFFFFFFFFFFFF000ULL;
895 
896 	return result;
897 }
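/*
 * Worked example (illustrative): with a hypothetical 64 KiB CPU
 * PAGE_SIZE and addr == 0x23456, the lookup reads pages_addr[0x2], the
 * OR keeps the 0x3456 offset inside that CPU page and the final mask
 * drops the sub-4KiB bits, so the result is pages_addr[0x2] | 0x3000,
 * i.e. the 4 KiB GPU page inside the larger CPU page.  With 4 KiB CPU
 * pages the OR and the mask cancel each other out.
 */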
898 
899 /**
900  * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
901  *
902  * @params: see amdgpu_pte_update_params definition
903  * @bo: PD/PT to update
904  * @pe: kmap addr of the page entry
905  * @addr: dst addr to write into pe
906  * @count: number of page entries to update
907  * @incr: increase next addr by incr bytes
908  * @flags: hw access flags
909  *
910  * Write count number of PT/PD entries directly.
911  */
912 static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
913 				   struct amdgpu_bo *bo,
914 				   uint64_t pe, uint64_t addr,
915 				   unsigned count, uint32_t incr,
916 				   uint64_t flags)
917 {
918 	unsigned int i;
919 	uint64_t value;
920 
921 	pe += (unsigned long)amdgpu_bo_kptr(bo);
922 
923 	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
924 
925 	for (i = 0; i < count; i++) {
926 		value = params->pages_addr ?
927 			amdgpu_vm_map_gart(params->pages_addr, addr) :
928 			addr;
929 		amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
930 				       i, value, flags);
931 		addr += incr;
932 	}
933 }
934 
935 
936 /**
937  * amdgpu_vm_wait_pd - Wait for PT BOs to be free.
938  *
939  * @adev: amdgpu_device pointer
940  * @vm: related vm
941  * @owner: fence owner
942  *
943  * Returns:
944  * 0 on success, errno otherwise.
945  */
946 static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
947 			     void *owner)
948 {
949 	struct amdgpu_sync sync;
950 	int r;
951 
952 	amdgpu_sync_create(&sync);
953 	amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
954 	r = amdgpu_sync_wait(&sync, true);
955 	amdgpu_sync_free(&sync);
956 
957 	return r;
958 }
959 
960 /*
961  * amdgpu_vm_update_pde - update a single level in the hierarchy
962  *
963  * @param: parameters for the update
964  * @vm: requested vm
965  * @parent: parent directory
966  * @entry: entry to update
967  *
968  * Makes sure the requested entry in parent is up to date.
969  */
970 static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
971 				 struct amdgpu_vm *vm,
972 				 struct amdgpu_vm_pt *parent,
973 				 struct amdgpu_vm_pt *entry)
974 {
975 	struct amdgpu_bo *bo = parent->base.bo, *pbo;
976 	uint64_t pde, pt, flags;
977 	unsigned level;
978 
979 	/* Don't update huge pages here */
980 	if (entry->huge)
981 		return;
982 
983 	for (level = 0, pbo = bo->parent; pbo; ++level)
984 		pbo = pbo->parent;
985 
986 	level += params->adev->vm_manager.root_level;
987 	pt = amdgpu_bo_gpu_offset(entry->base.bo);
988 	flags = AMDGPU_PTE_VALID;
989 	amdgpu_gmc_get_vm_pde(params->adev, level, &pt, &flags);
990 	pde = (entry - parent->entries) * 8;
991 	if (bo->shadow)
992 		params->func(params, bo->shadow, pde, pt, 1, 0, flags);
993 	params->func(params, bo, pde, pt, 1, 0, flags);
994 }
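/*
 * For example (illustrative): if @entry is the fourth entry of @parent,
 * pde is 3 * 8 == 24, the byte offset of that PDE inside the parent PD
 * BO, and params->func() writes a single 8 byte entry there pointing at
 * the child BO with the flags returned by amdgpu_gmc_get_vm_pde().
 */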
995 
996 /*
997  * amdgpu_vm_invalidate_level - mark all PD levels as invalid
998  *
999  * @adev: amdgpu_device pointer
1000  * @vm: related vm
1001  * @parent: parent PD
1002  * @level: VMPT level
1003  *
1004  * Mark all PD level as invalid after an error.
1005  */
1006 static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
1007 				       struct amdgpu_vm *vm,
1008 				       struct amdgpu_vm_pt *parent,
1009 				       unsigned level)
1010 {
1011 	unsigned pt_idx, num_entries;
1012 
1013 	/*
1014 	 * Recurse into the subdirectories. This recursion is harmless because
1015 	 * we only have a maximum of 5 layers.
1016 	 */
1017 	num_entries = amdgpu_vm_num_entries(adev, level);
1018 	for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
1019 		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
1020 
1021 		if (!entry->base.bo)
1022 			continue;
1023 
1024 		if (!entry->base.moved)
1025 			list_move(&entry->base.vm_status, &vm->relocated);
1026 		amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
1027 	}
1028 }
1029 
1030 /*
1031  * amdgpu_vm_update_directories - make sure that all directories are valid
1032  *
1033  * @adev: amdgpu_device pointer
1034  * @vm: requested vm
1035  *
1036  * Makes sure all directories are up to date.
1037  *
1038  * Returns:
1039  * 0 for success, error for failure.
1040  */
1041 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
1042 				 struct amdgpu_vm *vm)
1043 {
1044 	struct amdgpu_pte_update_params params;
1045 	struct amdgpu_job *job;
1046 	unsigned ndw = 0;
1047 	int r = 0;
1048 
1049 	if (list_empty(&vm->relocated))
1050 		return 0;
1051 
1052 restart:
1053 	memset(&params, 0, sizeof(params));
1054 	params.adev = adev;
1055 
1056 	if (vm->use_cpu_for_update) {
1057 		struct amdgpu_vm_bo_base *bo_base;
1058 
1059 		list_for_each_entry(bo_base, &vm->relocated, vm_status) {
1060 			r = amdgpu_bo_kmap(bo_base->bo, NULL);
1061 			if (unlikely(r))
1062 				return r;
1063 		}
1064 
1065 		r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
1066 		if (unlikely(r))
1067 			return r;
1068 
1069 		params.func = amdgpu_vm_cpu_set_ptes;
1070 	} else {
1071 		ndw = 512 * 8;
1072 		r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1073 		if (r)
1074 			return r;
1075 
1076 		params.ib = &job->ibs[0];
1077 		params.func = amdgpu_vm_do_set_ptes;
1078 	}
1079 
1080 	while (!list_empty(&vm->relocated)) {
1081 		struct amdgpu_vm_bo_base *bo_base, *parent;
1082 		struct amdgpu_vm_pt *pt, *entry;
1083 		struct amdgpu_bo *bo;
1084 
1085 		bo_base = list_first_entry(&vm->relocated,
1086 					   struct amdgpu_vm_bo_base,
1087 					   vm_status);
1088 		bo_base->moved = false;
1089 		list_del_init(&bo_base->vm_status);
1090 
1091 		bo = bo_base->bo->parent;
1092 		if (!bo)
1093 			continue;
1094 
1095 		parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
1096 					  bo_list);
1097 		pt = container_of(parent, struct amdgpu_vm_pt, base);
1098 		entry = container_of(bo_base, struct amdgpu_vm_pt, base);
1099 
1100 		amdgpu_vm_update_pde(&params, vm, pt, entry);
1101 
1102 		if (!vm->use_cpu_for_update &&
1103 		    (ndw - params.ib->length_dw) < 32)
1104 			break;
1105 	}
1106 
1107 	if (vm->use_cpu_for_update) {
1108 		/* Flush HDP */
1109 		mb();
1110 		amdgpu_asic_flush_hdp(adev, NULL);
1111 	} else if (params.ib->length_dw == 0) {
1112 		amdgpu_job_free(job);
1113 	} else {
1114 		struct amdgpu_bo *root = vm->root.base.bo;
1115 		struct amdgpu_ring *ring;
1116 		struct dma_fence *fence;
1117 
1118 		ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
1119 				    sched);
1120 
1121 		amdgpu_ring_pad_ib(ring, params.ib);
1122 		amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
1123 				 AMDGPU_FENCE_OWNER_VM, false);
1124 		WARN_ON(params.ib->length_dw > ndw);
1125 		r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
1126 				      &fence);
1127 		if (r)
1128 			goto error;
1129 
1130 		amdgpu_bo_fence(root, fence, true);
1131 		dma_fence_put(vm->last_update);
1132 		vm->last_update = fence;
1133 	}
1134 
1135 	if (!list_empty(&vm->relocated))
1136 		goto restart;
1137 
1138 	return 0;
1139 
1140 error:
1141 	amdgpu_vm_invalidate_level(adev, vm, &vm->root,
1142 				   adev->vm_manager.root_level);
1143 	amdgpu_job_free(job);
1144 	return r;
1145 }
1146 
1147 /**
1148  * amdgpu_vm_get_entry - find the entry for an address
1149  *
1150  * @p: see amdgpu_pte_update_params definition
1151  * @addr: virtual address in question
1152  * @entry: resulting entry or NULL
1153  * @parent: parent entry
1154  *
1155  * Find the vm_pt entry and its parent for the given address.
1156  */
1157 void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
1158 			 struct amdgpu_vm_pt **entry,
1159 			 struct amdgpu_vm_pt **parent)
1160 {
1161 	unsigned level = p->adev->vm_manager.root_level;
1162 
1163 	*parent = NULL;
1164 	*entry = &p->vm->root;
1165 	while ((*entry)->entries) {
1166 		unsigned shift = amdgpu_vm_level_shift(p->adev, level++);
1167 
1168 		*parent = *entry;
1169 		*entry = &(*entry)->entries[addr >> shift];
1170 		addr &= (1ULL << shift) - 1;
1171 	}
1172 
1173 	if (level != AMDGPU_VM_PTB)
1174 		*entry = NULL;
1175 }
1176 
1177 /**
1178  * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
1179  *
1180  * @p: see amdgpu_pte_update_params definition
1181  * @entry: vm_pt entry to check
1182  * @parent: parent entry
1183  * @nptes: number of PTEs updated with this operation
1184  * @dst: destination address where the PTEs should point to
1185  * @flags: access flags for the PTEs
1186  *
1187  * Check if we can update the PD with a huge page.
1188  */
1189 static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
1190 					struct amdgpu_vm_pt *entry,
1191 					struct amdgpu_vm_pt *parent,
1192 					unsigned nptes, uint64_t dst,
1193 					uint64_t flags)
1194 {
1195 	uint64_t pde;
1196 
1197 	/* In the case of a mixed PT the PDE must point to it */
1198 	if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
1199 	    nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
1200 		/* Set the huge page flag to stop scanning at this PDE */
1201 		flags |= AMDGPU_PDE_PTE;
1202 	}
1203 
1204 	if (!(flags & AMDGPU_PDE_PTE)) {
1205 		if (entry->huge) {
1206 			/* Add the entry to the relocated list to update it. */
1207 			entry->huge = false;
1208 			list_move(&entry->base.vm_status, &p->vm->relocated);
1209 		}
1210 		return;
1211 	}
1212 
1213 	entry->huge = true;
1214 	amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);
1215 
1216 	pde = (entry - parent->entries) * 8;
1217 	if (parent->base.bo->shadow)
1218 		p->func(p, parent->base.bo->shadow, pde, dst, 1, 0, flags);
1219 	p->func(p, parent->base.bo, pde, dst, 1, 0, flags);
1220 }
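/*
 * Example (illustrative, assuming a 9 bit block_size on a Vega class
 * part): AMDGPU_VM_PTE_COUNT(adev) == 512, so an update covering a full
 * 512 * 4 KiB == 2 MiB aligned range sets AMDGPU_PDE_PTE on the PDE and
 * the directory entry then maps the 2 MiB directly, skipping the leaf
 * page table.
 */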
1221 
1222 /**
1223  * amdgpu_vm_update_ptes - make sure that page tables are valid
1224  *
1225  * @params: see amdgpu_pte_update_params definition
1226  * @start: start of GPU address range
1227  * @end: end of GPU address range
1228  * @dst: destination address to map to, the next dst inside the function
1229  * @flags: mapping flags
1230  *
1231  * Update the page tables in the range @start - @end.
1232  *
1233  * Returns:
1234  * 0 for success, -ENOENT if no page table entry exists for the address.
1235  */
1236 static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1237 				  uint64_t start, uint64_t end,
1238 				  uint64_t dst, uint64_t flags)
1239 {
1240 	struct amdgpu_device *adev = params->adev;
1241 	const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
1242 
1243 	uint64_t addr, pe_start;
1244 	struct amdgpu_bo *pt;
1245 	unsigned nptes;
1246 
1247 	/* walk over the address space and update the page tables */
1248 	for (addr = start; addr < end; addr += nptes,
1249 	     dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
1250 		struct amdgpu_vm_pt *entry, *parent;
1251 
1252 		amdgpu_vm_get_entry(params, addr, &entry, &parent);
1253 		if (!entry)
1254 			return -ENOENT;
1255 
1256 		if ((addr & ~mask) == (end & ~mask))
1257 			nptes = end - addr;
1258 		else
1259 			nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
1260 
1261 		amdgpu_vm_handle_huge_pages(params, entry, parent,
1262 					    nptes, dst, flags);
1263 		/* We don't need to update PTEs for huge pages */
1264 		if (entry->huge)
1265 			continue;
1266 
1267 		pt = entry->base.bo;
1268 		pe_start = (addr & mask) * 8;
1269 		if (pt->shadow)
1270 			params->func(params, pt->shadow, pe_start, dst, nptes,
1271 				     AMDGPU_GPU_PAGE_SIZE, flags);
1272 		params->func(params, pt, pe_start, dst, nptes,
1273 			     AMDGPU_GPU_PAGE_SIZE, flags);
1274 	}
1275 
1276 	return 0;
1277 }
1278 
1279 /*
1280  * amdgpu_vm_frag_ptes - add fragment information to PTEs
1281  *
1282  * @params: see amdgpu_pte_update_params definition
1284  * @start: first PTE to handle
1285  * @end: last PTE to handle
1286  * @dst: addr those PTEs should point to
1287  * @flags: hw mapping flags
1288  *
1289  * Returns:
1290  * 0 for success, -EINVAL for failure.
1291  */
1292 static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params	*params,
1293 				uint64_t start, uint64_t end,
1294 				uint64_t dst, uint64_t flags)
1295 {
1296 	/**
1297 	 * The MC L1 TLB supports variable sized pages, based on a fragment
1298 	 * field in the PTE. When this field is set to a non-zero value, page
1299 	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1300 	 * flags are considered valid for all PTEs within the fragment range
1301 	 * and corresponding mappings are assumed to be physically contiguous.
1302 	 *
1303 	 * The L1 TLB can store a single PTE for the whole fragment,
1304 	 * significantly increasing the space available for translation
1305 	 * caching. This leads to large improvements in throughput when the
1306 	 * TLB is under pressure.
1307 	 *
1308 	 * The L2 TLB distributes small and large fragments into two
1309 	 * asymmetric partitions. The large fragment cache is significantly
1310 	 * larger. Thus, we try to use large fragments wherever possible.
1311 	 * Userspace can support this by aligning virtual base address and
1312 	 * allocation size to the fragment size.
1313 	 */
1314 	unsigned max_frag = params->adev->vm_manager.fragment_size;
1315 	int r;
1316 
1317 	/* system pages are not contiguous */
1318 	if (params->src || !(flags & AMDGPU_PTE_VALID))
1319 		return amdgpu_vm_update_ptes(params, start, end, dst, flags);
1320 
1321 	while (start != end) {
1322 		uint64_t frag_flags, frag_end;
1323 		unsigned frag;
1324 
1325 		/* This intentionally wraps around if no bit is set */
1326 		frag = min((unsigned)ffs(start) - 1,
1327 			   (unsigned)fls64(end - start) - 1);
1328 		if (frag >= max_frag) {
1329 			frag_flags = AMDGPU_PTE_FRAG(max_frag);
1330 			frag_end = end & ~((1ULL << max_frag) - 1);
1331 		} else {
1332 			frag_flags = AMDGPU_PTE_FRAG(frag);
1333 			frag_end = start + (1 << frag);
1334 		}
1335 
1336 		r = amdgpu_vm_update_ptes(params, start, frag_end, dst,
1337 					  flags | frag_flags);
1338 		if (r)
1339 			return r;
1340 
1341 		dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE;
1342 		start = frag_end;
1343 	}
1344 
1345 	return 0;
1346 }
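/*
 * Worked example (illustrative, assuming the default fragment_size of
 * 9): for start == 0x800 and end == 0xa00 (GPU page numbers),
 * ffs(start) - 1 == 11 and fls64(end - start) - 1 == 9, so frag == 9.
 * Since frag >= max_frag, the whole 512 page (2 MiB) run is written
 * with AMDGPU_PTE_FRAG(9) and the L1 TLB can cache it as one entry.
 */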
1347 
1348 /**
1349  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1350  *
1351  * @adev: amdgpu_device pointer
1352  * @exclusive: fence we need to sync to
1353  * @pages_addr: DMA addresses to use for mapping
1354  * @vm: requested vm
1355  * @start: start of mapped range
1356  * @last: last mapped entry
1357  * @flags: flags for the entries
1358  * @addr: addr to set the area to
1359  * @fence: optional resulting fence
1360  *
1361  * Fill in the page table entries between @start and @last.
1362  *
1363  * Returns:
1364  * 0 for success, -EINVAL for failure.
1365  */
1366 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1367 				       struct dma_fence *exclusive,
1368 				       dma_addr_t *pages_addr,
1369 				       struct amdgpu_vm *vm,
1370 				       uint64_t start, uint64_t last,
1371 				       uint64_t flags, uint64_t addr,
1372 				       struct dma_fence **fence)
1373 {
1374 	struct amdgpu_ring *ring;
1375 	void *owner = AMDGPU_FENCE_OWNER_VM;
1376 	unsigned nptes, ncmds, ndw;
1377 	struct amdgpu_job *job;
1378 	struct amdgpu_pte_update_params params;
1379 	struct dma_fence *f = NULL;
1380 	int r;
1381 
1382 	memset(&params, 0, sizeof(params));
1383 	params.adev = adev;
1384 	params.vm = vm;
1385 
1386 	/* sync to everything on unmapping */
1387 	if (!(flags & AMDGPU_PTE_VALID))
1388 		owner = AMDGPU_FENCE_OWNER_UNDEFINED;
1389 
1390 	if (vm->use_cpu_for_update) {
1391 		/* params.src is used as a flag to indicate system memory */
1392 		if (pages_addr)
1393 			params.src = ~0;
1394 
1395 		/* Wait for PT BOs to be free. PTs share the same resv. object
1396 		 * as the root PD BO
1397 		 */
1398 		r = amdgpu_vm_wait_pd(adev, vm, owner);
1399 		if (unlikely(r))
1400 			return r;
1401 
1402 		params.func = amdgpu_vm_cpu_set_ptes;
1403 		params.pages_addr = pages_addr;
1404 		return amdgpu_vm_frag_ptes(&params, start, last + 1,
1405 					   addr, flags);
1406 	}
1407 
1408 	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
1409 
1410 	nptes = last - start + 1;
1411 
1412 	/*
1413 	 * reserve space for two commands every (1 << BLOCK_SIZE)
1414 	 *  entries or 2k dwords (whatever is smaller)
1415 	 *
1416 	 * The second command is for the shadow pagetables.
1417 	 */
1418 	if (vm->root.base.bo->shadow)
1419 		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
1420 	else
1421 		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
1422 
1423 	/* padding, etc. */
1424 	ndw = 64;
1425 
1426 	if (pages_addr) {
1427 		/* copy commands needed */
1428 		ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
1429 
1430 		/* and also PTEs */
1431 		ndw += nptes * 2;
1432 
1433 		params.func = amdgpu_vm_do_copy_ptes;
1434 
1435 	} else {
1436 		/* set page commands needed */
1437 		ndw += ncmds * 10;
1438 
1439 		/* extra commands for begin/end fragments */
1440 		if (vm->root.base.bo->shadow)
1441 			ndw += 2 * 10 * adev->vm_manager.fragment_size * 2;
1442 		else
1443 			ndw += 2 * 10 * adev->vm_manager.fragment_size;
1444 
1445 		params.func = amdgpu_vm_do_set_ptes;
1446 	}
1447 
1448 	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1449 	if (r)
1450 		return r;
1451 
1452 	params.ib = &job->ibs[0];
1453 
1454 	if (pages_addr) {
1455 		uint64_t *pte;
1456 		unsigned i;
1457 
1458 		/* Put the PTEs at the end of the IB. */
1459 		i = ndw - nptes * 2;
1460 		pte = (uint64_t *)&(job->ibs->ptr[i]);
1461 		params.src = job->ibs->gpu_addr + i * 4;
1462 
1463 		for (i = 0; i < nptes; ++i) {
1464 			pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
1465 						    AMDGPU_GPU_PAGE_SIZE);
1466 			pte[i] |= flags;
1467 		}
1468 		addr = 0;
1469 	}
1470 
1471 	r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
1472 	if (r)
1473 		goto error_free;
1474 
1475 	r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
1476 			     owner, false);
1477 	if (r)
1478 		goto error_free;
1479 
1480 	r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
1481 	if (r)
1482 		goto error_free;
1483 
1484 	r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
1485 	if (r)
1486 		goto error_free;
1487 
1488 	amdgpu_ring_pad_ib(ring, params.ib);
1489 	WARN_ON(params.ib->length_dw > ndw);
1490 	r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
1491 	if (r)
1492 		goto error_free;
1493 
1494 	amdgpu_bo_fence(vm->root.base.bo, f, true);
1495 	dma_fence_put(*fence);
1496 	*fence = f;
1497 	return 0;
1498 
1499 error_free:
1500 	amdgpu_job_free(job);
1501 	return r;
1502 }
1503 
1504 /**
1505  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1506  *
1507  * @adev: amdgpu_device pointer
1508  * @exclusive: fence we need to sync to
1509  * @pages_addr: DMA addresses to use for mapping
1510  * @vm: requested vm
1511  * @mapping: mapped range and flags to use for the update
1512  * @flags: HW flags for the mapping
1513  * @nodes: array of drm_mm_nodes with the MC addresses
1514  * @fence: optional resulting fence
1515  *
1516  * Split the mapping into smaller chunks so that each update fits
1517  * into a SDMA IB.
1518  *
1519  * Returns:
1520  * 0 for success, -EINVAL for failure.
1521  */
1522 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1523 				      struct dma_fence *exclusive,
1524 				      dma_addr_t *pages_addr,
1525 				      struct amdgpu_vm *vm,
1526 				      struct amdgpu_bo_va_mapping *mapping,
1527 				      uint64_t flags,
1528 				      struct drm_mm_node *nodes,
1529 				      struct dma_fence **fence)
1530 {
1531 	unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
1532 	uint64_t pfn, start = mapping->start;
1533 	int r;
1534 
1535 	/* normally, bo_va->flags only contains the READABLE and WRITEABLE bits,
1536 	 * but just in case we filter the flags here first
1537 	 */
1538 	if (!(mapping->flags & AMDGPU_PTE_READABLE))
1539 		flags &= ~AMDGPU_PTE_READABLE;
1540 	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1541 		flags &= ~AMDGPU_PTE_WRITEABLE;
1542 
1543 	flags &= ~AMDGPU_PTE_EXECUTABLE;
1544 	flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1545 
1546 	flags &= ~AMDGPU_PTE_MTYPE_MASK;
1547 	flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
1548 
1549 	if ((mapping->flags & AMDGPU_PTE_PRT) &&
1550 	    (adev->asic_type >= CHIP_VEGA10)) {
1551 		flags |= AMDGPU_PTE_PRT;
1552 		flags &= ~AMDGPU_PTE_VALID;
1553 	}
1554 
1555 	trace_amdgpu_vm_bo_update(mapping);
1556 
1557 	pfn = mapping->offset >> PAGE_SHIFT;
1558 	if (nodes) {
1559 		while (pfn >= nodes->size) {
1560 			pfn -= nodes->size;
1561 			++nodes;
1562 		}
1563 	}
1564 
1565 	do {
1566 		dma_addr_t *dma_addr = NULL;
1567 		uint64_t max_entries;
1568 		uint64_t addr, last;
1569 
1570 		if (nodes) {
1571 			addr = nodes->start << PAGE_SHIFT;
1572 			max_entries = (nodes->size - pfn) *
1573 				AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1574 		} else {
1575 			addr = 0;
1576 			max_entries = S64_MAX;
1577 		}
1578 
1579 		if (pages_addr) {
1580 			uint64_t count;
1581 
1582 			max_entries = min(max_entries, 16ull * 1024ull);
1583 			for (count = 1;
1584 			     count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1585 			     ++count) {
1586 				uint64_t idx = pfn + count;
1587 
1588 				if (pages_addr[idx] !=
1589 				    (pages_addr[idx - 1] + PAGE_SIZE))
1590 					break;
1591 			}
1592 
1593 			if (count < min_linear_pages) {
1594 				addr = pfn << PAGE_SHIFT;
1595 				dma_addr = pages_addr;
1596 			} else {
1597 				addr = pages_addr[pfn];
1598 				max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1599 			}
1600 
1601 		} else if (flags & AMDGPU_PTE_VALID) {
1602 			addr += adev->vm_manager.vram_base_offset;
1603 			addr += pfn << PAGE_SHIFT;
1604 		}
1605 
1606 		last = min((uint64_t)mapping->last, start + max_entries - 1);
1607 		r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
1608 						start, last, flags, addr,
1609 						fence);
1610 		if (r)
1611 			return r;
1612 
1613 		pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1614 		if (nodes && nodes->size == pfn) {
1615 			pfn = 0;
1616 			++nodes;
1617 		}
1618 		start = last + 1;
1619 
1620 	} while (unlikely(start != mapping->last + 1));
1621 
1622 	return 0;
1623 }
1624 
1625 /**
1626  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1627  *
1628  * @adev: amdgpu_device pointer
1629  * @bo_va: requested BO and VM object
1630  * @clear: if true clear the entries
1631  *
1632  * Fill in the page table entries for @bo_va.
1633  *
1634  * Returns:
1635  * 0 for success, -EINVAL for failure.
1636  */
1637 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1638 			struct amdgpu_bo_va *bo_va,
1639 			bool clear)
1640 {
1641 	struct amdgpu_bo *bo = bo_va->base.bo;
1642 	struct amdgpu_vm *vm = bo_va->base.vm;
1643 	struct amdgpu_bo_va_mapping *mapping;
1644 	dma_addr_t *pages_addr = NULL;
1645 	struct ttm_mem_reg *mem;
1646 	struct drm_mm_node *nodes;
1647 	struct dma_fence *exclusive, **last_update;
1648 	uint64_t flags;
1649 	int r;
1650 
1651 	if (clear || !bo) {
1652 		mem = NULL;
1653 		nodes = NULL;
1654 		exclusive = NULL;
1655 	} else {
1656 		struct ttm_dma_tt *ttm;
1657 
1658 		mem = &bo->tbo.mem;
1659 		nodes = mem->mm_node;
1660 		if (mem->mem_type == TTM_PL_TT) {
1661 			ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
1662 			pages_addr = ttm->dma_address;
1663 		}
1664 		exclusive = reservation_object_get_excl(bo->tbo.resv);
1665 	}
1666 
1667 	if (bo)
1668 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1669 	else
1670 		flags = 0x0;
1671 
1672 	if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
1673 		last_update = &vm->last_update;
1674 	else
1675 		last_update = &bo_va->last_pt_update;
1676 
1677 	if (!clear && bo_va->base.moved) {
1678 		bo_va->base.moved = false;
1679 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1680 
1681 	} else if (bo_va->cleared != clear) {
1682 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1683 	}
1684 
1685 	list_for_each_entry(mapping, &bo_va->invalids, list) {
1686 		r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
1687 					       mapping, flags, nodes,
1688 					       last_update);
1689 		if (r)
1690 			return r;
1691 	}
1692 
1693 	if (vm->use_cpu_for_update) {
1694 		/* Flush HDP */
1695 		mb();
1696 		amdgpu_asic_flush_hdp(adev, NULL);
1697 	}
1698 
1699 	spin_lock(&vm->moved_lock);
1700 	list_del_init(&bo_va->base.vm_status);
1701 	spin_unlock(&vm->moved_lock);
1702 
1703 	/* If the BO is not in its preferred location add it back to
1704 	 * the evicted list so that it gets validated again on the
1705 	 * next command submission.
1706 	 */
1707 	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
1708 		uint32_t mem_type = bo->tbo.mem.mem_type;
1709 
1710 		if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
1711 			list_add_tail(&bo_va->base.vm_status, &vm->evicted);
1712 		else
1713 			list_add(&bo_va->base.vm_status, &vm->idle);
1714 	}
1715 
1716 	list_splice_init(&bo_va->invalids, &bo_va->valids);
1717 	bo_va->cleared = clear;
1718 
1719 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1720 		list_for_each_entry(mapping, &bo_va->valids, list)
1721 			trace_amdgpu_vm_bo_mapping(mapping);
1722 	}
1723 
1724 	return 0;
1725 }
1726 
1727 /**
1728  * amdgpu_vm_update_prt_state - update the global PRT state
1729  *
1730  * @adev: amdgpu_device pointer
1731  */
1732 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1733 {
1734 	unsigned long flags;
1735 	bool enable;
1736 
1737 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1738 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1739 	adev->gmc.gmc_funcs->set_prt(adev, enable);
1740 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1741 }
1742 
1743 /**
1744  * amdgpu_vm_prt_get - add a PRT user
1745  *
1746  * @adev: amdgpu_device pointer
1747  */
1748 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1749 {
1750 	if (!adev->gmc.gmc_funcs->set_prt)
1751 		return;
1752 
1753 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1754 		amdgpu_vm_update_prt_state(adev);
1755 }
1756 
1757 /**
1758  * amdgpu_vm_prt_put - drop a PRT user
1759  *
1760  * @adev: amdgpu_device pointer
1761  */
1762 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1763 {
1764 	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1765 		amdgpu_vm_update_prt_state(adev);
1766 }
1767 
1768 /**
1769  * amdgpu_vm_prt_cb - callback for updating the PRT status
1770  *
1771  * @fence: fence for the callback
1772  * @_cb: the callback function
1773  */
1774 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1775 {
1776 	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1777 
1778 	amdgpu_vm_prt_put(cb->adev);
1779 	kfree(cb);
1780 }
1781 
1782 /**
1783  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1784  *
1785  * @adev: amdgpu_device pointer
1786  * @fence: fence for the callback
1787  */
1788 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1789 				 struct dma_fence *fence)
1790 {
1791 	struct amdgpu_prt_cb *cb;
1792 
1793 	if (!adev->gmc.gmc_funcs->set_prt)
1794 		return;
1795 
1796 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1797 	if (!cb) {
1798 		/* Last resort when we are OOM */
1799 		if (fence)
1800 			dma_fence_wait(fence, false);
1801 
1802 		amdgpu_vm_prt_put(adev);
1803 	} else {
1804 		cb->adev = adev;
1805 		if (!fence || dma_fence_add_callback(fence, &cb->cb,
1806 						     amdgpu_vm_prt_cb))
1807 			amdgpu_vm_prt_cb(fence, &cb->cb);
1808 	}
1809 }
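
/*
 * Rough sketch of the PRT reference lifecycle (for orientation only, not a
 * verbatim call chain): inserting a mapping with AMDGPU_PTE_PRT set takes a
 * reference via amdgpu_vm_prt_get(), which enables PRT support on the first
 * user.  Freeing the mapping goes through amdgpu_vm_add_prt_cb(), which keeps
 * the reference until the unmap fence signals and amdgpu_vm_prt_cb() finally
 * drops it again:
 *
 *	amdgpu_vm_bo_insert_map()  -> amdgpu_vm_prt_get()
 *	amdgpu_vm_free_mapping()   -> amdgpu_vm_add_prt_cb(adev, fence)
 *	fence signals              -> amdgpu_vm_prt_cb() -> amdgpu_vm_prt_put()
 */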
1810 
1811 /**
1812  * amdgpu_vm_free_mapping - free a mapping
1813  *
1814  * @adev: amdgpu_device pointer
1815  * @vm: requested vm
1816  * @mapping: mapping to be freed
1817  * @fence: fence of the unmap operation
1818  *
1819  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1820  */
1821 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1822 				   struct amdgpu_vm *vm,
1823 				   struct amdgpu_bo_va_mapping *mapping,
1824 				   struct dma_fence *fence)
1825 {
1826 	if (mapping->flags & AMDGPU_PTE_PRT)
1827 		amdgpu_vm_add_prt_cb(adev, fence);
1828 	kfree(mapping);
1829 }
1830 
1831 /**
1832  * amdgpu_vm_prt_fini - finish all prt mappings
1833  *
1834  * @adev: amdgpu_device pointer
1835  * @vm: requested vm
1836  *
1837  * Register a cleanup callback to disable PRT support after VM dies.
1838  */
1839 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1840 {
1841 	struct reservation_object *resv = vm->root.base.bo->tbo.resv;
1842 	struct dma_fence *excl, **shared;
1843 	unsigned i, shared_count;
1844 	int r;
1845 
1846 	r = reservation_object_get_fences_rcu(resv, &excl,
1847 					      &shared_count, &shared);
1848 	if (r) {
1849 		/* Not enough memory to grab the fence list, as last resort
1850 		 * block for all the fences to complete.
1851 		 */
1852 		reservation_object_wait_timeout_rcu(resv, true, false,
1853 						    MAX_SCHEDULE_TIMEOUT);
1854 		return;
1855 	}
1856 
1857 	/* Add a callback for each fence in the reservation object */
1858 	amdgpu_vm_prt_get(adev);
1859 	amdgpu_vm_add_prt_cb(adev, excl);
1860 
1861 	for (i = 0; i < shared_count; ++i) {
1862 		amdgpu_vm_prt_get(adev);
1863 		amdgpu_vm_add_prt_cb(adev, shared[i]);
1864 	}
1865 
1866 	kfree(shared);
1867 }
1868 
1869 /**
1870  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1871  *
1872  * @adev: amdgpu_device pointer
1873  * @vm: requested vm
1874  * @fence: optional resulting fence (unchanged if no work needed to be done
1875  * or if an error occurred)
1876  *
1877  * Make sure all freed BOs are cleared in the PT.
1878  * PTs have to be reserved and mutex must be locked!
1879  *
1880  * Returns:
1881  * 0 for success.
1882  *
1883  */
1884 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1885 			  struct amdgpu_vm *vm,
1886 			  struct dma_fence **fence)
1887 {
1888 	struct amdgpu_bo_va_mapping *mapping;
1889 	uint64_t init_pte_value = 0;
1890 	struct dma_fence *f = NULL;
1891 	int r;
1892 
1893 	while (!list_empty(&vm->freed)) {
1894 		mapping = list_first_entry(&vm->freed,
1895 			struct amdgpu_bo_va_mapping, list);
1896 		list_del(&mapping->list);
1897 
1898 		if (vm->pte_support_ats && mapping->start < AMDGPU_VA_HOLE_START)
1899 			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
1900 
1901 		r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
1902 						mapping->start, mapping->last,
1903 						init_pte_value, 0, &f);
1904 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
1905 		if (r) {
1906 			dma_fence_put(f);
1907 			return r;
1908 		}
1909 	}
1910 
1911 	if (fence && f) {
1912 		dma_fence_put(*fence);
1913 		*fence = f;
1914 	} else {
1915 		dma_fence_put(f);
1916 	}
1917 
1918 	return 0;
1919 
1920 }
1921 
1922 /**
1923  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1924  *
1925  * @adev: amdgpu_device pointer
1926  * @vm: requested vm
1927  *
1928  * Make sure all BOs which are moved are updated in the PTs.
1929  *
1930  * Returns:
1931  * 0 for success.
1932  *
1933  * PTs have to be reserved!
1934  */
1935 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1936 			   struct amdgpu_vm *vm)
1937 {
1938 	struct amdgpu_bo_va *bo_va, *tmp;
1939 	struct list_head moved;
1940 	bool clear;
1941 	int r;
1942 
1943 	INIT_LIST_HEAD(&moved);
1944 	spin_lock(&vm->moved_lock);
1945 	list_splice_init(&vm->moved, &moved);
1946 	spin_unlock(&vm->moved_lock);
1947 
1948 	list_for_each_entry_safe(bo_va, tmp, &moved, base.vm_status) {
1949 		struct reservation_object *resv = bo_va->base.bo->tbo.resv;
1950 
1951 		/* Per VM BOs never need to be cleared in the page tables */
1952 		if (resv == vm->root.base.bo->tbo.resv)
1953 			clear = false;
1954 		/* Try to reserve the BO to avoid clearing its ptes */
1955 		else if (!amdgpu_vm_debug && reservation_object_trylock(resv))
1956 			clear = false;
1957 		/* Somebody else is using the BO right now */
1958 		else
1959 			clear = true;
1960 
1961 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
1962 		if (r) {
1963 			spin_lock(&vm->moved_lock);
1964 			list_splice(&moved, &vm->moved);
1965 			spin_unlock(&vm->moved_lock);
1966 			return r;
1967 		}
1968 
1969 		if (!clear && resv != vm->root.base.bo->tbo.resv)
1970 			reservation_object_unlock(resv);
1971 
1972 	}
1973 
1974 	return 0;
1975 }
1976 
1977 /**
1978  * amdgpu_vm_bo_add - add a bo to a specific vm
1979  *
1980  * @adev: amdgpu_device pointer
1981  * @vm: requested vm
1982  * @bo: amdgpu buffer object
1983  *
1984  * Add @bo into the requested vm.
1985  * Add @bo to the list of bos associated with the vm
1986  *
1987  * Returns:
1988  * Newly added bo_va or NULL for failure
1989  *
1990  * Object has to be reserved!
1991  */
1992 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1993 				      struct amdgpu_vm *vm,
1994 				      struct amdgpu_bo *bo)
1995 {
1996 	struct amdgpu_bo_va *bo_va;
1997 
1998 	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1999 	if (bo_va == NULL) {
2000 		return NULL;
2001 	}
2002 	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
2003 
2004 	bo_va->ref_count = 1;
2005 	INIT_LIST_HEAD(&bo_va->valids);
2006 	INIT_LIST_HEAD(&bo_va->invalids);
2007 
2008 	return bo_va;
2009 }
2010 
2011 
2012 /**
2013  * amdgpu_vm_bo_insert_map - insert a new mapping
2014  *
2015  * @adev: amdgpu_device pointer
2016  * @bo_va: bo_va to store the address
2017  * @mapping: the mapping to insert
2018  *
2019  * Insert a new mapping into all structures.
2020  */
2021 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
2022 				    struct amdgpu_bo_va *bo_va,
2023 				    struct amdgpu_bo_va_mapping *mapping)
2024 {
2025 	struct amdgpu_vm *vm = bo_va->base.vm;
2026 	struct amdgpu_bo *bo = bo_va->base.bo;
2027 
2028 	mapping->bo_va = bo_va;
2029 	list_add(&mapping->list, &bo_va->invalids);
2030 	amdgpu_vm_it_insert(mapping, &vm->va);
2031 
2032 	if (mapping->flags & AMDGPU_PTE_PRT)
2033 		amdgpu_vm_prt_get(adev);
2034 
2035 	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
2036 	    !bo_va->base.moved) {
2037 		spin_lock(&vm->moved_lock);
2038 		list_move(&bo_va->base.vm_status, &vm->moved);
2039 		spin_unlock(&vm->moved_lock);
2040 	}
2041 	trace_amdgpu_vm_bo_map(bo_va, mapping);
2042 }
2043 
2044 /**
2045  * amdgpu_vm_bo_map - map bo inside a vm
2046  *
2047  * @adev: amdgpu_device pointer
2048  * @bo_va: bo_va to store the address
2049  * @saddr: where to map the BO
2050  * @offset: requested offset in the BO
2051  * @size: BO size in bytes
2052  * @flags: attributes of pages (read/write/valid/etc.)
2053  *
2054  * Add a mapping of the BO at the specified addr into the VM.
2055  *
2056  * Returns:
2057  * 0 for success, error for failure.
2058  *
2059  * Object has to be reserved and unreserved outside!
2060  */
2061 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2062 		     struct amdgpu_bo_va *bo_va,
2063 		     uint64_t saddr, uint64_t offset,
2064 		     uint64_t size, uint64_t flags)
2065 {
2066 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2067 	struct amdgpu_bo *bo = bo_va->base.bo;
2068 	struct amdgpu_vm *vm = bo_va->base.vm;
2069 	uint64_t eaddr;
2070 
2071 	/* validate the parameters */
2072 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2073 	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2074 		return -EINVAL;
2075 
2076 	/* make sure object fit at this offset */
2077 	eaddr = saddr + size - 1;
2078 	if (saddr >= eaddr ||
2079 	    (bo && offset + size > amdgpu_bo_size(bo)))
2080 		return -EINVAL;
2081 
2082 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2083 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2084 
2085 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2086 	if (tmp) {
2087 		/* bo and tmp overlap, invalid addr */
2088 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2089 			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
2090 			tmp->start, tmp->last + 1);
2091 		return -EINVAL;
2092 	}
2093 
2094 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2095 	if (!mapping)
2096 		return -ENOMEM;
2097 
2098 	mapping->start = saddr;
2099 	mapping->last = eaddr;
2100 	mapping->offset = offset;
2101 	mapping->flags = flags;
2102 
2103 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2104 
2105 	return 0;
2106 }
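
/*
 * Illustrative use only (the address and flags below are hypothetical); the
 * BO has to be reserved by the caller as noted above:
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, 0x100000, 0, amdgpu_bo_size(bo),
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *	if (r)
 *		return r;
 *
 * saddr, offset and size must all be GPU page aligned, otherwise -EINVAL is
 * returned.
 */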
2107 
2108 /**
2109  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2110  *
2111  * @adev: amdgpu_device pointer
2112  * @bo_va: bo_va to store the address
2113  * @saddr: where to map the BO
2114  * @offset: requested offset in the BO
2115  * @size: BO size in bytes
2116  * @flags: attributes of pages (read/write/valid/etc.)
2117  *
2118  * Add a mapping of the BO at the specified addr into the VM. Replace existing
2119  * mappings as we do so.
2120  *
2121  * Returns:
2122  * 0 for success, error for failure.
2123  *
2124  * Object has to be reserved and unreserved outside!
2125  */
2126 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
2127 			     struct amdgpu_bo_va *bo_va,
2128 			     uint64_t saddr, uint64_t offset,
2129 			     uint64_t size, uint64_t flags)
2130 {
2131 	struct amdgpu_bo_va_mapping *mapping;
2132 	struct amdgpu_bo *bo = bo_va->base.bo;
2133 	uint64_t eaddr;
2134 	int r;
2135 
2136 	/* validate the parameters */
2137 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2138 	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2139 		return -EINVAL;
2140 
2141 	/* make sure object fit at this offset */
2142 	eaddr = saddr + size - 1;
2143 	if (saddr >= eaddr ||
2144 	    (bo && offset + size > amdgpu_bo_size(bo)))
2145 		return -EINVAL;
2146 
2147 	/* Allocate all the needed memory */
2148 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2149 	if (!mapping)
2150 		return -ENOMEM;
2151 
2152 	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2153 	if (r) {
2154 		kfree(mapping);
2155 		return r;
2156 	}
2157 
2158 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2159 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2160 
2161 	mapping->start = saddr;
2162 	mapping->last = eaddr;
2163 	mapping->offset = offset;
2164 	mapping->flags = flags;
2165 
2166 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2167 
2168 	return 0;
2169 }
2170 
2171 /**
2172  * amdgpu_vm_bo_unmap - remove bo mapping from vm
2173  *
2174  * @adev: amdgpu_device pointer
2175  * @bo_va: bo_va to remove the address from
2176  * @saddr: where the BO is mapped
2177  *
2178  * Remove a mapping of the BO at the specified addr from the VM.
2179  *
2180  * Returns:
2181  * 0 for success, error for failure.
2182  *
2183  * Object has to be reserved and unreserved outside!
2184  */
2185 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2186 		       struct amdgpu_bo_va *bo_va,
2187 		       uint64_t saddr)
2188 {
2189 	struct amdgpu_bo_va_mapping *mapping;
2190 	struct amdgpu_vm *vm = bo_va->base.vm;
2191 	bool valid = true;
2192 
2193 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2194 
2195 	list_for_each_entry(mapping, &bo_va->valids, list) {
2196 		if (mapping->start == saddr)
2197 			break;
2198 	}
2199 
2200 	if (&mapping->list == &bo_va->valids) {
2201 		valid = false;
2202 
2203 		list_for_each_entry(mapping, &bo_va->invalids, list) {
2204 			if (mapping->start == saddr)
2205 				break;
2206 		}
2207 
2208 		if (&mapping->list == &bo_va->invalids)
2209 			return -ENOENT;
2210 	}
2211 
2212 	list_del(&mapping->list);
2213 	amdgpu_vm_it_remove(mapping, &vm->va);
2214 	mapping->bo_va = NULL;
2215 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2216 
2217 	if (valid)
2218 		list_add(&mapping->list, &vm->freed);
2219 	else
2220 		amdgpu_vm_free_mapping(adev, vm, mapping,
2221 				       bo_va->last_pt_update);
2222 
2223 	return 0;
2224 }
2225 
2226 /**
2227  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2228  *
2229  * @adev: amdgpu_device pointer
2230  * @vm: VM structure to use
2231  * @saddr: start of the range
2232  * @size: size of the range
2233  *
2234  * Remove all mappings in a range, split them as appropriate.
2235  *
2236  * Returns:
2237  * 0 for success, error for failure.
2238  */
2239 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2240 				struct amdgpu_vm *vm,
2241 				uint64_t saddr, uint64_t size)
2242 {
2243 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2244 	LIST_HEAD(removed);
2245 	uint64_t eaddr;
2246 
2247 	eaddr = saddr + size - 1;
2248 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2249 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2250 
2251 	/* Allocate all the needed memory */
2252 	before = kzalloc(sizeof(*before), GFP_KERNEL);
2253 	if (!before)
2254 		return -ENOMEM;
2255 	INIT_LIST_HEAD(&before->list);
2256 
2257 	after = kzalloc(sizeof(*after), GFP_KERNEL);
2258 	if (!after) {
2259 		kfree(before);
2260 		return -ENOMEM;
2261 	}
2262 	INIT_LIST_HEAD(&after->list);
2263 
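	/*
	 * Mappings that only partially overlap [saddr, eaddr] are split: as a
	 * purely illustrative example, clearing pages 0x20..0x5f out of a
	 * mapping that covers pages 0x00..0x7f leaves a "before" remainder
	 * for 0x00..0x1f and an "after" remainder for 0x60..0x7f.
	 */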
2264 	/* Now gather all removed mappings */
2265 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2266 	while (tmp) {
2267 		/* Remember mapping split at the start */
2268 		if (tmp->start < saddr) {
2269 			before->start = tmp->start;
2270 			before->last = saddr - 1;
2271 			before->offset = tmp->offset;
2272 			before->flags = tmp->flags;
2273 			before->bo_va = tmp->bo_va;
2274 			list_add(&before->list, &tmp->bo_va->invalids);
2275 		}
2276 
2277 		/* Remember mapping split at the end */
2278 		if (tmp->last > eaddr) {
2279 			after->start = eaddr + 1;
2280 			after->last = tmp->last;
2281 			after->offset = tmp->offset;
2282 			after->offset += after->start - tmp->start;
2283 			after->flags = tmp->flags;
2284 			after->bo_va = tmp->bo_va;
2285 			list_add(&after->list, &tmp->bo_va->invalids);
2286 		}
2287 
2288 		list_del(&tmp->list);
2289 		list_add(&tmp->list, &removed);
2290 
2291 		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2292 	}
2293 
2294 	/* And free them up */
2295 	list_for_each_entry_safe(tmp, next, &removed, list) {
2296 		amdgpu_vm_it_remove(tmp, &vm->va);
2297 		list_del(&tmp->list);
2298 
2299 		if (tmp->start < saddr)
2300 		    tmp->start = saddr;
2301 		if (tmp->last > eaddr)
2302 		    tmp->last = eaddr;
2303 
2304 		tmp->bo_va = NULL;
2305 		list_add(&tmp->list, &vm->freed);
2306 		trace_amdgpu_vm_bo_unmap(NULL, tmp);
2307 	}
2308 
2309 	/* Insert partial mapping before the range */
2310 	if (!list_empty(&before->list)) {
2311 		amdgpu_vm_it_insert(before, &vm->va);
2312 		if (before->flags & AMDGPU_PTE_PRT)
2313 			amdgpu_vm_prt_get(adev);
2314 	} else {
2315 		kfree(before);
2316 	}
2317 
2318 	/* Insert partial mapping after the range */
2319 	if (!list_empty(&after->list)) {
2320 		amdgpu_vm_it_insert(after, &vm->va);
2321 		if (after->flags & AMDGPU_PTE_PRT)
2322 			amdgpu_vm_prt_get(adev);
2323 	} else {
2324 		kfree(after);
2325 	}
2326 
2327 	return 0;
2328 }
2329 
2330 /**
2331  * amdgpu_vm_bo_lookup_mapping - find mapping by address
2332  *
2333  * @vm: the requested VM
2334  * @addr: the address
2335  *
2336  * Find a mapping by its address.
2337  *
2338  * Returns:
2339  * The amdgpu_bo_va_mapping matching addr, or NULL if none is found
2340  *
2341  */
2342 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2343 							 uint64_t addr)
2344 {
2345 	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2346 }
2347 
2348 /**
2349  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2350  *
2351  * @vm: the requested vm
2352  * @ticket: CS ticket
2353  *
2354  * Trace all mappings of BOs reserved during a command submission.
2355  */
2356 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2357 {
2358 	struct amdgpu_bo_va_mapping *mapping;
2359 
2360 	if (!trace_amdgpu_vm_bo_cs_enabled())
2361 		return;
2362 
2363 	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2364 	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2365 		if (mapping->bo_va && mapping->bo_va->base.bo) {
2366 			struct amdgpu_bo *bo;
2367 
2368 			bo = mapping->bo_va->base.bo;
2369 			if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
2370 				continue;
2371 		}
2372 
2373 		trace_amdgpu_vm_bo_cs(mapping);
2374 	}
2375 }
2376 
2377 /**
2378  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2379  *
2380  * @adev: amdgpu_device pointer
2381  * @bo_va: requested bo_va
2382  *
2383  * Remove @bo_va->bo from the requested vm.
2384  *
2385  * Object has to be reserved!
2386  */
2387 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2388 		      struct amdgpu_bo_va *bo_va)
2389 {
2390 	struct amdgpu_bo_va_mapping *mapping, *next;
2391 	struct amdgpu_vm *vm = bo_va->base.vm;
2392 
2393 	list_del(&bo_va->base.bo_list);
2394 
2395 	spin_lock(&vm->moved_lock);
2396 	list_del(&bo_va->base.vm_status);
2397 	spin_unlock(&vm->moved_lock);
2398 
2399 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2400 		list_del(&mapping->list);
2401 		amdgpu_vm_it_remove(mapping, &vm->va);
2402 		mapping->bo_va = NULL;
2403 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2404 		list_add(&mapping->list, &vm->freed);
2405 	}
2406 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2407 		list_del(&mapping->list);
2408 		amdgpu_vm_it_remove(mapping, &vm->va);
2409 		amdgpu_vm_free_mapping(adev, vm, mapping,
2410 				       bo_va->last_pt_update);
2411 	}
2412 
2413 	dma_fence_put(bo_va->last_pt_update);
2414 	kfree(bo_va);
2415 }
2416 
2417 /**
2418  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2419  *
2420  * @adev: amdgpu_device pointer
2421  * @bo: amdgpu buffer object
2422  * @evicted: is the BO evicted
2423  *
2424  * Mark @bo as invalid.
2425  */
2426 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2427 			     struct amdgpu_bo *bo, bool evicted)
2428 {
2429 	struct amdgpu_vm_bo_base *bo_base;
2430 
2431 	/* shadow bo doesn't have bo base, its validation needs its parent */
2432 	if (bo->parent && bo->parent->shadow == bo)
2433 		bo = bo->parent;
2434 
2435 	list_for_each_entry(bo_base, &bo->va, bo_list) {
2436 		struct amdgpu_vm *vm = bo_base->vm;
2437 		bool was_moved = bo_base->moved;
2438 
2439 		bo_base->moved = true;
2440 		if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
2441 			if (bo->tbo.type == ttm_bo_type_kernel)
2442 				list_move(&bo_base->vm_status, &vm->evicted);
2443 			else
2444 				list_move_tail(&bo_base->vm_status,
2445 					       &vm->evicted);
2446 			continue;
2447 		}
2448 
2449 		if (was_moved)
2450 			continue;
2451 
2452 		if (bo->tbo.type == ttm_bo_type_kernel) {
2453 			list_move(&bo_base->vm_status, &vm->relocated);
2454 		} else {
2455 			spin_lock(&bo_base->vm->moved_lock);
2456 			list_move(&bo_base->vm_status, &vm->moved);
2457 			spin_unlock(&bo_base->vm->moved_lock);
2458 		}
2459 	}
2460 }
2461 
2462 /**
2463  * amdgpu_vm_get_block_size - calculate VM page table block size as power of two
2464  *
2465  * @vm_size: VM size
2466  *
2467  * Returns:
2468  * VM page table block size as power of two
2469  */
2470 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2471 {
2472 	/* Total bits covered by PD + PTs */
2473 	unsigned bits = ilog2(vm_size) + 18;
2474 
2475 	/* Make sure the PD is 4K in size up to 8GB address space.
2476 	   Above that, split equally between PD and PTs */
2477 	if (vm_size <= 8)
2478 		return (bits - 9);
2479 	else
2480 		return ((bits + 3) / 2);
2481 }
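
/*
 * Rough illustration of the calculation above: a 256 GB VM needs
 * ilog2(256) + 18 = 26 bits of page addressing, so the block size becomes
 * (26 + 3) / 2 = 14 bits for the page tables, leaving 12 bits for the page
 * directory.
 */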
2482 
2483 /**
2484  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2485  *
2486  * @adev: amdgpu_device pointer
2487  * @min_vm_size: the minimum vm size in GB if it's set to auto
2488  * @fragment_size_default: Default PTE fragment size
2489  * @max_level: max VMPT level
2490  * @max_bits: max address space size in bits
2491  *
2492  */
2493 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2494 			   uint32_t fragment_size_default, unsigned max_level,
2495 			   unsigned max_bits)
2496 {
2497 	unsigned int max_size = 1 << (max_bits - 30);
2498 	unsigned int vm_size;
2499 	uint64_t tmp;
2500 
2501 	/* adjust vm size first */
2502 	if (amdgpu_vm_size != -1) {
2503 		vm_size = amdgpu_vm_size;
2504 		if (vm_size > max_size) {
2505 			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2506 				 amdgpu_vm_size, max_size);
2507 			vm_size = max_size;
2508 		}
2509 	} else {
2510 		struct sysinfo si;
2511 		unsigned int phys_ram_gb;
2512 
2513 		/* Optimal VM size depends on the amount of physical
2514 		 * RAM available. Underlying requirements and
2515 		 * assumptions:
2516 		 *
2517 		 *  - Need to map system memory and VRAM from all GPUs
2518 		 *     - VRAM from other GPUs not known here
2519 		 *     - Assume VRAM <= system memory
2520 		 *  - On GFX8 and older, VM space can be segmented for
2521 		 *    different MTYPEs
2522 		 *  - Need to allow room for fragmentation, guard pages etc.
2523 		 *
2524 		 * This adds up to a rough guess of system memory x3.
2525 		 * Round up to power of two to maximize the available
2526 		 * VM size with the given page table size.
2527 		 */
2528 		si_meminfo(&si);
2529 		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2530 			       (1 << 30) - 1) >> 30;
2531 		vm_size = roundup_pow_of_two(
2532 			min(max(phys_ram_gb * 3, min_vm_size), max_size));
2533 	}
2534 
2535 	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2536 
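	/*
	 * max_pfn above is the VM size in GPU pages: vm_size is in GB and a
	 * GPU page is 4K, hence the shift by 18.  Each page table level below
	 * resolves 9 bits of that page number, which is how the number of
	 * levels is derived (optionally biased by the amdgpu_vm_block_size
	 * module override).
	 */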
2537 	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2538 	if (amdgpu_vm_block_size != -1)
2539 		tmp >>= amdgpu_vm_block_size - 9;
2540 	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2541 	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2542 	switch (adev->vm_manager.num_level) {
2543 	case 3:
2544 		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2545 		break;
2546 	case 2:
2547 		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2548 		break;
2549 	case 1:
2550 		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2551 		break;
2552 	default:
2553 		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2554 	}
2555 	/* block size depends on vm size and hw setup */
2556 	if (amdgpu_vm_block_size != -1)
2557 		adev->vm_manager.block_size =
2558 			min((unsigned)amdgpu_vm_block_size, max_bits
2559 			    - AMDGPU_GPU_PAGE_SHIFT
2560 			    - 9 * adev->vm_manager.num_level);
2561 	else if (adev->vm_manager.num_level > 1)
2562 		adev->vm_manager.block_size = 9;
2563 	else
2564 		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2565 
2566 	if (amdgpu_vm_fragment_size == -1)
2567 		adev->vm_manager.fragment_size = fragment_size_default;
2568 	else
2569 		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2570 
2571 	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2572 		 vm_size, adev->vm_manager.num_level + 1,
2573 		 adev->vm_manager.block_size,
2574 		 adev->vm_manager.fragment_size);
2575 }
2576 
2577 /**
2578  * amdgpu_vm_init - initialize a vm instance
2579  *
2580  * @adev: amdgpu_device pointer
2581  * @vm: requested vm
2582  * @vm_context: Indicates if it is a GFX or Compute context
2583  * @pasid: Process address space identifier
2584  *
2585  * Init @vm fields.
2586  *
2587  * Returns:
2588  * 0 for success, error for failure.
2589  */
2590 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2591 		   int vm_context, unsigned int pasid)
2592 {
2593 	struct amdgpu_bo_param bp;
2594 	struct amdgpu_bo *root;
2595 	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
2596 		AMDGPU_VM_PTE_COUNT(adev) * 8);
2597 	unsigned ring_instance;
2598 	struct amdgpu_ring *ring;
2599 	struct drm_sched_rq *rq;
2600 	unsigned long size;
2601 	uint64_t flags;
2602 	int r, i;
2603 
2604 	vm->va = RB_ROOT_CACHED;
2605 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2606 		vm->reserved_vmid[i] = NULL;
2607 	INIT_LIST_HEAD(&vm->evicted);
2608 	INIT_LIST_HEAD(&vm->relocated);
2609 	spin_lock_init(&vm->moved_lock);
2610 	INIT_LIST_HEAD(&vm->moved);
2611 	INIT_LIST_HEAD(&vm->idle);
2612 	INIT_LIST_HEAD(&vm->freed);
2613 
2614 	/* create scheduler entity for page table updates */
2615 
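	/*
	 * The PTE ring is picked round-robin so that page table updates from
	 * different VMs are spread across the available update rings instead
	 * of serializing on a single one.
	 */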
2616 	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
2617 	ring_instance %= adev->vm_manager.vm_pte_num_rings;
2618 	ring = adev->vm_manager.vm_pte_rings[ring_instance];
2619 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
2620 	r = drm_sched_entity_init(&vm->entity, &rq, 1, NULL);
2621 	if (r)
2622 		return r;
2623 
2624 	vm->pte_support_ats = false;
2625 
2626 	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
2627 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2628 						AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2629 
2630 		if (adev->asic_type == CHIP_RAVEN)
2631 			vm->pte_support_ats = true;
2632 	} else {
2633 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2634 						AMDGPU_VM_USE_CPU_FOR_GFX);
2635 	}
2636 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2637 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2638 	WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2639 		  "CPU update of VM recommended only for large BAR system\n");
2640 	vm->last_update = NULL;
2641 
2642 	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
2643 	if (vm->use_cpu_for_update)
2644 		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
2645 	else if (vm_context != AMDGPU_VM_CONTEXT_COMPUTE)
2646 		flags |= AMDGPU_GEM_CREATE_SHADOW;
2647 
2648 	size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
2649 	memset(&bp, 0, sizeof(bp));
2650 	bp.size = size;
2651 	bp.byte_align = align;
2652 	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
2653 	bp.flags = flags;
2654 	bp.type = ttm_bo_type_kernel;
2655 	bp.resv = NULL;
2656 	r = amdgpu_bo_create(adev, &bp, &root);
2657 	if (r)
2658 		goto error_free_sched_entity;
2659 
2660 	r = amdgpu_bo_reserve(root, true);
2661 	if (r)
2662 		goto error_free_root;
2663 
2664 	r = amdgpu_vm_clear_bo(adev, vm, root,
2665 			       adev->vm_manager.root_level,
2666 			       vm->pte_support_ats);
2667 	if (r)
2668 		goto error_unreserve;
2669 
2670 	amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
2671 	amdgpu_bo_unreserve(vm->root.base.bo);
2672 
2673 	if (pasid) {
2674 		unsigned long flags;
2675 
2676 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2677 		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2678 			      GFP_ATOMIC);
2679 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2680 		if (r < 0)
2681 			goto error_free_root;
2682 
2683 		vm->pasid = pasid;
2684 	}
2685 
2686 	INIT_KFIFO(vm->faults);
2687 	vm->fault_credit = 16;
2688 
2689 	return 0;
2690 
2691 error_unreserve:
2692 	amdgpu_bo_unreserve(vm->root.base.bo);
2693 
2694 error_free_root:
2695 	amdgpu_bo_unref(&vm->root.base.bo->shadow);
2696 	amdgpu_bo_unref(&vm->root.base.bo);
2697 	vm->root.base.bo = NULL;
2698 
2699 error_free_sched_entity:
2700 	drm_sched_entity_destroy(&vm->entity);
2701 
2702 	return r;
2703 }
2704 
2705 /**
2706  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2707  *
2708  * @adev: amdgpu_device pointer
2709  * @vm: requested vm
2710  *
2711  * This only works on GFX VMs that don't have any BOs added and no
2712  * page tables allocated yet.
2713  *
2714  * Changes the following VM parameters:
2715  * - use_cpu_for_update
2716  * - pte_supports_ats
2717  * - pasid (old PASID is released, because compute manages its own PASIDs)
2718  *
2719  * Reinitializes the page directory to reflect the changed ATS
2720  * setting.
2721  *
2722  * Returns:
2723  * 0 for success, -errno for errors.
2724  */
2725 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2726 {
2727 	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
2728 	int r;
2729 
2730 	r = amdgpu_bo_reserve(vm->root.base.bo, true);
2731 	if (r)
2732 		return r;
2733 
2734 	/* Sanity checks */
2735 	if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
2736 		r = -EINVAL;
2737 		goto error;
2738 	}
2739 
2740 	/* Check if PD needs to be reinitialized and do it before
2741 	 * changing any other state, in case it fails.
2742 	 */
2743 	if (pte_support_ats != vm->pte_support_ats) {
2744 		r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
2745 			       adev->vm_manager.root_level,
2746 			       pte_support_ats);
2747 		if (r)
2748 			goto error;
2749 	}
2750 
2751 	/* Update VM state */
2752 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2753 				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2754 	vm->pte_support_ats = pte_support_ats;
2755 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2756 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2757 	WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2758 		  "CPU update of VM recommended only for large BAR system\n");
2759 
2760 	if (vm->pasid) {
2761 		unsigned long flags;
2762 
2763 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2764 		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2765 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2766 
2767 		vm->pasid = 0;
2768 	}
2769 
2770 	/* Free the shadow bo for compute VM */
2771 	amdgpu_bo_unref(&vm->root.base.bo->shadow);
2772 
2773 error:
2774 	amdgpu_bo_unreserve(vm->root.base.bo);
2775 	return r;
2776 }
2777 
2778 /**
2779  * amdgpu_vm_free_levels - free PD/PT levels
2780  *
2781  * @adev: amdgpu device structure
2782  * @parent: PD/PT starting level to free
2783  * @level: level of parent structure
2784  *
2785  * Free the page directory or page table level and all sub levels.
2786  */
2787 static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
2788 				  struct amdgpu_vm_pt *parent,
2789 				  unsigned level)
2790 {
2791 	unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);
2792 
2793 	if (parent->base.bo) {
2794 		list_del(&parent->base.bo_list);
2795 		list_del(&parent->base.vm_status);
2796 		amdgpu_bo_unref(&parent->base.bo->shadow);
2797 		amdgpu_bo_unref(&parent->base.bo);
2798 	}
2799 
2800 	if (parent->entries)
2801 		for (i = 0; i < num_entries; i++)
2802 			amdgpu_vm_free_levels(adev, &parent->entries[i],
2803 					      level + 1);
2804 
2805 	kvfree(parent->entries);
2806 }
2807 
2808 /**
2809  * amdgpu_vm_fini - tear down a vm instance
2810  *
2811  * @adev: amdgpu_device pointer
2812  * @vm: requested vm
2813  *
2814  * Tear down @vm.
2815  * Unbind the VM and remove all bos from the vm bo list
2816  */
2817 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2818 {
2819 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2820 	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2821 	struct amdgpu_bo *root;
2822 	u64 fault;
2823 	int i, r;
2824 
2825 	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2826 
2827 	/* Clear pending page faults from IH when the VM is destroyed */
2828 	while (kfifo_get(&vm->faults, &fault))
2829 		amdgpu_ih_clear_fault(adev, fault);
2830 
2831 	if (vm->pasid) {
2832 		unsigned long flags;
2833 
2834 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2835 		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2836 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2837 	}
2838 
2839 	drm_sched_entity_destroy(&vm->entity);
2840 
2841 	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
2842 		dev_err(adev->dev, "still active bo inside vm\n");
2843 	}
2844 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2845 					     &vm->va.rb_root, rb) {
2846 		list_del(&mapping->list);
2847 		amdgpu_vm_it_remove(mapping, &vm->va);
2848 		kfree(mapping);
2849 	}
2850 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2851 		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2852 			amdgpu_vm_prt_fini(adev, vm);
2853 			prt_fini_needed = false;
2854 		}
2855 
2856 		list_del(&mapping->list);
2857 		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2858 	}
2859 
2860 	root = amdgpu_bo_ref(vm->root.base.bo);
2861 	r = amdgpu_bo_reserve(root, true);
2862 	if (r) {
2863 		dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
2864 	} else {
2865 		amdgpu_vm_free_levels(adev, &vm->root,
2866 				      adev->vm_manager.root_level);
2867 		amdgpu_bo_unreserve(root);
2868 	}
2869 	amdgpu_bo_unref(&root);
2870 	dma_fence_put(vm->last_update);
2871 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2872 		amdgpu_vmid_free_reserved(adev, vm, i);
2873 }
2874 
2875 /**
2876  * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
2877  *
2878  * @adev: amdgpu_device pointer
2879  * @pasid: PASID to identify the VM
2880  *
2881  * This function is expected to be called in interrupt context.
2882  *
2883  * Returns:
2884  * True if there was fault credit, false otherwise
2885  */
2886 bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
2887 				  unsigned int pasid)
2888 {
2889 	struct amdgpu_vm *vm;
2890 
2891 	spin_lock(&adev->vm_manager.pasid_lock);
2892 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
2893 	if (!vm) {
2894 		/* VM not found, can't track fault credit */
2895 		spin_unlock(&adev->vm_manager.pasid_lock);
2896 		return true;
2897 	}
2898 
2899 	/* No lock needed. Only accessed by IRQ handler */
2900 	if (!vm->fault_credit) {
2901 		/* Too many faults in this VM */
2902 		spin_unlock(&adev->vm_manager.pasid_lock);
2903 		return false;
2904 	}
2905 
2906 	vm->fault_credit--;
2907 	spin_unlock(&adev->vm_manager.pasid_lock);
2908 	return true;
2909 }
2910 
2911 /**
2912  * amdgpu_vm_manager_init - init the VM manager
2913  *
2914  * @adev: amdgpu_device pointer
2915  *
2916  * Initialize the VM manager structures
2917  */
2918 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2919 {
2920 	unsigned i;
2921 
2922 	amdgpu_vmid_mgr_init(adev);
2923 
2924 	adev->vm_manager.fence_context =
2925 		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2926 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2927 		adev->vm_manager.seqno[i] = 0;
2928 
2929 	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
2930 	spin_lock_init(&adev->vm_manager.prt_lock);
2931 	atomic_set(&adev->vm_manager.num_prt_users, 0);
2932 
2933 	/* If not overridden by the user, compute VM tables are by default
2934 	 * updated by the CPU only on large BAR systems.
2935 	 */
2936 #ifdef CONFIG_X86_64
2937 	if (amdgpu_vm_update_mode == -1) {
2938 		if (amdgpu_gmc_vram_full_visible(&adev->gmc))
2939 			adev->vm_manager.vm_update_mode =
2940 				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2941 		else
2942 			adev->vm_manager.vm_update_mode = 0;
2943 	} else
2944 		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2945 #else
2946 	adev->vm_manager.vm_update_mode = 0;
2947 #endif
2948 
2949 	idr_init(&adev->vm_manager.pasid_idr);
2950 	spin_lock_init(&adev->vm_manager.pasid_lock);
2951 }
2952 
2953 /**
2954  * amdgpu_vm_manager_fini - cleanup VM manager
2955  *
2956  * @adev: amdgpu_device pointer
2957  *
2958  * Cleanup the VM manager and free resources.
2959  */
2960 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2961 {
2962 	WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
2963 	idr_destroy(&adev->vm_manager.pasid_idr);
2964 
2965 	amdgpu_vmid_mgr_fini(adev);
2966 }
2967 
2968 /**
2969  * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2970  *
2971  * @dev: drm device pointer
2972  * @data: drm_amdgpu_vm
2973  * @filp: drm file pointer
2974  *
2975  * Returns:
2976  * 0 for success, -errno for errors.
2977  */
2978 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2979 {
2980 	union drm_amdgpu_vm *args = data;
2981 	struct amdgpu_device *adev = dev->dev_private;
2982 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
2983 	int r;
2984 
2985 	switch (args->in.op) {
2986 	case AMDGPU_VM_OP_RESERVE_VMID:
2987 		/* currently, we only have the requirement to reserve a vmid from the gfxhub */
2988 		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
2989 		if (r)
2990 			return r;
2991 		break;
2992 	case AMDGPU_VM_OP_UNRESERVE_VMID:
2993 		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
2994 		break;
2995 	default:
2996 		return -EINVAL;
2997 	}
2998 
2999 	return 0;
3000 }
3001 
3002 /**
3003  * amdgpu_vm_get_task_info - Extracts task info for a PASID.
3004  *
3005  * @adev: amdgpu device pointer
3006  * @pasid: PASID identifier for VM
3007  * @task_info: task_info to fill.
3008  */
3009 void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
3010 			 struct amdgpu_task_info *task_info)
3011 {
3012 	struct amdgpu_vm *vm;
3013 
3014 	spin_lock(&adev->vm_manager.pasid_lock);
3015 
3016 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
3017 	if (vm)
3018 		*task_info = vm->task_info;
3019 
3020 	spin_unlock(&adev->vm_manager.pasid_lock);
3021 }
3022 
3023 /**
3024  * amdgpu_vm_set_task_info - Sets VMs task info.
3025  *
3026  * @vm: vm for which to set the info
3027  */
3028 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
3029 {
3030 	if (!vm->task_info.pid) {
3031 		vm->task_info.pid = current->pid;
3032 		get_task_comm(vm->task_info.task_name, current);
3033 
3034 		if (current->group_leader->mm == current->mm) {
3035 			vm->task_info.tgid = current->group_leader->pid;
3036 			get_task_comm(vm->task_info.process_name, current->group_leader);
3037 		}
3038 	}
3039 }
3040