/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "kfd2kgd: " fmt

#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <drm/drmP.h>
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"

/* Special VM and GART address alignment needed for VI pre-Fiji due to
 * a HW bug.
 */
#define VI_BO_SIZE_ALIGN (0x8000)

/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)

/* Userptr restore delay, just long enough to allow consecutive VM
 * changes to accumulate
 */
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1

/* Impose limit on how much memory KFD can use */
static struct {
	uint64_t max_system_mem_limit;
	uint64_t max_userptr_mem_limit;
	int64_t system_mem_used;
	int64_t userptr_mem_used;
	spinlock_t mem_limit_lock;
} kfd_mem_limit;

/* Struct used for amdgpu_amdkfd_bo_validate */
struct amdgpu_vm_parser {
	uint32_t        domain;
	bool            wait;
};

static const char * const domain_bit_to_string[] = {
		"CPU",
		"GTT",
		"VRAM",
		"GDS",
		"GWS",
		"OA"
};

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]

static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);


static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

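/* Returns true if the BO still needs to be added to the given VM,
 * i.e. no bo_va entry for that VM exists yet on mem->bo_va_list.
 */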
static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
		struct kgd_mem *mem)
{
	struct kfd_bo_va_list *entry;

	list_for_each_entry(entry, &mem->bo_va_list, bo_list)
		if (entry->bo_va->base.vm == avm)
			return false;

	return true;
}

/* Set memory usage limits. Currently, the limits are:
 *  System (kernel) memory - 3/8th of system RAM
 *  Userptr memory - 3/4th of system RAM
 */
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
	struct sysinfo si;
	uint64_t mem;

	si_meminfo(&si);
	mem = si.totalram - si.totalhigh;
	mem *= si.mem_unit;

	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
	kfd_mem_limit.max_system_mem_limit = (mem >> 1) - (mem >> 3);
	kfd_mem_limit.max_userptr_mem_limit = mem - (mem >> 2);
	pr_debug("Kernel memory limit %lluM, userptr limit %lluM\n",
		(kfd_mem_limit.max_system_mem_limit >> 20),
		(kfd_mem_limit.max_userptr_mem_limit >> 20));
}

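/* Check a requested allocation against the limits above and account
 * it. GTT allocations charge their size plus the TTM accounting
 * overhead against the system memory limit; userptr allocations
 * (alloc domain CPU) charge only the accounting overhead there and
 * their size against the userptr limit.
 */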
static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev,
					      uint64_t size, u32 domain)
{
	size_t acc_size;
	int ret = 0;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	spin_lock(&kfd_mem_limit.mem_limit_lock);
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		if (kfd_mem_limit.system_mem_used + (acc_size + size) >
			kfd_mem_limit.max_system_mem_limit) {
			ret = -ENOMEM;
			goto err_no_mem;
		}
		kfd_mem_limit.system_mem_used += (acc_size + size);
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
		if ((kfd_mem_limit.system_mem_used + acc_size >
			kfd_mem_limit.max_system_mem_limit) ||
			(kfd_mem_limit.userptr_mem_used + (size + acc_size) >
			kfd_mem_limit.max_userptr_mem_limit)) {
			ret = -ENOMEM;
			goto err_no_mem;
		}
		kfd_mem_limit.system_mem_used += acc_size;
		kfd_mem_limit.userptr_mem_used += size;
	}
err_no_mem:
	spin_unlock(&kfd_mem_limit.mem_limit_lock);
	return ret;
}

static void unreserve_system_mem_limit(struct amdgpu_device *adev,
				       uint64_t size, u32 domain)
{
	size_t acc_size;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	spin_lock(&kfd_mem_limit.mem_limit_lock);
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
		kfd_mem_limit.system_mem_used -= acc_size;
		kfd_mem_limit.userptr_mem_used -= size;
	}
	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
		  "kfd system memory accounting unbalanced");
	WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
		  "kfd userptr memory accounting unbalanced");

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
}

void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
{
	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
		kfd_mem_limit.system_mem_used -= bo->tbo.acc_size;
		kfd_mem_limit.userptr_mem_used -= amdgpu_bo_size(bo);
	} else if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) {
		kfd_mem_limit.system_mem_used -=
			(bo->tbo.acc_size + amdgpu_bo_size(bo));
	}
	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
		  "kfd system memory accounting unbalanced");
	WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
		  "kfd userptr memory accounting unbalanced");

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
}


/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence(s) from BO's
 *  reservation object.
 *
 * @bo: [IN] Remove eviction fence(s) from this BO
 * @ef: [IN] If ef is specified, then this eviction fence is removed if it
 *  is present in the shared list.
 * @ef_list: [OUT] Returns list of eviction fences. These fences are removed
 *  from BO's reservation object shared list.
 * @ef_count: [OUT] Number of fences in ef_list.
 *
 * NOTE: If called with ef_list, then amdgpu_amdkfd_add_eviction_fence must be
 *  called to restore the eviction fences and to avoid a memory leak. This is
 *  useful for shared BOs.
 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
 */
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
					struct amdgpu_amdkfd_fence *ef,
					struct amdgpu_amdkfd_fence ***ef_list,
					unsigned int *ef_count)
{
	struct reservation_object *resv = bo->tbo.resv;
	struct reservation_object_list *old, *new;
	unsigned int i, j, k;

	if (!ef && !ef_list)
		return -EINVAL;

	if (ef_list) {
		*ef_list = NULL;
		*ef_count = 0;
	}

	old = reservation_object_get_list(resv);
	if (!old)
		return 0;

	new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
		      GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	/* Go through all the shared fences in the reservation object and sort
	 * the interesting ones to the end of the list.
	 */
	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(old->shared[i],
					      reservation_object_held(resv));

		if ((ef && f->context == ef->base.context) ||
		    (!ef && to_amdgpu_amdkfd_fence(f)))
			RCU_INIT_POINTER(new->shared[--j], f);
		else
			RCU_INIT_POINTER(new->shared[k++], f);
	}
	new->shared_max = old->shared_max;
	new->shared_count = k;

	if (!ef) {
		unsigned int count = old->shared_count - j;

		/* Alloc memory for count number of eviction fence pointers.
		 * Fill the ef_list array and ef_count
		 */
		*ef_list = kcalloc(count, sizeof(**ef_list), GFP_KERNEL);
		*ef_count = count;

		if (!*ef_list) {
			kfree(new);
			return -ENOMEM;
		}
	}

	/* Install the new fence list, seqcount provides the barriers */
	preempt_disable();
	write_seqcount_begin(&resv->seq);
	RCU_INIT_POINTER(resv->fence, new);
	write_seqcount_end(&resv->seq);
	preempt_enable();

	/* Drop the references to the removed fences or move them to ef_list */
	for (i = j, k = 0; i < old->shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(new->shared[i],
					      reservation_object_held(resv));
		if (!ef)
			(*ef_list)[k++] = to_amdgpu_amdkfd_fence(f);
		else
			dma_fence_put(f);
	}
	kfree_rcu(old, rcu);

	return 0;
}

/* amdgpu_amdkfd_add_eviction_fence - Adds eviction fence(s) back into BO's
 *  reservation object.
 *
 * @bo: [IN] Add eviction fences to this BO
 * @ef_list: [IN] List of eviction fences to be added
 * @ef_count: [IN] Number of fences in ef_list.
 *
 * NOTE: Must call amdgpu_amdkfd_remove_eviction_fence before calling this
 *  function.
 */
static void amdgpu_amdkfd_add_eviction_fence(struct amdgpu_bo *bo,
				struct amdgpu_amdkfd_fence **ef_list,
				unsigned int ef_count)
{
	int i;

	if (!ef_list || !ef_count)
		return;

	for (i = 0; i < ef_count; i++) {
		amdgpu_bo_fence(bo, &ef_list[i]->base, true);
		/* Re-adding the fence takes an additional reference. Drop that
		 * reference.
		 */
		dma_fence_put(&ef_list[i]->base);
	}

	kfree(ef_list);
}

static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
				     bool wait)
{
	struct ttm_operation_ctx ctx = { false, false };
	int ret;

	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
		 "Called with userptr BO"))
		return -EINVAL;

	amdgpu_bo_placement_from_domain(bo, domain);

	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto validate_fail;
	if (wait) {
		struct amdgpu_amdkfd_fence **ef_list;
		unsigned int ef_count;

		ret = amdgpu_amdkfd_remove_eviction_fence(bo, NULL, &ef_list,
							  &ef_count);
		if (ret)
			goto validate_fail;

		ttm_bo_wait(&bo->tbo, false, false);
		amdgpu_amdkfd_add_eviction_fence(bo, ef_list, ef_count);
	}

validate_fail:
	return ret;
}

static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_vm_parser *p = param;

	return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
}

/* vm_validate_pt_pd_bos - Validate page table and directory BOs
 *
 * Page directories are not updated here because huge page handling
 * during page table updates can invalidate page directory entries
 * again. Page directories are only updated after updating page
 * tables.
 */
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
{
	struct amdgpu_bo *pd = vm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	struct amdgpu_vm_parser param;
	uint64_t addr, flags = AMDGPU_PTE_VALID;
	int ret;

	param.domain = AMDGPU_GEM_DOMAIN_VRAM;
	param.wait = false;

	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
					&param);
	if (ret) {
		pr_err("amdgpu: failed to validate PT BOs\n");
		return ret;
	}

	ret = amdgpu_amdkfd_validate(&param, pd);
	if (ret) {
		pr_err("amdgpu: failed to validate PD\n");
		return ret;
	}

	addr = amdgpu_bo_gpu_offset(vm->root.base.bo);
	amdgpu_gmc_get_vm_pde(adev, -1, &addr, &flags);
	vm->pd_phys_addr = addr;

	if (vm->use_cpu_for_update) {
		ret = amdgpu_bo_kmap(pd, NULL);
		if (ret) {
			pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
			 struct dma_fence *f)
{
	int ret = amdgpu_sync_fence(adev, sync, f, false);

	/* Sync objects can't handle multiple GPUs (contexts) updating
	 * sync->last_vm_update. Fortunately we don't need it for
	 * KFD's purposes, so we can just drop that fence.
	 */
	if (sync->last_vm_update) {
		dma_fence_put(sync->last_vm_update);
		sync->last_vm_update = NULL;
	}

	return ret;
}

static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo *pd = vm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	int ret;

	ret = amdgpu_vm_update_directories(adev, vm);
	if (ret)
		return ret;

	return sync_vm_fence(adev, sync, vm->last_update);
}

/* add_bo_to_vm - Add a BO to a VM
 *
 * Everything that needs to be done only once when a BO is first added
 * to a VM. It can later be mapped and unmapped many times without
 * repeating these steps.
 *
 * 1. Allocate and initialize BO VA entry data structure
 * 2. Add BO to the VM
 * 3. Determine ASIC-specific PTE flags
 * 4. Alloc page tables and directories if needed
 * 4a.  Validate new page tables and directories
 */
static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
		struct amdgpu_vm *vm, bool is_aql,
		struct kfd_bo_va_list **p_bo_va_entry)
{
	int ret;
	struct kfd_bo_va_list *bo_va_entry;
	struct amdgpu_bo *pd = vm->root.base.bo;
	struct amdgpu_bo *bo = mem->bo;
	uint64_t va = mem->va;
	struct list_head *list_bo_va = &mem->bo_va_list;
	unsigned long bo_size = bo->tbo.mem.size;

	if (!va) {
		pr_err("Invalid VA when adding BO to VM\n");
		return -EINVAL;
	}

	if (is_aql)
		va += bo_size;

	bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
	if (!bo_va_entry)
		return -ENOMEM;

	pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
			va + bo_size, vm);

	/* Add BO to VM internal data structures */
	bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!bo_va_entry->bo_va) {
		ret = -EINVAL;
		pr_err("Failed to add BO object to VM. ret == %d\n",
				ret);
		goto err_vmadd;
	}

	bo_va_entry->va = va;
	bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev,
							 mem->mapping_flags);
	bo_va_entry->kgd_dev = (void *)adev;
	list_add(&bo_va_entry->bo_list, list_bo_va);

	if (p_bo_va_entry)
		*p_bo_va_entry = bo_va_entry;

	/* Allocate new page tables if needed and validate
	 * them. Clearing of new page tables and validation need to wait
	 * on move fences. We don't want that to trigger the eviction
	 * fence, so remove it temporarily.
	 */
	amdgpu_amdkfd_remove_eviction_fence(pd,
					vm->process_info->eviction_fence,
					NULL, NULL);

	ret = amdgpu_vm_alloc_pts(adev, vm, va, amdgpu_bo_size(bo));
	if (ret) {
		pr_err("Failed to allocate pts, err=%d\n", ret);
		goto err_alloc_pts;
	}

	ret = vm_validate_pt_pd_bos(vm);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto err_alloc_pts;
	}

	/* Add the eviction fence back */
	amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);

	return 0;

err_alloc_pts:
	amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
	amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
	list_del(&bo_va_entry->bo_list);
err_vmadd:
	kfree(bo_va_entry);
	return ret;
}

static void remove_bo_from_vm(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry, unsigned long size)
{
	pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
			entry->va,
			entry->va + size, entry);
	amdgpu_vm_bo_rmv(adev, entry->bo_va);
	list_del(&entry->bo_list);
	kfree(entry);
}

static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
				struct amdkfd_process_info *process_info,
				bool userptr)
{
	struct ttm_validate_buffer *entry = &mem->validate_list;
	struct amdgpu_bo *bo = mem->bo;

	INIT_LIST_HEAD(&entry->head);
	entry->shared = true;
	entry->bo = &bo->tbo;
	mutex_lock(&process_info->lock);
	if (userptr)
		list_add_tail(&entry->head, &process_info->userptr_valid_list);
	else
		list_add_tail(&entry->head, &process_info->kfd_bo_list);
	mutex_unlock(&process_info->lock);
}

/* Initializes user pages. It registers the MMU notifier and validates
 * the userptr BO in the GTT domain.
 *
 * The BO must already be on the userptr_valid_list. Otherwise an
 * eviction and restore may happen that leaves the new BO unmapped
 * with the user mode queues running.
 *
 * Takes the process_info->lock to protect against concurrent restore
 * workers.
 *
 * Returns 0 for success, negative errno for errors.
 */
static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
			   uint64_t user_addr)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	struct amdgpu_bo *bo = mem->bo;
	struct ttm_operation_ctx ctx = { true, false };
	int ret = 0;

	mutex_lock(&process_info->lock);

	ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
	if (ret) {
		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
		goto out;
	}

	ret = amdgpu_mn_register(bo, user_addr);
	if (ret) {
		pr_err("%s: Failed to register MMU notifier: %d\n",
		       __func__, ret);
		goto out;
	}

	/* If no restore worker is running concurrently, user_pages
	 * should not be allocated
	 */
	WARN(mem->user_pages, "Leaking user_pages array");

	mem->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
					   sizeof(struct page *),
					   GFP_KERNEL | __GFP_ZERO);
	if (!mem->user_pages) {
		pr_err("%s: Failed to allocate pages array\n", __func__);
		ret = -ENOMEM;
		goto unregister_out;
	}

	ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, mem->user_pages);
	if (ret) {
		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
		goto free_out;
	}

	amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->user_pages);

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("%s: Failed to reserve BO\n", __func__);
		goto release_out;
	}
	amdgpu_bo_placement_from_domain(bo, mem->domain);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		pr_err("%s: failed to validate BO\n", __func__);
	amdgpu_bo_unreserve(bo);

release_out:
	if (ret)
		release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
free_out:
	kvfree(mem->user_pages);
	mem->user_pages = NULL;
unregister_out:
	if (ret)
		amdgpu_mn_unregister(bo);
out:
	mutex_unlock(&process_info->lock);
	return ret;
}

/* Reserving a BO and its page table BOs must happen atomically to
 * avoid deadlocks. Some operations update multiple VMs at once. Track
 * all the reservation info in a context structure. Optionally a sync
 * object can track VM updates.
 */
struct bo_vm_reservation_context {
	struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
	unsigned int n_vms;		    /* Number of VMs reserved	    */
	struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
	struct ww_acquire_ctx ticket;	    /* Reservation ticket	    */
	struct list_head list, duplicates;  /* BO lists			    */
	struct amdgpu_sync *sync;	    /* Pointer to sync object	    */
	bool reserved;			    /* Whether BOs are reserved	    */
};

enum bo_vm_match {
	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
	BO_VM_MAPPED,		/* Match VMs where a BO is mapped     */
	BO_VM_ALL,		/* Match all VMs a BO was added to    */
};

/**
 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
 * @mem: KFD BO structure.
 * @vm: the VM to reserve.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 */
static int reserve_bo_and_vm(struct kgd_mem *mem,
			      struct amdgpu_vm *vm,
			      struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	int ret;

	WARN_ON(!vm);

	ctx->reserved = false;
	ctx->n_vms = 1;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
	if (!ctx->vm_pd)
		return -ENOMEM;

	ctx->kfd_bo.robj = bo;
	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.shared = true;
	ctx->kfd_bo.user_pages = NULL;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (!ret)
		ctx->reserved = true;
	else {
		pr_err("Failed to reserve buffers in ttm\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
	}

	return ret;
}

/**
 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
 * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, all VMs the BO was added to are
 * considered. Otherwise, only the given VM is considered.
 * @map_type: the mapping status that will be used to filter the VMs.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 *
 * Returns 0 for success, negative for failure.
 */
static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
				struct amdgpu_vm *vm, enum bo_vm_match map_type,
				struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	struct kfd_bo_va_list *entry;
	unsigned int i;
	int ret;

	ctx->reserved = false;
	ctx->n_vms = 0;
	ctx->vm_pd = NULL;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		ctx->n_vms++;
	}

	if (ctx->n_vms != 0) {
		ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
				     GFP_KERNEL);
		if (!ctx->vm_pd)
			return -ENOMEM;
	}

	ctx->kfd_bo.robj = bo;
	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.shared = true;
	ctx->kfd_bo.user_pages = NULL;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	i = 0;
	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
				&ctx->vm_pd[i]);
		i++;
	}

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (!ret)
		ctx->reserved = true;
	else
		pr_err("Failed to reserve buffers in ttm.\n");

	if (ret) {
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
	}

	return ret;
}

/**
 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
 * @ctx: Reservation context to unreserve
 * @wait: Optionally wait for a sync object representing pending VM updates
 * @intr: Whether the wait is interruptible
 *
 * Also frees any resources allocated in
 * reserve_bo_and_(cond_)vm(s). Returns the status from
 * amdgpu_sync_wait.
 */
static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
				 bool wait, bool intr)
{
	int ret = 0;

	if (wait)
		ret = amdgpu_sync_wait(ctx->sync, intr);

	if (ctx->reserved)
		ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
	kfree(ctx->vm_pd);

	ctx->sync = NULL;

	ctx->reserved = false;
	ctx->vm_pd = NULL;

	return ret;
}

static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
				struct kfd_bo_va_list *entry,
				struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = entry->bo_va;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo *pd = vm->root.base.bo;

	/* Remove eviction fence from PD (and thereby from PTs too as
	 * they share the resv. object). Otherwise during PT update
	 * job (see amdgpu_vm_bo_update_mapping), eviction fence would
	 * get added to job->sync object and job execution would
	 * trigger the eviction fence.
	 */
	amdgpu_amdkfd_remove_eviction_fence(pd,
					    vm->process_info->eviction_fence,
					    NULL, NULL);
	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);

	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);

	/* Add the eviction fence back */
	amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);

	sync_vm_fence(adev, sync, bo_va->last_pt_update);

	return 0;
}

static int update_gpuvm_pte(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry,
		struct amdgpu_sync *sync)
{
	int ret;
	struct amdgpu_vm *vm;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;

	bo_va = entry->bo_va;
	vm = bo_va->base.vm;
	bo = bo_va->base.bo;

	/* Update the page tables */
	ret = amdgpu_vm_bo_update(adev, bo_va, false);
	if (ret) {
		pr_err("amdgpu_vm_bo_update failed\n");
		return ret;
	}

	return sync_vm_fence(adev, sync, bo_va->last_pt_update);
}

static int map_bo_to_gpuvm(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
		bool no_update_pte)
{
	int ret;

	/* Set virtual address for the allocation */
	ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
			       amdgpu_bo_size(entry->bo_va->base.bo),
			       entry->pte_flags);
	if (ret) {
		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
				entry->va, ret);
		return ret;
	}

	if (no_update_pte)
		return 0;

	ret = update_gpuvm_pte(adev, entry, sync);
	if (ret) {
		pr_err("update_gpuvm_pte() failed\n");
		goto update_gpuvm_pte_failed;
	}

	return 0;

update_gpuvm_pte_failed:
	unmap_bo_from_gpuvm(adev, entry, sync);
	return ret;
}

static int process_validate_vms(struct amdkfd_process_info *process_info)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_validate_pt_pd_bos(peer_vm);
		if (ret)
			return ret;
	}

	return 0;
}

static int process_update_pds(struct amdkfd_process_info *process_info,
			      struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_update_pds(peer_vm, sync);
		if (ret)
			return ret;
	}

	return 0;
}

static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
		       struct dma_fence **ef)
{
	struct amdkfd_process_info *info = NULL;
	int ret;

	if (!*process_info) {
		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		mutex_init(&info->lock);
		INIT_LIST_HEAD(&info->vm_list_head);
		INIT_LIST_HEAD(&info->kfd_bo_list);
		INIT_LIST_HEAD(&info->userptr_valid_list);
		INIT_LIST_HEAD(&info->userptr_inval_list);

		info->eviction_fence =
			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
						   current->mm);
		if (!info->eviction_fence) {
			pr_err("Failed to create eviction fence\n");
			ret = -ENOMEM;
			goto create_evict_fence_fail;
		}

		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
		atomic_set(&info->evicted_bos, 0);
		INIT_DELAYED_WORK(&info->restore_userptr_work,
				  amdgpu_amdkfd_restore_userptr_worker);

		*process_info = info;
		*ef = dma_fence_get(&info->eviction_fence->base);
	}

	vm->process_info = *process_info;

	/* Validate page directory and attach eviction fence */
	ret = amdgpu_bo_reserve(vm->root.base.bo, true);
	if (ret)
		goto reserve_pd_fail;
	ret = vm_validate_pt_pd_bos(vm);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto validate_pd_fail;
	}
	ret = ttm_bo_wait(&vm->root.base.bo->tbo, false, false);
	if (ret)
		goto wait_pd_fail;
	amdgpu_bo_fence(vm->root.base.bo,
			&vm->process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(vm->root.base.bo);

	/* Update process info */
	mutex_lock(&vm->process_info->lock);
	list_add_tail(&vm->vm_list_node,
			&(vm->process_info->vm_list_head));
	vm->process_info->n_vms++;
	mutex_unlock(&vm->process_info->lock);

	return 0;

wait_pd_fail:
validate_pd_fail:
	amdgpu_bo_unreserve(vm->root.base.bo);
reserve_pd_fail:
	vm->process_info = NULL;
	if (info) {
		/* Two fence references: one in info and one in *ef */
		dma_fence_put(&info->eviction_fence->base);
		dma_fence_put(*ef);
		*ef = NULL;
		*process_info = NULL;
		put_pid(info->pid);
create_evict_fence_fail:
		mutex_destroy(&info->lock);
		kfree(info);
	}
	return ret;
}

int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm,
					  void **process_info,
					  struct dma_fence **ef)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *new_vm;
	int ret;

	new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
	if (!new_vm)
		return -ENOMEM;

	/* Initialize AMDGPU part of the VM */
	ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, 0);
	if (ret) {
		pr_err("Failed init vm ret %d\n", ret);
		goto amdgpu_vm_init_fail;
	}

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(new_vm, process_info, ef);
	if (ret)
		goto init_kfd_vm_fail;

	*vm = (void *) new_vm;

	return 0;

init_kfd_vm_fail:
	amdgpu_vm_fini(adev, new_vm);
amdgpu_vm_init_fail:
	kfree(new_vm);
	return ret;
}

int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
					   struct file *filp,
					   void **vm, void **process_info,
					   struct dma_fence **ef)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct drm_file *drm_priv = filp->private_data;
	struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
	struct amdgpu_vm *avm = &drv_priv->vm;
	int ret;

	/* Already a compute VM? */
	if (avm->process_info)
		return -EINVAL;

	/* Convert VM into a compute VM */
	ret = amdgpu_vm_make_compute(adev, avm);
	if (ret)
		return ret;

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(avm, process_info, ef);
	if (ret)
		return ret;

	*vm = (void *)avm;

	return 0;
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdkfd_process_info *process_info = vm->process_info;
	struct amdgpu_bo *pd = vm->root.base.bo;

	if (!process_info)
		return;

	/* Release eviction fence from PD */
	amdgpu_bo_reserve(pd, false);
	amdgpu_bo_fence(pd, NULL, false);
	amdgpu_bo_unreserve(pd);

	/* Update process info */
	mutex_lock(&process_info->lock);
	process_info->n_vms--;
	list_del(&vm->vm_list_node);
	mutex_unlock(&process_info->lock);

	/* Release per-process resources when last compute VM is destroyed */
	if (!process_info->n_vms) {
		WARN_ON(!list_empty(&process_info->kfd_bo_list));
		WARN_ON(!list_empty(&process_info->userptr_valid_list));
		WARN_ON(!list_empty(&process_info->userptr_inval_list));

		dma_fence_put(&process_info->eviction_fence->base);
		cancel_delayed_work_sync(&process_info->restore_userptr_work);
		put_pid(process_info->pid);
		mutex_destroy(&process_info->lock);
		kfree(process_info);
	}
}

void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (WARN_ON(!kgd || !vm))
		return;

	pr_debug("Destroying process vm %p\n", vm);

	/* Release the VM context */
	amdgpu_vm_fini(adev, avm);
	kfree(vm);
}

uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
{
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
}

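/* amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu - Allocate a BO for KFD
 *
 * Picks the allocation domain from the KFD flags (VRAM, GTT or
 * userptr), accounts the size against the memory limits, creates the
 * BO and adds it to the process's KFD BO list. For userptr BOs the
 * user pages are fetched and the MMU notifier is registered via
 * init_user_pages().
 */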
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct kgd_dev *kgd, uint64_t va, uint64_t size,
		void *vm, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	uint64_t user_addr = 0;
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int byte_align;
	u32 domain, alloc_domain;
	u64 alloc_flags;
	uint32_t mapping_flags;
	int ret;

	/*
	 * Check on which domain to allocate BO
	 */
	if (flags & ALLOC_MEM_FLAGS_VRAM) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
		alloc_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
		alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
			AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	} else if (flags & ALLOC_MEM_FLAGS_GTT) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_flags = 0;
	} else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		alloc_flags = 0;
		if (!offset || !*offset)
			return -EINVAL;
		user_addr = *offset;
	} else {
		return -EINVAL;
	}

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;
	INIT_LIST_HEAD(&(*mem)->bo_va_list);
	mutex_init(&(*mem)->lock);
	(*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);

	/* Workaround for AQL queue wraparound bug. Map the same
	 * memory twice. That means we only actually allocate half
	 * the memory.
	 */
	if ((*mem)->aql_queue)
		size = size >> 1;

	/* Workaround for TLB bug on older VI chips */
	byte_align = (adev->family == AMDGPU_FAMILY_VI &&
			adev->asic_type != CHIP_FIJI &&
			adev->asic_type != CHIP_POLARIS10 &&
			adev->asic_type != CHIP_POLARIS11) ?
			VI_BO_SIZE_ALIGN : 1;

	mapping_flags = AMDGPU_VM_PAGE_READABLE;
	if (flags & ALLOC_MEM_FLAGS_WRITABLE)
		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
	if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
	if (flags & ALLOC_MEM_FLAGS_COHERENT)
		mapping_flags |= AMDGPU_VM_MTYPE_UC;
	else
		mapping_flags |= AMDGPU_VM_MTYPE_NC;
	(*mem)->mapping_flags = mapping_flags;

	amdgpu_sync_create(&(*mem)->sync);

	ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size, alloc_domain);
	if (ret) {
		pr_debug("Insufficient system memory\n");
		goto err_reserve_system_mem;
	}

	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
			va, size, domain_string(alloc_domain));

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = byte_align;
	bp.domain = alloc_domain;
	bp.flags = alloc_flags;
	bp.type = ttm_bo_type_device;
	bp.resv = NULL;
	ret = amdgpu_bo_create(adev, &bp, &bo);
	if (ret) {
		pr_debug("Failed to create BO on domain %s. ret %d\n",
				domain_string(alloc_domain), ret);
		goto err_bo_create;
	}
	bo->kfd_bo = *mem;
	(*mem)->bo = bo;
	if (user_addr)
		bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;

	(*mem)->va = va;
	(*mem)->domain = domain;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);

	if (user_addr) {
		ret = init_user_pages(*mem, current->mm, user_addr);
		if (ret) {
			mutex_lock(&avm->process_info->lock);
			list_del(&(*mem)->validate_list.head);
			mutex_unlock(&avm->process_info->lock);
			goto allocate_init_user_pages_failed;
		}
	}

	if (offset)
		*offset = amdgpu_bo_mmap_offset(bo);

	return 0;

allocate_init_user_pages_failed:
	amdgpu_bo_unref(&bo);
	/* Don't unreserve system mem limit twice */
	goto err_reserve_system_mem;
err_bo_create:
	unreserve_system_mem_limit(adev, size, alloc_domain);
err_reserve_system_mem:
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
	return ret;
}

int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	unsigned long bo_size = mem->bo->tbo.mem.size;
	struct kfd_bo_va_list *entry, *tmp;
	struct bo_vm_reservation_context ctx;
	struct ttm_validate_buffer *bo_list_entry;
	int ret;

	mutex_lock(&mem->lock);

	if (mem->mapped_to_gpu_memory > 0) {
		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
				mem->va, bo_size);
		mutex_unlock(&mem->lock);
		return -EBUSY;
	}

	mutex_unlock(&mem->lock);
	/* lock is not needed after this, since mem is unused and will
	 * be freed anyway
	 */

	/* No more MMU notifiers */
	amdgpu_mn_unregister(mem->bo);

	/* Make sure restore workers don't access the BO any more */
	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);

	/* Free user pages if necessary */
	if (mem->user_pages) {
		pr_debug("%s: Freeing user_pages array\n", __func__);
		if (mem->user_pages[0])
			release_pages(mem->user_pages,
					mem->bo->tbo.ttm->num_pages);
		kvfree(mem->user_pages);
	}

	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
	if (unlikely(ret))
		return ret;

	/* The eviction fence should be removed by the last unmap.
	 * TODO: Log an error condition if the bo still has the eviction fence
	 * attached
	 */
	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
					process_info->eviction_fence,
					NULL, NULL);
	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
		mem->va + bo_size * (1 + mem->aql_queue));

	/* Remove from VM internal data structures */
	list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
		remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
				entry, bo_size);

	ret = unreserve_bo_and_vms(&ctx, false, false);

	/* Free the sync object */
	amdgpu_sync_free(&mem->sync);

	/* Free the BO */
	amdgpu_bo_unref(&mem->bo);
	mutex_destroy(&mem->lock);
	kfree(mem);

	return ret;
}

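/* amdgpu_amdkfd_gpuvm_map_memory_to_gpu - Map a KFD BO into a GPUVM
 *
 * Adds the BO to the VM on first use, validates it in its target
 * domain, updates the page tables and page directories and increments
 * the mapping count. Invalid userptr BOs are not mapped here; the next
 * restore worker takes care of them.
 */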
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	int ret;
	struct amdgpu_bo *bo;
	uint32_t domain;
	struct kfd_bo_va_list *entry;
	struct bo_vm_reservation_context ctx;
	struct kfd_bo_va_list *bo_va_entry = NULL;
	struct kfd_bo_va_list *bo_va_entry_aql = NULL;
	unsigned long bo_size;
	bool is_invalid_userptr = false;

	bo = mem->bo;
	if (!bo) {
		pr_err("Invalid BO when mapping memory to GPU\n");
		return -EINVAL;
	}

	/* Make sure restore is not running concurrently. Since we
	 * don't map invalid userptr BOs, we rely on the next restore
	 * worker to do the mapping
	 */
	mutex_lock(&mem->process_info->lock);

	/* Lock mmap-sem. If we find an invalid userptr BO, we can be
	 * sure that the MMU notifier is no longer running
	 * concurrently and the queues are actually stopped
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		down_write(&current->mm->mmap_sem);
		is_invalid_userptr = atomic_read(&mem->invalid);
		up_write(&current->mm->mmap_sem);
	}

	mutex_lock(&mem->lock);

	domain = mem->domain;
	bo_size = bo->tbo.mem.size;

	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
			mem->va,
			mem->va + bo_size * (1 + mem->aql_queue),
			vm, domain_string(domain));

	ret = reserve_bo_and_vm(mem, vm, &ctx);
	if (unlikely(ret))
		goto out;

	/* Userptr can be marked as "not invalid", but not actually be
	 * validated yet (still in the system domain). In that case
	 * the queues are still stopped and we can leave mapping for
	 * the next restore worker
	 */
	if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
		is_invalid_userptr = true;

	if (check_if_add_bo_to_vm(avm, mem)) {
		ret = add_bo_to_vm(adev, mem, avm, false,
				&bo_va_entry);
		if (ret)
			goto add_bo_to_vm_failed;
		if (mem->aql_queue) {
			ret = add_bo_to_vm(adev, mem, avm,
					true, &bo_va_entry_aql);
			if (ret)
				goto add_bo_to_vm_failed_aql;
		}
	} else {
		ret = vm_validate_pt_pd_bos(avm);
		if (unlikely(ret))
			goto add_bo_to_vm_failed;
	}

	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		/* Validate BO only once. The eviction fence gets added to BO
		 * the first time it is mapped. Validate will wait for all
		 * background evictions to complete.
		 */
		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
		if (ret) {
			pr_debug("Validate failed\n");
			goto map_bo_to_gpuvm_failed;
		}
	}

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
			pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
					entry->va, entry->va + bo_size,
					entry);

			ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
					      is_invalid_userptr);
			if (ret) {
				pr_err("Failed to map BO to gpuvm\n");
				goto map_bo_to_gpuvm_failed;
			}

			ret = vm_update_pds(vm, ctx.sync);
			if (ret) {
				pr_err("Failed to update page directories\n");
				goto map_bo_to_gpuvm_failed;
			}

			entry->is_mapped = true;
			mem->mapped_to_gpu_memory++;
			pr_debug("\t INC mapping count %d\n",
					mem->mapped_to_gpu_memory);
		}
	}

	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
		amdgpu_bo_fence(bo,
				&avm->process_info->eviction_fence->base,
				true);
	ret = unreserve_bo_and_vms(&ctx, false, false);

	goto out;

map_bo_to_gpuvm_failed:
	if (bo_va_entry_aql)
		remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
add_bo_to_vm_failed_aql:
	if (bo_va_entry)
		remove_bo_from_vm(adev, bo_va_entry, bo_size);
add_bo_to_vm_failed:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->process_info->lock);
	mutex_unlock(&mem->lock);
	return ret;
}

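/* amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu - Unmap a KFD BO from a GPUVM
 *
 * Removes the mappings of the BO in the given VM and decrements the
 * mapping count. Once the BO is unmapped from all VMs, its eviction
 * fence is removed so the BO can be evicted if needed.
 */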
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdkfd_process_info *process_info =
		((struct amdgpu_vm *)vm)->process_info;
	unsigned long bo_size = mem->bo->tbo.mem.size;
	struct kfd_bo_va_list *entry;
	struct bo_vm_reservation_context ctx;
	int ret;

	mutex_lock(&mem->lock);

	ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
	if (unlikely(ret))
		goto out;
	/* If no VMs were reserved, it means the BO wasn't actually mapped */
	if (ctx.n_vms == 0) {
		ret = -EINVAL;
		goto unreserve_out;
	}

	ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
	if (unlikely(ret))
		goto unreserve_out;

	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
		mem->va,
		mem->va + bo_size * (1 + mem->aql_queue),
		vm);

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if (entry->bo_va->base.vm == vm && entry->is_mapped) {
			pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
					entry->va,
					entry->va + bo_size,
					entry);

			ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
			if (ret == 0) {
				entry->is_mapped = false;
			} else {
				pr_err("failed to unmap VA 0x%llx\n",
						mem->va);
				goto unreserve_out;
			}

			mem->mapped_to_gpu_memory--;
			pr_debug("\t DEC mapping count %d\n",
					mem->mapped_to_gpu_memory);
		}
	}

	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
	 * required.
	 */
	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
		amdgpu_amdkfd_remove_eviction_fence(mem->bo,
						process_info->eviction_fence,
						    NULL, NULL);

unreserve_out:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->lock);
	return ret;
}

int amdgpu_amdkfd_gpuvm_sync_memory(
		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
{
	struct amdgpu_sync sync;
	int ret;

	amdgpu_sync_create(&sync);

	mutex_lock(&mem->lock);
	amdgpu_sync_clone(&mem->sync, &sync);
	mutex_unlock(&mem->lock);

	ret = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);
	return ret;
}

int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
		struct kgd_mem *mem, void **kptr, uint64_t *size)
{
	int ret;
	struct amdgpu_bo *bo = mem->bo;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		pr_err("userptr can't be mapped to kernel\n");
		return -EINVAL;
	}

	/* Remove kgd_mem from the kfd_bo_list so this BO is not
	 * re-validated when BOs are restored after an eviction.
	 */
	mutex_lock(&mem->process_info->lock);

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("Failed to reserve bo. ret %d\n", ret);
		goto bo_reserve_failed;
	}

	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (ret) {
		pr_err("Failed to pin bo. ret %d\n", ret);
		goto pin_failed;
	}

	ret = amdgpu_bo_kmap(bo, kptr);
	if (ret) {
		pr_err("Failed to map bo to kernel. ret %d\n", ret);
		goto kmap_failed;
	}

	amdgpu_amdkfd_remove_eviction_fence(
		bo, mem->process_info->eviction_fence, NULL, NULL);
	list_del_init(&mem->validate_list.head);

	if (size)
		*size = amdgpu_bo_size(bo);

	amdgpu_bo_unreserve(bo);

	mutex_unlock(&mem->process_info->lock);
	return 0;

kmap_failed:
	amdgpu_bo_unpin(bo);
pin_failed:
	amdgpu_bo_unreserve(bo);
bo_reserve_failed:
	mutex_unlock(&mem->process_info->lock);

	return ret;
}

int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
					      struct kfd_vm_fault_info *mem)
{
	struct amdgpu_device *adev;

	adev = (struct amdgpu_device *)kgd;
	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
		*mem = *adev->gmc.vm_fault_info;
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
	}
	return 0;
}

/* Evict a userptr BO by stopping the queues if necessary
 *
 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
 * cannot do any memory allocations, and cannot take any locks that
 * are held elsewhere while allocating memory. Therefore this is as
 * simple as possible, using atomic counters.
 *
 * It doesn't do anything to the BO itself. The real work happens in
 * restore, where we get updated page addresses. This function only
 * ensures that GPU access to the BO is stopped.
 */
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
				struct mm_struct *mm)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	int invalid, evicted_bos;
	int r = 0;

	invalid = atomic_inc_return(&mem->invalid);
	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
	if (evicted_bos == 1) {
		/* First eviction, stop the queues */
		r = kgd2kfd->quiesce_mm(mm);
		if (r)
			pr_err("Failed to quiesce KFD\n");
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
	}

	return r;
}
1658 
1659 /* Update invalid userptr BOs
1660  *
1661  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1662  * userptr_inval_list and updates user pages for all BOs that have
1663  * been invalidated since their last update.
1664  */
update_invalid_user_pages(struct amdkfd_process_info * process_info,struct mm_struct * mm)1665 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1666 				     struct mm_struct *mm)
1667 {
1668 	struct kgd_mem *mem, *tmp_mem;
1669 	struct amdgpu_bo *bo;
1670 	struct ttm_operation_ctx ctx = { false, false };
1671 	int invalid, ret;
1672 
1673 	/* Move all invalidated BOs to the userptr_inval_list and
1674 	 * release their user pages by migration to the CPU domain
1675 	 */
1676 	list_for_each_entry_safe(mem, tmp_mem,
1677 				 &process_info->userptr_valid_list,
1678 				 validate_list.head) {
1679 		if (!atomic_read(&mem->invalid))
1680 			continue; /* BO is still valid */
1681 
1682 		bo = mem->bo;
1683 
1684 		if (amdgpu_bo_reserve(bo, true))
1685 			return -EAGAIN;
1686 		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1687 		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1688 		amdgpu_bo_unreserve(bo);
1689 		if (ret) {
1690 			pr_err("%s: Failed to invalidate userptr BO\n",
1691 			       __func__);
1692 			return -EAGAIN;
1693 		}
1694 
1695 		list_move_tail(&mem->validate_list.head,
1696 			       &process_info->userptr_inval_list);
1697 	}
1698 
1699 	if (list_empty(&process_info->userptr_inval_list))
1700 		return 0; /* All evicted userptr BOs were freed */
1701 
1702 	/* Go through userptr_inval_list and update any invalid user_pages */
1703 	list_for_each_entry(mem, &process_info->userptr_inval_list,
1704 			    validate_list.head) {
1705 		invalid = atomic_read(&mem->invalid);
1706 		if (!invalid)
1707 			/* BO hasn't been invalidated since the last
1708 			 * revalidation attempt. Keep its BO list.
1709 			 */
1710 			continue;
1711 
1712 		bo = mem->bo;
1713 
1714 		if (!mem->user_pages) {
1715 			mem->user_pages =
1716 				kvmalloc_array(bo->tbo.ttm->num_pages,
1717 						 sizeof(struct page *),
1718 						 GFP_KERNEL | __GFP_ZERO);
1719 			if (!mem->user_pages) {
1720 				pr_err("%s: Failed to allocate pages array\n",
1721 				       __func__);
1722 				return -ENOMEM;
1723 			}
1724 		} else if (mem->user_pages[0]) {
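			/* The array still holds pages pinned by a previous
			 * get_user_pages call; release them before getting
			 * the updated pages below.
			 */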
1725 			release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
1726 		}
1727 
1728 		/* Get updated user pages */
1729 		ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
1730 						   mem->user_pages);
1731 		if (ret) {
1732 			mem->user_pages[0] = NULL;
1733 			pr_info("%s: Failed to get user pages: %d\n",
1734 				__func__, ret);
1735 			/* Pretend it succeeded. It will fail later
1736 			 * with a VM fault if the GPU tries to access
1737 			 * it. Better than hanging indefinitely with
1738 			 * stalled user mode queues.
1739 			 */
1740 		}
1741 
1742 		/* Mark the BO as valid unless it was invalidated
1743 		 * again concurrently
1744 		 */
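		/* If it was, the -EAGAIN below makes the restore worker
		 * reschedule and revalidate it on the next attempt.
		 */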
1745 		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
1746 			return -EAGAIN;
1747 	}
1748 
1749 	return 0;
1750 }
1751 
1752 /* Validate invalid userptr BOs
1753  *
1754  * Validates BOs on the userptr_inval_list, and moves them back to the
1755  * userptr_valid_list. Also updates GPUVM page tables with new page
1756  * addresses and waits for the page table updates to complete.
1757  */
1758 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1759 {
1760 	struct amdgpu_bo_list_entry *pd_bo_list_entries;
1761 	struct list_head resv_list, duplicates;
1762 	struct ww_acquire_ctx ticket;
1763 	struct amdgpu_sync sync;
1764 
1765 	struct amdgpu_vm *peer_vm;
1766 	struct kgd_mem *mem, *tmp_mem;
1767 	struct amdgpu_bo *bo;
1768 	struct ttm_operation_ctx ctx = { false, false };
1769 	int i, ret;
1770 
1771 	pd_bo_list_entries = kcalloc(process_info->n_vms,
1772 				     sizeof(struct amdgpu_bo_list_entry),
1773 				     GFP_KERNEL);
1774 	if (!pd_bo_list_entries) {
1775 		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1776 		return -ENOMEM;
1777 	}
1778 
1779 	INIT_LIST_HEAD(&resv_list);
1780 	INIT_LIST_HEAD(&duplicates);
1781 
1782 	/* Get all the page directory BOs that need to be reserved */
1783 	i = 0;
1784 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
1785 			    vm_list_node)
1786 		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1787 				    &pd_bo_list_entries[i++]);
1788 	/* Add the userptr_inval_list entries to resv_list */
1789 	list_for_each_entry(mem, &process_info->userptr_inval_list,
1790 			    validate_list.head) {
1791 		list_add_tail(&mem->resv_list.head, &resv_list);
1792 		mem->resv_list.bo = mem->validate_list.bo;
1793 		mem->resv_list.shared = mem->validate_list.shared;
1794 	}
1795 
1796 	/* Reserve all BOs and page tables for validation */
1797 	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
1798 	WARN(!list_empty(&duplicates), "Duplicates should be empty");
1799 	if (ret)
1800 		goto out;
1801 
1802 	amdgpu_sync_create(&sync);
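	/* The sync object collects the fences of all page table
	 * updates issued below; they are waited on before this
	 * function returns.
	 */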
1803 
1804 	/* Avoid triggering eviction fences when unmapping invalid
1805 	 * userptr BOs (waits for all fences, doesn't use
1806 	 * FENCE_OWNER_VM)
1807 	 */
1808 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
1809 			    vm_list_node)
1810 		amdgpu_amdkfd_remove_eviction_fence(peer_vm->root.base.bo,
1811 						process_info->eviction_fence,
1812 						NULL, NULL);
1813 
1814 	ret = process_validate_vms(process_info);
1815 	if (ret)
1816 		goto unreserve_out;
1817 
1818 	/* Validate BOs and update GPUVM page tables */
1819 	list_for_each_entry_safe(mem, tmp_mem,
1820 				 &process_info->userptr_inval_list,
1821 				 validate_list.head) {
1822 		struct kfd_bo_va_list *bo_va_entry;
1823 
1824 		bo = mem->bo;
1825 
1826 		/* Copy pages array and validate the BO if we got user pages */
1827 		if (mem->user_pages[0]) {
1828 			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
1829 						     mem->user_pages);
1830 			amdgpu_bo_placement_from_domain(bo, mem->domain);
1831 			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1832 			if (ret) {
1833 				pr_err("%s: failed to validate BO\n", __func__);
1834 				goto unreserve_out;
1835 			}
1836 		}
1837 
1838 		/* Validate succeeded, now the BO owns the pages, free
1839 		 * our copy of the pointer array. Put this BO back on
1840 		 * the userptr_valid_list. If we need to revalidate
1841 		 * it, we need to start from scratch.
1842 		 */
1843 		kvfree(mem->user_pages);
1844 		mem->user_pages = NULL;
1845 		list_move_tail(&mem->validate_list.head,
1846 			       &process_info->userptr_valid_list);
1847 
1848 		/* Update mapping. If the BO was not validated
1849 		 * (because we couldn't get user pages), this will
1850 		 * clear the page table entries, which will result in
1851 		 * VM faults if the GPU tries to access the invalid
1852 		 * memory.
1853 		 */
1854 		list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1855 			if (!bo_va_entry->is_mapped)
1856 				continue;
1857 
1858 			ret = update_gpuvm_pte((struct amdgpu_device *)
1859 					       bo_va_entry->kgd_dev,
1860 					       bo_va_entry, &sync);
1861 			if (ret) {
1862 				pr_err("%s: update PTE failed\n", __func__);
1863 				/* make sure this gets validated again */
1864 				atomic_inc(&mem->invalid);
1865 				goto unreserve_out;
1866 			}
1867 		}
1868 	}
1869 
1870 	/* Update page directories */
1871 	ret = process_update_pds(process_info, &sync);
1872 
1873 unreserve_out:
1874 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
1875 			    vm_list_node)
1876 		amdgpu_bo_fence(peer_vm->root.base.bo,
1877 				&process_info->eviction_fence->base, true);
1878 	ttm_eu_backoff_reservation(&ticket, &resv_list);
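	/* Wait for the collected page table update fences to complete
	 * before freeing the sync object.
	 */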
1879 	amdgpu_sync_wait(&sync, false);
1880 	amdgpu_sync_free(&sync);
1881 out:
1882 	kfree(pd_bo_list_entries);
1883 
1884 	return ret;
1885 }
1886 
1887 /* Worker callback to restore evicted userptr BOs
1888  *
1889  * Tries to update and validate all userptr BOs. If successful and no
1890  * concurrent evictions happened, the queues are restarted. Otherwise,
1891  * reschedule for another attempt later.
1892  */
1893 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1894 {
1895 	struct delayed_work *dwork = to_delayed_work(work);
1896 	struct amdkfd_process_info *process_info =
1897 		container_of(dwork, struct amdkfd_process_info,
1898 			     restore_userptr_work);
1899 	struct task_struct *usertask;
1900 	struct mm_struct *mm;
1901 	int evicted_bos;
1902 
1903 	evicted_bos = atomic_read(&process_info->evicted_bos);
1904 	if (!evicted_bos)
1905 		return;
1906 
1907 	/* Reference task and mm in case of concurrent process termination */
1908 	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
1909 	if (!usertask)
1910 		return;
1911 	mm = get_task_mm(usertask);
1912 	if (!mm) {
1913 		put_task_struct(usertask);
1914 		return;
1915 	}
1916 
1917 	mutex_lock(&process_info->lock);
1918 
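	/* If updating user pages fails, bail out; evicted_bos stays
	 * nonzero, so another attempt is scheduled below.
	 */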
1919 	if (update_invalid_user_pages(process_info, mm))
1920 		goto unlock_out;
1921 	/* userptr_inval_list can be empty if all evicted userptr BOs
1922 	 * have been freed. In that case there is nothing to validate
1923 	 * and we can just restart the queues.
1924 	 */
1925 	if (!list_empty(&process_info->userptr_inval_list)) {
1926 		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
1927 			goto unlock_out; /* Concurrent eviction, try again */
1928 
1929 		if (validate_invalid_user_pages(process_info))
1930 			goto unlock_out;
1931 	}
1932 	/* Final check for a concurrent eviction, done with an atomic
1933 	 * update. If another eviction happens after a successful update,
1934 	 * it will count as a first eviction and call quiesce_mm again.
1935 	 * The eviction reference counting inside KFD handles this case.
1936 	 */
1937 	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
1938 	    evicted_bos)
1939 		goto unlock_out;
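	/* The counter was reset; clear the local copy so we don't
	 * reschedule below, then resume the queues.
	 */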
1940 	evicted_bos = 0;
1941 	if (kgd2kfd->resume_mm(mm)) {
1942 		pr_err("%s: Failed to resume KFD\n", __func__);
1943 		/* No recovery from this failure. Probably the CP is
1944 		 * hanging. No point trying again.
1945 		 */
1946 	}
1947 unlock_out:
1948 	mutex_unlock(&process_info->lock);
1949 	mmput(mm);
1950 	put_task_struct(usertask);
1951 
1952 	/* If validation failed, reschedule another attempt */
1953 	if (evicted_bos)
1954 		schedule_delayed_work(&process_info->restore_userptr_work,
1955 			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1956 }
1957 
1958 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
1959  *   KFD process identified by process_info
1960  *
1961  * @process_info: amdkfd_process_info of the KFD process
1962  *
1963  * After memory eviction, the restore thread calls this function. It must
1964  * be called while the process is still valid. BO restoration involves:
1965  *
1966  * 1.  Release the old eviction fence and create a new one
1967  * 2.  Get two copies of the PD BO list from all the VMs. Keep one copy as pd_bo_list.
1968  * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
1969  *     BOs that need to be reserved.
1970  * 4.  Reserve all the BOs
1971  * 5.  Validate the PD and PT BOs.
1972  * 6.  Validate all KFD BOs using kfd_bo_list, map them, and add a new fence
1973  * 7.  Add the fence to all PD and PT BOs.
1974  * 8.  Unreserve all BOs
1975  */
1976 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
1977 {
1978 	struct amdgpu_bo_list_entry *pd_bo_list;
1979 	struct amdkfd_process_info *process_info = info;
1980 	struct amdgpu_vm *peer_vm;
1981 	struct kgd_mem *mem;
1982 	struct bo_vm_reservation_context ctx;
1983 	struct amdgpu_amdkfd_fence *new_fence;
1984 	int ret = 0, i;
1985 	struct list_head duplicate_save;
1986 	struct amdgpu_sync sync_obj;
1987 
1988 	INIT_LIST_HEAD(&duplicate_save);
1989 	INIT_LIST_HEAD(&ctx.list);
1990 	INIT_LIST_HEAD(&ctx.duplicates);
1991 
1992 	pd_bo_list = kcalloc(process_info->n_vms,
1993 			     sizeof(struct amdgpu_bo_list_entry),
1994 			     GFP_KERNEL);
1995 	if (!pd_bo_list)
1996 		return -ENOMEM;
1997 
1998 	i = 0;
1999 	mutex_lock(&process_info->lock);
2000 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2001 			vm_list_node)
2002 		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2003 
2004 	/* Reserve all BOs and page tables/directory. Add all BOs from
2005 	 * kfd_bo_list to ctx.list
2006 	 */
2007 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2008 			    validate_list.head) {
2009 
2010 		list_add_tail(&mem->resv_list.head, &ctx.list);
2011 		mem->resv_list.bo = mem->validate_list.bo;
2012 		mem->resv_list.shared = mem->validate_list.shared;
2013 	}
2014 
2015 	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2016 				     false, &duplicate_save);
2017 	if (ret) {
2018 		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2019 		goto ttm_reserve_fail;
2020 	}
2021 
2022 	amdgpu_sync_create(&sync_obj);
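	/* sync_obj collects the fences of all PTE and PD updates below */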
2023 
2024 	/* Validate PDs and PTs */
2025 	ret = process_validate_vms(process_info);
2026 	if (ret)
2027 		goto validate_map_fail;
2028 
2029 	/* Wait for PD/PTs validate to finish */
2030 	/* FIXME: I think this isn't needed */
2031 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2032 			    vm_list_node) {
2033 		struct amdgpu_bo *bo = peer_vm->root.base.bo;
2034 
2035 		ttm_bo_wait(&bo->tbo, false, false);
2036 	}
2037 
2038 	/* Validate BOs and map them to GPUVM (update VM page tables). */
2039 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2040 			    validate_list.head) {
2041 
2042 		struct amdgpu_bo *bo = mem->bo;
2043 		uint32_t domain = mem->domain;
2044 		struct kfd_bo_va_list *bo_va_entry;
2045 
2046 		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2047 		if (ret) {
2048 			pr_debug("Memory eviction: Validate BOs failed. Try again\n");
2049 			goto validate_map_fail;
2050 		}
2051 
2052 		list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2053 				    bo_list) {
2054 			ret = update_gpuvm_pte((struct amdgpu_device *)
2055 					      bo_va_entry->kgd_dev,
2056 					      bo_va_entry,
2057 					      &sync_obj);
2058 			if (ret) {
2059 				pr_debug("Memory eviction: update PTE failed. Try again\n");
2060 				goto validate_map_fail;
2061 			}
2062 		}
2063 	}
2064 
2065 	/* Update page directories */
2066 	ret = process_update_pds(process_info, &sync_obj);
2067 	if (ret) {
2068 		pr_debug("Memory eviction: update PDs failed. Try again\n");
2069 		goto validate_map_fail;
2070 	}
2071 
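	/* Wait for all page table updates to complete */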
2072 	amdgpu_sync_wait(&sync_obj, false);
2073 
2074 	/* Release the old eviction fence and create a new one. A fence can
2075 	 * only go from unsignaled to signaled once, so it cannot be reused.
2076 	 * Use the context and mm from the old fence.
2077 	 */
2078 	new_fence = amdgpu_amdkfd_fence_create(
2079 				process_info->eviction_fence->base.context,
2080 				process_info->eviction_fence->mm);
2081 	if (!new_fence) {
2082 		pr_err("Failed to create eviction fence\n");
2083 		ret = -ENOMEM;
2084 		goto validate_map_fail;
2085 	}
2086 	dma_fence_put(&process_info->eviction_fence->base);
2087 	process_info->eviction_fence = new_fence;
2088 	*ef = dma_fence_get(&new_fence->base);
2089 
2090 	/* Wait for validate to finish and attach new eviction fence */
2091 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2092 		validate_list.head)
2093 		ttm_bo_wait(&mem->bo->tbo, false, false);
2094 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2095 		validate_list.head)
2096 		amdgpu_bo_fence(mem->bo,
2097 			&process_info->eviction_fence->base, true);
2098 
2099 	/* Attach eviction fence to PD / PT BOs */
2100 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2101 			    vm_list_node) {
2102 		struct amdgpu_bo *bo = peer_vm->root.base.bo;
2103 
2104 		amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2105 	}
2106 
2107 validate_map_fail:
2108 	ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2109 	amdgpu_sync_free(&sync_obj);
2110 ttm_reserve_fail:
2111 	mutex_unlock(&process_info->lock);
2112 	kfree(pd_bo_list);
2113 	return ret;
2114 }
2115