1 /*
2  * Copyright 2014-2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include <linux/dma-buf.h>
23 #include <linux/list.h>
24 #include <linux/pagemap.h>
25 #include <linux/sched/mm.h>
26 #include <linux/sched/task.h>
27 
28 #include "amdgpu_object.h"
29 #include "amdgpu_vm.h"
30 #include "amdgpu_amdkfd.h"
31 #include "amdgpu_dma_buf.h"
32 #include <uapi/linux/kfd_ioctl.h>
33 
34 /* BO flag to indicate a KFD userptr BO */
35 #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
36 
37 /* Userptr restore delay, just long enough to allow consecutive VM
38  * changes to accumulate
39  */
40 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
41 
42 /* Impose limit on how much memory KFD can use */
43 static struct {
44 	uint64_t max_system_mem_limit;
45 	uint64_t max_ttm_mem_limit;
46 	int64_t system_mem_used;
47 	int64_t ttm_mem_used;
48 	spinlock_t mem_limit_lock;
49 } kfd_mem_limit;
50 
51 /* Struct used for amdgpu_amdkfd_bo_validate */
52 struct amdgpu_vm_parser {
53 	uint32_t        domain;
54 	bool            wait;
55 };
56 
57 static const char * const domain_bit_to_string[] = {
58 		"CPU",
59 		"GTT",
60 		"VRAM",
61 		"GDS",
62 		"GWS",
63 		"OA"
64 };
65 
66 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
67 
68 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
69 
70 
71 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
72 {
73 	return (struct amdgpu_device *)kgd;
74 }
75 
76 static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
77 		struct kgd_mem *mem)
78 {
79 	struct kfd_bo_va_list *entry;
80 
81 	list_for_each_entry(entry, &mem->bo_va_list, bo_list)
82 		if (entry->bo_va->base.vm == avm)
83 			return false;
84 
85 	return true;
86 }
87 
88 /* Set memory usage limits. Currently, the limits are
89  *  System (TTM + userptr) memory - 15/16th System RAM
90  *  TTM memory - 3/8th System RAM
91  */
92 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
93 {
94 	struct sysinfo si;
95 	uint64_t mem;
96 
97 	si_meminfo(&si);
98 	mem = si.totalram - si.totalhigh;
99 	mem *= si.mem_unit;
100 
101 	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
102 	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
103 	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
104 	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
105 		(kfd_mem_limit.max_system_mem_limit >> 20),
106 		(kfd_mem_limit.max_ttm_mem_limit >> 20));
107 }
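/* A rough worked example of the limits above (illustrative numbers, not
 * taken from any particular system): with 64 GiB of usable system RAM,
 * max_system_mem_limit becomes 64 - 64/16 = 60 GiB and max_ttm_mem_limit
 * becomes 64/2 - 64/8 = 24 GiB.
 */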
108 
109 /* Estimate page table size needed to represent a given memory size
110  *
111  * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
112  * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
113  * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
114  * for 2MB pages for TLB efficiency. However, small allocations and
115  * fragmented system memory still need some 4KB pages. We choose a
116  * compromise that should work in most cases without reserving too
117  * much memory for page tables unnecessarily (factor 16K, >> 14).
118  */
119 #define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
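/* Illustrative example of the estimate (assumed numbers): for 256 GiB of
 * managed memory, ESTIMATE_PT_SIZE gives 256 GiB >> 14 = 16 MiB of page
 * tables, between the all-4KB worst case (>> 9, 512 MiB) and the all-2MB
 * best case (>> 18, 1 MiB).
 */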
120 
121 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
122 		uint64_t size, u32 domain, bool sg)
123 {
124 	uint64_t reserved_for_pt =
125 		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
126 	size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
127 	int ret = 0;
128 
129 	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
130 				       sizeof(struct amdgpu_bo));
131 
132 	vram_needed = 0;
133 	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
134 		/* TTM GTT memory */
135 		system_mem_needed = acc_size + size;
136 		ttm_mem_needed = acc_size + size;
137 	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
138 		/* Userptr */
139 		system_mem_needed = acc_size + size;
140 		ttm_mem_needed = acc_size;
141 	} else {
142 		/* VRAM and SG */
143 		system_mem_needed = acc_size;
144 		ttm_mem_needed = acc_size;
145 		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
146 			vram_needed = size;
147 	}
148 
149 	spin_lock(&kfd_mem_limit.mem_limit_lock);
150 
151 	if (kfd_mem_limit.system_mem_used + system_mem_needed >
152 	    kfd_mem_limit.max_system_mem_limit)
153 		pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
154 
155 	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
156 	     kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
157 	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
158 	     kfd_mem_limit.max_ttm_mem_limit) ||
159 	    (adev->kfd.vram_used + vram_needed >
160 	     adev->gmc.real_vram_size - reserved_for_pt)) {
161 		ret = -ENOMEM;
162 	} else {
163 		kfd_mem_limit.system_mem_used += system_mem_needed;
164 		kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
165 		adev->kfd.vram_used += vram_needed;
166 	}
167 
168 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
169 	return ret;
170 }
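/* Summary of the accounting done above (illustrative, for a BO of size S
 * with TTM bookkeeping overhead acc_size):
 *   GTT:     system_mem += S + acc_size, ttm_mem += S + acc_size
 *   userptr: system_mem += S + acc_size, ttm_mem += acc_size
 *   VRAM/SG: system_mem += acc_size,     ttm_mem += acc_size,
 *            and for VRAM additionally adev->kfd.vram_used += S
 */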
171 
172 static void unreserve_mem_limit(struct amdgpu_device *adev,
173 		uint64_t size, u32 domain, bool sg)
174 {
175 	size_t acc_size;
176 
177 	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
178 				       sizeof(struct amdgpu_bo));
179 
180 	spin_lock(&kfd_mem_limit.mem_limit_lock);
181 	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
182 		kfd_mem_limit.system_mem_used -= (acc_size + size);
183 		kfd_mem_limit.ttm_mem_used -= (acc_size + size);
184 	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
185 		kfd_mem_limit.system_mem_used -= (acc_size + size);
186 		kfd_mem_limit.ttm_mem_used -= acc_size;
187 	} else {
188 		kfd_mem_limit.system_mem_used -= acc_size;
189 		kfd_mem_limit.ttm_mem_used -= acc_size;
190 		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
191 			adev->kfd.vram_used -= size;
192 			WARN_ONCE(adev->kfd.vram_used < 0,
193 				  "kfd VRAM memory accounting unbalanced");
194 		}
195 	}
196 	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
197 		  "kfd system memory accounting unbalanced");
198 	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
199 		  "kfd TTM memory accounting unbalanced");
200 
201 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
202 }
203 
204 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
205 {
206 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
207 	u32 domain = bo->preferred_domains;
208 	bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
209 
210 	if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
211 		domain = AMDGPU_GEM_DOMAIN_CPU;
212 		sg = false;
213 	}
214 
215 	unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
216 }
217 
218 
219 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
220  *  reservation object.
221  *
222  * @bo: [IN] Remove eviction fence(s) from this BO
223  * @ef: [IN] This eviction fence is removed if it
224  *  is present in the shared list.
225  *
226  * NOTE: Must be called with the BO reserved, i.e. with bo->tbo.base.resv locked.
227  */
228 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
229 					struct amdgpu_amdkfd_fence *ef)
230 {
231 	struct dma_resv *resv = bo->tbo.base.resv;
232 	struct dma_resv_list *old, *new;
233 	unsigned int i, j, k;
234 
235 	if (!ef)
236 		return -EINVAL;
237 
238 	old = dma_resv_get_list(resv);
239 	if (!old)
240 		return 0;
241 
242 	new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
243 		      GFP_KERNEL);
244 	if (!new)
245 		return -ENOMEM;
246 
247 	/* Go through all the shared fences in the reservation object and sort
248 	 * the interesting ones to the end of the list.
249 	 */
250 	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
251 		struct dma_fence *f;
252 
253 		f = rcu_dereference_protected(old->shared[i],
254 					      dma_resv_held(resv));
255 
256 		if (f->context == ef->base.context)
257 			RCU_INIT_POINTER(new->shared[--j], f);
258 		else
259 			RCU_INIT_POINTER(new->shared[k++], f);
260 	}
261 	new->shared_max = old->shared_max;
262 	new->shared_count = k;
263 
264 	/* Install the new fence list, seqcount provides the barriers */
265 	write_seqcount_begin(&resv->seq);
266 	RCU_INIT_POINTER(resv->fence, new);
267 	write_seqcount_end(&resv->seq);
268 
269 	/* Drop the references to the fences that were removed from the list */
270 	for (i = j, k = 0; i < old->shared_count; ++i) {
271 		struct dma_fence *f;
272 
273 		f = rcu_dereference_protected(new->shared[i],
274 					      dma_resv_held(resv));
275 		dma_fence_put(f);
276 	}
277 	kfree_rcu(old, rcu);
278 
279 	return 0;
280 }
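/* Illustration of the partitioning above (no additional behavior): given an
 * old shared list [F0, E, F1] where E belongs to the eviction fence context,
 * the loop produces new = [F0, F1, E] with shared_count = 2, so readers see
 * only F0 and F1 and the reference held on E is dropped afterwards.
 */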
281 
282 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
283 {
284 	struct amdgpu_bo *root = bo;
285 	struct amdgpu_vm_bo_base *vm_bo;
286 	struct amdgpu_vm *vm;
287 	struct amdkfd_process_info *info;
288 	struct amdgpu_amdkfd_fence *ef;
289 	int ret;
290 
291 	/* We can always get vm_bo from the root PD BO. */
292 	while (root->parent)
293 		root = root->parent;
294 
295 	vm_bo = root->vm_bo;
296 	if (!vm_bo)
297 		return 0;
298 
299 	vm = vm_bo->vm;
300 	if (!vm)
301 		return 0;
302 
303 	info = vm->process_info;
304 	if (!info || !info->eviction_fence)
305 		return 0;
306 
307 	ef = container_of(dma_fence_get(&info->eviction_fence->base),
308 			struct amdgpu_amdkfd_fence, base);
309 
310 	BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
311 	ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
312 	dma_resv_unlock(bo->tbo.base.resv);
313 
314 	dma_fence_put(&ef->base);
315 	return ret;
316 }
317 
318 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
319 				     bool wait)
320 {
321 	struct ttm_operation_ctx ctx = { false, false };
322 	int ret;
323 
324 	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
325 		 "Called with userptr BO"))
326 		return -EINVAL;
327 
328 	amdgpu_bo_placement_from_domain(bo, domain);
329 
330 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
331 	if (ret)
332 		goto validate_fail;
333 	if (wait)
334 		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
335 
336 validate_fail:
337 	return ret;
338 }
339 
340 static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
341 {
342 	struct amdgpu_vm_parser *p = param;
343 
344 	return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
345 }
346 
347 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
348  *
349  * Page directories are not updated here because huge page handling
350  * during page table updates can invalidate page directory entries
351  * again. Page directories are only updated after updating page
352  * tables.
353  */
354 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
355 {
356 	struct amdgpu_bo *pd = vm->root.base.bo;
357 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
358 	struct amdgpu_vm_parser param;
359 	int ret;
360 
361 	param.domain = AMDGPU_GEM_DOMAIN_VRAM;
362 	param.wait = false;
363 
364 	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
365 					&param);
366 	if (ret) {
367 		pr_err("failed to validate PT BOs\n");
368 		return ret;
369 	}
370 
371 	ret = amdgpu_amdkfd_validate(&param, pd);
372 	if (ret) {
373 		pr_err("failed to validate PD\n");
374 		return ret;
375 	}
376 
377 	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
378 
379 	if (vm->use_cpu_for_update) {
380 		ret = amdgpu_bo_kmap(pd, NULL);
381 		if (ret) {
382 			pr_err("failed to kmap PD, ret=%d\n", ret);
383 			return ret;
384 		}
385 	}
386 
387 	return 0;
388 }
389 
390 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
391 {
392 	struct amdgpu_bo *pd = vm->root.base.bo;
393 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
394 	int ret;
395 
396 	ret = amdgpu_vm_update_pdes(adev, vm, false);
397 	if (ret)
398 		return ret;
399 
400 	return amdgpu_sync_fence(sync, vm->last_update);
401 }
402 
403 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
404 {
405 	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
406 	bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
407 	uint32_t mapping_flags;
408 
409 	mapping_flags = AMDGPU_VM_PAGE_READABLE;
410 	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
411 		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
412 	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
413 		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
414 
415 	switch (adev->asic_type) {
416 	case CHIP_ARCTURUS:
417 		if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
418 			if (bo_adev == adev)
419 				mapping_flags |= coherent ?
420 					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
421 			else
422 				mapping_flags |= AMDGPU_VM_MTYPE_UC;
423 		} else {
424 			mapping_flags |= coherent ?
425 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
426 		}
427 		break;
428 	default:
429 		mapping_flags |= coherent ?
430 			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
431 	}
432 
433 	return amdgpu_gem_va_map_flags(adev, mapping_flags);
434 }
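/* Example of the flags computed above (illustrative): a writable,
 * non-coherent allocation on an ASIC other than Arcturus takes the default
 * path and ends up with AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
 * AMDGPU_VM_MTYPE_NC, which amdgpu_gem_va_map_flags() then translates into
 * the hardware PTE bits.
 */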
435 
436 /* add_bo_to_vm - Add a BO to a VM
437  *
438  * Everything that needs to be done only once when a BO is first added
439  * to a VM. It can later be mapped and unmapped many times without
440  * repeating these steps.
441  *
442  * 1. Allocate and initialize BO VA entry data structure
443  * 2. Add BO to the VM
444  * 3. Determine ASIC-specific PTE flags
445  * 4. Alloc page tables and directories if needed
446  * 4a.  Validate new page tables and directories
447  */
448 static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
449 		struct amdgpu_vm *vm, bool is_aql,
450 		struct kfd_bo_va_list **p_bo_va_entry)
451 {
452 	int ret;
453 	struct kfd_bo_va_list *bo_va_entry;
454 	struct amdgpu_bo *bo = mem->bo;
455 	uint64_t va = mem->va;
456 	struct list_head *list_bo_va = &mem->bo_va_list;
457 	unsigned long bo_size = bo->tbo.mem.size;
458 
459 	if (!va) {
460 		pr_err("Invalid VA when adding BO to VM\n");
461 		return -EINVAL;
462 	}
463 
464 	if (is_aql)
465 		va += bo_size;
466 
467 	bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
468 	if (!bo_va_entry)
469 		return -ENOMEM;
470 
471 	pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
472 			va + bo_size, vm);
473 
474 	/* Add BO to VM internal data structures*/
475 	bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
476 	if (!bo_va_entry->bo_va) {
477 		ret = -EINVAL;
478 		pr_err("Failed to add BO object to VM. ret == %d\n",
479 				ret);
480 		goto err_vmadd;
481 	}
482 
483 	bo_va_entry->va = va;
484 	bo_va_entry->pte_flags = get_pte_flags(adev, mem);
485 	bo_va_entry->kgd_dev = (void *)adev;
486 	list_add(&bo_va_entry->bo_list, list_bo_va);
487 
488 	if (p_bo_va_entry)
489 		*p_bo_va_entry = bo_va_entry;
490 
491 	/* Allocate and validate page tables if needed */
492 	ret = vm_validate_pt_pd_bos(vm);
493 	if (ret) {
494 		pr_err("validate_pt_pd_bos() failed\n");
495 		goto err_alloc_pts;
496 	}
497 
498 	return 0;
499 
500 err_alloc_pts:
501 	amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
502 	list_del(&bo_va_entry->bo_list);
503 err_vmadd:
504 	kfree(bo_va_entry);
505 	return ret;
506 }
507 
508 static void remove_bo_from_vm(struct amdgpu_device *adev,
509 		struct kfd_bo_va_list *entry, unsigned long size)
510 {
511 	pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
512 			entry->va,
513 			entry->va + size, entry);
514 	amdgpu_vm_bo_rmv(adev, entry->bo_va);
515 	list_del(&entry->bo_list);
516 	kfree(entry);
517 }
518 
519 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
520 				struct amdkfd_process_info *process_info,
521 				bool userptr)
522 {
523 	struct ttm_validate_buffer *entry = &mem->validate_list;
524 	struct amdgpu_bo *bo = mem->bo;
525 
526 	INIT_LIST_HEAD(&entry->head);
527 	entry->num_shared = 1;
528 	entry->bo = &bo->tbo;
529 	mutex_lock(&process_info->lock);
530 	if (userptr)
531 		list_add_tail(&entry->head, &process_info->userptr_valid_list);
532 	else
533 		list_add_tail(&entry->head, &process_info->kfd_bo_list);
534 	mutex_unlock(&process_info->lock);
535 }
536 
537 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
538 		struct amdkfd_process_info *process_info)
539 {
540 	struct ttm_validate_buffer *bo_list_entry;
541 
542 	bo_list_entry = &mem->validate_list;
543 	mutex_lock(&process_info->lock);
544 	list_del(&bo_list_entry->head);
545 	mutex_unlock(&process_info->lock);
546 }
547 
548 /* Initializes user pages. It registers the MMU notifier and validates
549  * the userptr BO in the GTT domain.
550  *
551  * The BO must already be on the userptr_valid_list. Otherwise an
552  * eviction and restore may happen that leaves the new BO unmapped
553  * with the user mode queues running.
554  *
555  * Takes the process_info->lock to protect against concurrent restore
556  * workers.
557  *
558  * Returns 0 for success, negative errno for errors.
559  */
560 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
561 {
562 	struct amdkfd_process_info *process_info = mem->process_info;
563 	struct amdgpu_bo *bo = mem->bo;
564 	struct ttm_operation_ctx ctx = { true, false };
565 	int ret = 0;
566 
567 	mutex_lock(&process_info->lock);
568 
569 	ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
570 	if (ret) {
571 		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
572 		goto out;
573 	}
574 
575 	ret = amdgpu_mn_register(bo, user_addr);
576 	if (ret) {
577 		pr_err("%s: Failed to register MMU notifier: %d\n",
578 		       __func__, ret);
579 		goto out;
580 	}
581 
582 	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
583 	if (ret) {
584 		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
585 		goto unregister_out;
586 	}
587 
588 	ret = amdgpu_bo_reserve(bo, true);
589 	if (ret) {
590 		pr_err("%s: Failed to reserve BO\n", __func__);
591 		goto release_out;
592 	}
593 	amdgpu_bo_placement_from_domain(bo, mem->domain);
594 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
595 	if (ret)
596 		pr_err("%s: failed to validate BO\n", __func__);
597 	amdgpu_bo_unreserve(bo);
598 
599 release_out:
600 	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
601 unregister_out:
602 	if (ret)
603 		amdgpu_mn_unregister(bo);
604 out:
605 	mutex_unlock(&process_info->lock);
606 	return ret;
607 }
608 
609 /* Reserving a BO and its page table BOs must happen atomically to
610  * avoid deadlocks. Some operations update multiple VMs at once. Track
611  * all the reservation info in a context structure. Optionally a sync
612  * object can track VM updates.
613  */
614 struct bo_vm_reservation_context {
615 	struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
616 	unsigned int n_vms;		    /* Number of VMs reserved	    */
617 	struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
618 	struct ww_acquire_ctx ticket;	    /* Reservation ticket	    */
619 	struct list_head list, duplicates;  /* BO lists			    */
620 	struct amdgpu_sync *sync;	    /* Pointer to sync object	    */
621 	bool reserved;			    /* Whether BOs are reserved	    */
622 };
623 
624 enum bo_vm_match {
625 	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
626 	BO_VM_MAPPED,		/* Match VMs where a BO is mapped     */
627 	BO_VM_ALL,		/* Match all VMs a BO was added to    */
628 };
629 
630 /**
631  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
632  * @mem: KFD BO structure.
633  * @vm: the VM to reserve.
634  * @ctx: the struct that will be used in unreserve_bo_and_vms().
635  */
636 static int reserve_bo_and_vm(struct kgd_mem *mem,
637 			      struct amdgpu_vm *vm,
638 			      struct bo_vm_reservation_context *ctx)
639 {
640 	struct amdgpu_bo *bo = mem->bo;
641 	int ret;
642 
643 	WARN_ON(!vm);
644 
645 	ctx->reserved = false;
646 	ctx->n_vms = 1;
647 	ctx->sync = &mem->sync;
648 
649 	INIT_LIST_HEAD(&ctx->list);
650 	INIT_LIST_HEAD(&ctx->duplicates);
651 
652 	ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
653 	if (!ctx->vm_pd)
654 		return -ENOMEM;
655 
656 	ctx->kfd_bo.priority = 0;
657 	ctx->kfd_bo.tv.bo = &bo->tbo;
658 	ctx->kfd_bo.tv.num_shared = 1;
659 	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
660 
661 	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
662 
663 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
664 				     false, &ctx->duplicates);
665 	if (ret) {
666 		pr_err("Failed to reserve buffers in ttm.\n");
667 		kfree(ctx->vm_pd);
668 		ctx->vm_pd = NULL;
669 		return ret;
670 	}
671 
672 	ctx->reserved = true;
673 	return 0;
674 }
675 
676 /**
677  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
678  * @mem: KFD BO structure.
679  * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
680  * are used. Otherwise, only the given VM associated with the BO is used.
681  * @map_type: the mapping status that will be used to filter the VMs.
682  * @ctx: the struct that will be used in unreserve_bo_and_vms().
683  *
684  * Returns 0 for success, negative for failure.
685  */
686 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
687 				struct amdgpu_vm *vm, enum bo_vm_match map_type,
688 				struct bo_vm_reservation_context *ctx)
689 {
690 	struct amdgpu_bo *bo = mem->bo;
691 	struct kfd_bo_va_list *entry;
692 	unsigned int i;
693 	int ret;
694 
695 	ctx->reserved = false;
696 	ctx->n_vms = 0;
697 	ctx->vm_pd = NULL;
698 	ctx->sync = &mem->sync;
699 
700 	INIT_LIST_HEAD(&ctx->list);
701 	INIT_LIST_HEAD(&ctx->duplicates);
702 
703 	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
704 		if ((vm && vm != entry->bo_va->base.vm) ||
705 			(entry->is_mapped != map_type
706 			&& map_type != BO_VM_ALL))
707 			continue;
708 
709 		ctx->n_vms++;
710 	}
711 
712 	if (ctx->n_vms != 0) {
713 		ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
714 				     GFP_KERNEL);
715 		if (!ctx->vm_pd)
716 			return -ENOMEM;
717 	}
718 
719 	ctx->kfd_bo.priority = 0;
720 	ctx->kfd_bo.tv.bo = &bo->tbo;
721 	ctx->kfd_bo.tv.num_shared = 1;
722 	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
723 
724 	i = 0;
725 	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
726 		if ((vm && vm != entry->bo_va->base.vm) ||
727 			(entry->is_mapped != map_type
728 			&& map_type != BO_VM_ALL))
729 			continue;
730 
731 		amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
732 				&ctx->vm_pd[i]);
733 		i++;
734 	}
735 
736 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
737 				     false, &ctx->duplicates);
738 	if (ret) {
739 		pr_err("Failed to reserve buffers in ttm.\n");
740 		kfree(ctx->vm_pd);
741 		ctx->vm_pd = NULL;
742 		return ret;
743 	}
744 
745 	ctx->reserved = true;
746 	return 0;
747 }
748 
749 /**
750  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
751  * @ctx: Reservation context to unreserve
752  * @wait: Optionally wait for a sync object representing pending VM updates
753  * @intr: Whether the wait is interruptible
754  *
755  * Also frees any resources allocated in
756  * reserve_bo_and_(cond_)vm(s). Returns the status from
757  * amdgpu_sync_wait.
758  */
759 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
760 				 bool wait, bool intr)
761 {
762 	int ret = 0;
763 
764 	if (wait)
765 		ret = amdgpu_sync_wait(ctx->sync, intr);
766 
767 	if (ctx->reserved)
768 		ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
769 	kfree(ctx->vm_pd);
770 
771 	ctx->sync = NULL;
772 
773 	ctx->reserved = false;
774 	ctx->vm_pd = NULL;
775 
776 	return ret;
777 }
778 
779 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
780 				struct kfd_bo_va_list *entry,
781 				struct amdgpu_sync *sync)
782 {
783 	struct amdgpu_bo_va *bo_va = entry->bo_va;
784 	struct amdgpu_vm *vm = bo_va->base.vm;
785 
786 	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
787 
788 	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
789 
790 	amdgpu_sync_fence(sync, bo_va->last_pt_update);
791 
792 	return 0;
793 }
794 
795 static int update_gpuvm_pte(struct amdgpu_device *adev,
796 		struct kfd_bo_va_list *entry,
797 		struct amdgpu_sync *sync)
798 {
799 	int ret;
800 	struct amdgpu_bo_va *bo_va = entry->bo_va;
801 
802 	/* Update the page tables  */
803 	ret = amdgpu_vm_bo_update(adev, bo_va, false);
804 	if (ret) {
805 		pr_err("amdgpu_vm_bo_update failed\n");
806 		return ret;
807 	}
808 
809 	return amdgpu_sync_fence(sync, bo_va->last_pt_update);
810 }
811 
812 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
813 		struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
814 		bool no_update_pte)
815 {
816 	int ret;
817 
818 	/* Set virtual address for the allocation */
819 	ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
820 			       amdgpu_bo_size(entry->bo_va->base.bo),
821 			       entry->pte_flags);
822 	if (ret) {
823 		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
824 				entry->va, ret);
825 		return ret;
826 	}
827 
828 	if (no_update_pte)
829 		return 0;
830 
831 	ret = update_gpuvm_pte(adev, entry, sync);
832 	if (ret) {
833 		pr_err("update_gpuvm_pte() failed\n");
834 		goto update_gpuvm_pte_failed;
835 	}
836 
837 	return 0;
838 
839 update_gpuvm_pte_failed:
840 	unmap_bo_from_gpuvm(adev, entry, sync);
841 	return ret;
842 }
843 
844 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
845 {
846 	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
847 
848 	if (!sg)
849 		return NULL;
850 	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
851 		kfree(sg);
852 		return NULL;
853 	}
854 	sg->sgl->dma_address = addr;
855 	sg->sgl->length = size;
856 #ifdef CONFIG_NEED_SG_DMA_LENGTH
857 	sg->sgl->dma_length = size;
858 #endif
859 	return sg;
860 }
861 
862 static int process_validate_vms(struct amdkfd_process_info *process_info)
863 {
864 	struct amdgpu_vm *peer_vm;
865 	int ret;
866 
867 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
868 			    vm_list_node) {
869 		ret = vm_validate_pt_pd_bos(peer_vm);
870 		if (ret)
871 			return ret;
872 	}
873 
874 	return 0;
875 }
876 
877 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
878 				 struct amdgpu_sync *sync)
879 {
880 	struct amdgpu_vm *peer_vm;
881 	int ret;
882 
883 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
884 			    vm_list_node) {
885 		struct amdgpu_bo *pd = peer_vm->root.base.bo;
886 
887 		ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
888 				       AMDGPU_SYNC_NE_OWNER,
889 				       AMDGPU_FENCE_OWNER_KFD);
890 		if (ret)
891 			return ret;
892 	}
893 
894 	return 0;
895 }
896 
897 static int process_update_pds(struct amdkfd_process_info *process_info,
898 			      struct amdgpu_sync *sync)
899 {
900 	struct amdgpu_vm *peer_vm;
901 	int ret;
902 
903 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
904 			    vm_list_node) {
905 		ret = vm_update_pds(peer_vm, sync);
906 		if (ret)
907 			return ret;
908 	}
909 
910 	return 0;
911 }
912 
913 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
914 		       struct dma_fence **ef)
915 {
916 	struct amdkfd_process_info *info = NULL;
917 	int ret;
918 
919 	if (!*process_info) {
920 		info = kzalloc(sizeof(*info), GFP_KERNEL);
921 		if (!info)
922 			return -ENOMEM;
923 
924 		mutex_init(&info->lock);
925 		INIT_LIST_HEAD(&info->vm_list_head);
926 		INIT_LIST_HEAD(&info->kfd_bo_list);
927 		INIT_LIST_HEAD(&info->userptr_valid_list);
928 		INIT_LIST_HEAD(&info->userptr_inval_list);
929 
930 		info->eviction_fence =
931 			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
932 						   current->mm);
933 		if (!info->eviction_fence) {
934 			pr_err("Failed to create eviction fence\n");
935 			ret = -ENOMEM;
936 			goto create_evict_fence_fail;
937 		}
938 
939 		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
940 		atomic_set(&info->evicted_bos, 0);
941 		INIT_DELAYED_WORK(&info->restore_userptr_work,
942 				  amdgpu_amdkfd_restore_userptr_worker);
943 
944 		*process_info = info;
945 		*ef = dma_fence_get(&info->eviction_fence->base);
946 	}
947 
948 	vm->process_info = *process_info;
949 
950 	/* Validate page directory and attach eviction fence */
951 	ret = amdgpu_bo_reserve(vm->root.base.bo, true);
952 	if (ret)
953 		goto reserve_pd_fail;
954 	ret = vm_validate_pt_pd_bos(vm);
955 	if (ret) {
956 		pr_err("validate_pt_pd_bos() failed\n");
957 		goto validate_pd_fail;
958 	}
959 	ret = amdgpu_bo_sync_wait(vm->root.base.bo,
960 				  AMDGPU_FENCE_OWNER_KFD, false);
961 	if (ret)
962 		goto wait_pd_fail;
963 	ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
964 	if (ret)
965 		goto reserve_shared_fail;
966 	amdgpu_bo_fence(vm->root.base.bo,
967 			&vm->process_info->eviction_fence->base, true);
968 	amdgpu_bo_unreserve(vm->root.base.bo);
969 
970 	/* Update process info */
971 	mutex_lock(&vm->process_info->lock);
972 	list_add_tail(&vm->vm_list_node,
973 			&(vm->process_info->vm_list_head));
974 	vm->process_info->n_vms++;
975 	mutex_unlock(&vm->process_info->lock);
976 
977 	return 0;
978 
979 reserve_shared_fail:
980 wait_pd_fail:
981 validate_pd_fail:
982 	amdgpu_bo_unreserve(vm->root.base.bo);
983 reserve_pd_fail:
984 	vm->process_info = NULL;
985 	if (info) {
986 		/* Two fence references: one in info and one in *ef */
987 		dma_fence_put(&info->eviction_fence->base);
988 		dma_fence_put(*ef);
989 		*ef = NULL;
990 		*process_info = NULL;
991 		put_pid(info->pid);
992 create_evict_fence_fail:
993 		mutex_destroy(&info->lock);
994 		kfree(info);
995 	}
996 	return ret;
997 }
998 
999 int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid,
1000 					  void **vm, void **process_info,
1001 					  struct dma_fence **ef)
1002 {
1003 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1004 	struct amdgpu_vm *new_vm;
1005 	int ret;
1006 
1007 	new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
1008 	if (!new_vm)
1009 		return -ENOMEM;
1010 
1011 	/* Initialize AMDGPU part of the VM */
1012 	ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
1013 	if (ret) {
1014 		pr_err("Failed init vm ret %d\n", ret);
1015 		goto amdgpu_vm_init_fail;
1016 	}
1017 
1018 	/* Initialize KFD part of the VM and process info */
1019 	ret = init_kfd_vm(new_vm, process_info, ef);
1020 	if (ret)
1021 		goto init_kfd_vm_fail;
1022 
1023 	*vm = (void *) new_vm;
1024 
1025 	return 0;
1026 
1027 init_kfd_vm_fail:
1028 	amdgpu_vm_fini(adev, new_vm);
1029 amdgpu_vm_init_fail:
1030 	kfree(new_vm);
1031 	return ret;
1032 }
1033 
1034 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
1035 					   struct file *filp, u32 pasid,
1036 					   void **vm, void **process_info,
1037 					   struct dma_fence **ef)
1038 {
1039 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1040 	struct drm_file *drm_priv = filp->private_data;
1041 	struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
1042 	struct amdgpu_vm *avm = &drv_priv->vm;
1043 	int ret;
1044 
1045 	/* Already a compute VM? */
1046 	if (avm->process_info)
1047 		return -EINVAL;
1048 
1049 	/* Convert VM into a compute VM */
1050 	ret = amdgpu_vm_make_compute(adev, avm, pasid);
1051 	if (ret)
1052 		return ret;
1053 
1054 	/* Initialize KFD part of the VM and process info */
1055 	ret = init_kfd_vm(avm, process_info, ef);
1056 	if (ret)
1057 		return ret;
1058 
1059 	*vm = (void *)avm;
1060 
1061 	return 0;
1062 }
1063 
1064 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1065 				    struct amdgpu_vm *vm)
1066 {
1067 	struct amdkfd_process_info *process_info = vm->process_info;
1068 	struct amdgpu_bo *pd = vm->root.base.bo;
1069 
1070 	if (!process_info)
1071 		return;
1072 
1073 	/* Release eviction fence from PD */
1074 	amdgpu_bo_reserve(pd, false);
1075 	amdgpu_bo_fence(pd, NULL, false);
1076 	amdgpu_bo_unreserve(pd);
1077 
1078 	/* Update process info */
1079 	mutex_lock(&process_info->lock);
1080 	process_info->n_vms--;
1081 	list_del(&vm->vm_list_node);
1082 	mutex_unlock(&process_info->lock);
1083 
1084 	vm->process_info = NULL;
1085 
1086 	/* Release per-process resources when last compute VM is destroyed */
1087 	if (!process_info->n_vms) {
1088 		WARN_ON(!list_empty(&process_info->kfd_bo_list));
1089 		WARN_ON(!list_empty(&process_info->userptr_valid_list));
1090 		WARN_ON(!list_empty(&process_info->userptr_inval_list));
1091 
1092 		dma_fence_put(&process_info->eviction_fence->base);
1093 		cancel_delayed_work_sync(&process_info->restore_userptr_work);
1094 		put_pid(process_info->pid);
1095 		mutex_destroy(&process_info->lock);
1096 		kfree(process_info);
1097 	}
1098 }
1099 
1100 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
1101 {
1102 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1103 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1104 
1105 	if (WARN_ON(!kgd || !vm))
1106 		return;
1107 
1108 	pr_debug("Destroying process vm %p\n", vm);
1109 
1110 	/* Release the VM context */
1111 	amdgpu_vm_fini(adev, avm);
1112 	kfree(vm);
1113 }
1114 
1115 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
1116 {
1117 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1118 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1119 
1120 	if (WARN_ON(!kgd || !vm))
1121 		return;
1122 
1123 	pr_debug("Releasing process vm %p\n", vm);
1124 
1125 	/* The original PASID of the amdgpu VM has already been released
1126 	 * when the amdgpu VM was converted to a compute VM. The current
1127 	 * PASID is managed by KFD and will be released on KFD process
1128 	 * destruction. Set the amdgpu PASID to 0 to avoid a duplicate
1129 	 * release.
1130 	 */
1131 	amdgpu_vm_release_compute(adev, avm);
1132 }
1133 
1134 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
1135 {
1136 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1137 	struct amdgpu_bo *pd = avm->root.base.bo;
1138 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1139 
1140 	if (adev->asic_type < CHIP_VEGA10)
1141 		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1142 	return avm->pd_phys_addr;
1143 }
1144 
1145 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1146 		struct kgd_dev *kgd, uint64_t va, uint64_t size,
1147 		void *vm, struct kgd_mem **mem,
1148 		uint64_t *offset, uint32_t flags)
1149 {
1150 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1151 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1152 	enum ttm_bo_type bo_type = ttm_bo_type_device;
1153 	struct sg_table *sg = NULL;
1154 	uint64_t user_addr = 0;
1155 	struct amdgpu_bo *bo;
1156 	struct amdgpu_bo_param bp;
1157 	u32 domain, alloc_domain;
1158 	u64 alloc_flags;
1159 	int ret;
1160 
1161 	/*
1162 	 * Check on which domain to allocate BO
1163 	 */
1164 	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1165 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1166 		alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1167 		alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
1168 			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1169 			AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1170 	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
1171 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1172 		alloc_flags = 0;
1173 	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1174 		domain = AMDGPU_GEM_DOMAIN_GTT;
1175 		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1176 		alloc_flags = 0;
1177 		if (!offset || !*offset)
1178 			return -EINVAL;
1179 		user_addr = untagged_addr(*offset);
1180 	} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1181 			KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1182 		domain = AMDGPU_GEM_DOMAIN_GTT;
1183 		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1184 		bo_type = ttm_bo_type_sg;
1185 		alloc_flags = 0;
1186 		if (size > UINT_MAX)
1187 			return -EINVAL;
1188 		sg = create_doorbell_sg(*offset, size);
1189 		if (!sg)
1190 			return -ENOMEM;
1191 	} else {
1192 		return -EINVAL;
1193 	}
1194 
1195 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1196 	if (!*mem) {
1197 		ret = -ENOMEM;
1198 		goto err;
1199 	}
1200 	INIT_LIST_HEAD(&(*mem)->bo_va_list);
1201 	mutex_init(&(*mem)->lock);
1202 	(*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1203 
1204 	/* Workaround for AQL queue wraparound bug. Map the same
1205 	 * memory twice. That means we only actually allocate half
1206 	 * the memory.
1207 	 */
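	/* For example (illustrative sizes): a 4 MB AQL allocation request
	 * results in a 2 MB BO that add_bo_to_vm() maps at both va and
	 * va + 2 MB, so the doubled mapping still covers the requested range.
	 */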
1208 	if ((*mem)->aql_queue)
1209 		size = size >> 1;
1210 
1211 	(*mem)->alloc_flags = flags;
1212 
1213 	amdgpu_sync_create(&(*mem)->sync);
1214 
1215 	ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1216 	if (ret) {
1217 		pr_debug("Insufficient system memory\n");
1218 		goto err_reserve_limit;
1219 	}
1220 
1221 	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1222 			va, size, domain_string(alloc_domain));
1223 
1224 	memset(&bp, 0, sizeof(bp));
1225 	bp.size = size;
1226 	bp.byte_align = 1;
1227 	bp.domain = alloc_domain;
1228 	bp.flags = alloc_flags;
1229 	bp.type = bo_type;
1230 	bp.resv = NULL;
1231 	ret = amdgpu_bo_create(adev, &bp, &bo);
1232 	if (ret) {
1233 		pr_debug("Failed to create BO on domain %s. ret %d\n",
1234 				domain_string(alloc_domain), ret);
1235 		goto err_bo_create;
1236 	}
1237 	if (bo_type == ttm_bo_type_sg) {
1238 		bo->tbo.sg = sg;
1239 		bo->tbo.ttm->sg = sg;
1240 	}
1241 	bo->kfd_bo = *mem;
1242 	(*mem)->bo = bo;
1243 	if (user_addr)
1244 		bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;
1245 
1246 	(*mem)->va = va;
1247 	(*mem)->domain = domain;
1248 	(*mem)->mapped_to_gpu_memory = 0;
1249 	(*mem)->process_info = avm->process_info;
1250 	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1251 
1252 	if (user_addr) {
1253 		ret = init_user_pages(*mem, user_addr);
1254 		if (ret)
1255 			goto allocate_init_user_pages_failed;
1256 	}
1257 
1258 	if (offset)
1259 		*offset = amdgpu_bo_mmap_offset(bo);
1260 
1261 	return 0;
1262 
1263 allocate_init_user_pages_failed:
1264 	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1265 	amdgpu_bo_unref(&bo);
1266 	/* Don't unreserve system mem limit twice */
1267 	goto err_reserve_limit;
1268 err_bo_create:
1269 	unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1270 err_reserve_limit:
1271 	mutex_destroy(&(*mem)->lock);
1272 	kfree(*mem);
1273 err:
1274 	if (sg) {
1275 		sg_free_table(sg);
1276 		kfree(sg);
1277 	}
1278 	return ret;
1279 }
1280 
1281 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1282 		struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size)
1283 {
1284 	struct amdkfd_process_info *process_info = mem->process_info;
1285 	unsigned long bo_size = mem->bo->tbo.mem.size;
1286 	struct kfd_bo_va_list *entry, *tmp;
1287 	struct bo_vm_reservation_context ctx;
1288 	struct ttm_validate_buffer *bo_list_entry;
1289 	unsigned int mapped_to_gpu_memory;
1290 	int ret;
1291 	bool is_imported = false;
1292 
1293 	mutex_lock(&mem->lock);
1294 	mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1295 	is_imported = mem->is_imported;
1296 	mutex_unlock(&mem->lock);
1297 	/* lock is not needed after this, since mem is unused and will
1298 	 * be freed anyway
1299 	 */
1300 
1301 	if (mapped_to_gpu_memory > 0) {
1302 		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1303 				mem->va, bo_size);
1304 		return -EBUSY;
1305 	}
1306 
1307 	/* Make sure restore workers don't access the BO any more */
1308 	bo_list_entry = &mem->validate_list;
1309 	mutex_lock(&process_info->lock);
1310 	list_del(&bo_list_entry->head);
1311 	mutex_unlock(&process_info->lock);
1312 
1313 	/* No more MMU notifiers */
1314 	amdgpu_mn_unregister(mem->bo);
1315 
1316 	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1317 	if (unlikely(ret))
1318 		return ret;
1319 
1320 	/* The eviction fence should be removed by the last unmap.
1321 	 * TODO: Log an error condition if the bo still has the eviction fence
1322 	 * attached
1323 	 */
1324 	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1325 					process_info->eviction_fence);
1326 	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1327 		mem->va + bo_size * (1 + mem->aql_queue));
1328 
1329 	/* Remove from VM internal data structures */
1330 	list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
1331 		remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
1332 				entry, bo_size);
1333 
1334 	ret = unreserve_bo_and_vms(&ctx, false, false);
1335 
1336 	/* Free the sync object */
1337 	amdgpu_sync_free(&mem->sync);
1338 
1339 	/* If the SG is not NULL, it's one we created for a doorbell or mmio
1340 	 * remap BO. We need to free it.
1341 	 */
1342 	if (mem->bo->tbo.sg) {
1343 		sg_free_table(mem->bo->tbo.sg);
1344 		kfree(mem->bo->tbo.sg);
1345 	}
1346 
1347 	/* Update the size of the BO being freed if it was allocated from
1348 	 * VRAM and is not imported.
1349 	 */
1350 	if (size) {
1351 		if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
1352 		    (!is_imported))
1353 			*size = bo_size;
1354 		else
1355 			*size = 0;
1356 	}
1357 
1358 	/* Free the BO*/
1359 	drm_gem_object_put(&mem->bo->tbo.base);
1360 	mutex_destroy(&mem->lock);
1361 	kfree(mem);
1362 
1363 	return ret;
1364 }
1365 
1366 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1367 		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1368 {
1369 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1370 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1371 	int ret;
1372 	struct amdgpu_bo *bo;
1373 	uint32_t domain;
1374 	struct kfd_bo_va_list *entry;
1375 	struct bo_vm_reservation_context ctx;
1376 	struct kfd_bo_va_list *bo_va_entry = NULL;
1377 	struct kfd_bo_va_list *bo_va_entry_aql = NULL;
1378 	unsigned long bo_size;
1379 	bool is_invalid_userptr = false;
1380 
1381 	bo = mem->bo;
1382 	if (!bo) {
1383 		pr_err("Invalid BO when mapping memory to GPU\n");
1384 		return -EINVAL;
1385 	}
1386 
1387 	/* Make sure restore is not running concurrently. Since we
1388 	 * don't map invalid userptr BOs, we rely on the next restore
1389 	 * worker to do the mapping
1390 	 */
1391 	mutex_lock(&mem->process_info->lock);
1392 
1393 	/* Lock mmap-sem. If we find an invalid userptr BO, we can be
1394 	 * sure that the MMU notifier is no longer running
1395 	 * concurrently and the queues are actually stopped
1396 	 */
1397 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1398 		mmap_write_lock(current->mm);
1399 		is_invalid_userptr = atomic_read(&mem->invalid);
1400 		mmap_write_unlock(current->mm);
1401 	}
1402 
1403 	mutex_lock(&mem->lock);
1404 
1405 	domain = mem->domain;
1406 	bo_size = bo->tbo.mem.size;
1407 
1408 	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1409 			mem->va,
1410 			mem->va + bo_size * (1 + mem->aql_queue),
1411 			vm, domain_string(domain));
1412 
1413 	ret = reserve_bo_and_vm(mem, vm, &ctx);
1414 	if (unlikely(ret))
1415 		goto out;
1416 
1417 	/* Userptr can be marked as "not invalid", but not actually be
1418 	 * validated yet (still in the system domain). In that case
1419 	 * the queues are still stopped and we can leave mapping for
1420 	 * the next restore worker
1421 	 */
1422 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1423 	    bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
1424 		is_invalid_userptr = true;
1425 
1426 	if (check_if_add_bo_to_vm(avm, mem)) {
1427 		ret = add_bo_to_vm(adev, mem, avm, false,
1428 				&bo_va_entry);
1429 		if (ret)
1430 			goto add_bo_to_vm_failed;
1431 		if (mem->aql_queue) {
1432 			ret = add_bo_to_vm(adev, mem, avm,
1433 					true, &bo_va_entry_aql);
1434 			if (ret)
1435 				goto add_bo_to_vm_failed_aql;
1436 		}
1437 	} else {
1438 		ret = vm_validate_pt_pd_bos(avm);
1439 		if (unlikely(ret))
1440 			goto add_bo_to_vm_failed;
1441 	}
1442 
1443 	if (mem->mapped_to_gpu_memory == 0 &&
1444 	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1445 		/* Validate BO only once. The eviction fence gets added to BO
1446 		 * the first time it is mapped. Validate will wait for all
1447 		 * background evictions to complete.
1448 		 */
1449 		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1450 		if (ret) {
1451 			pr_debug("Validate failed\n");
1452 			goto map_bo_to_gpuvm_failed;
1453 		}
1454 	}
1455 
1456 	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1457 		if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
1458 			pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1459 					entry->va, entry->va + bo_size,
1460 					entry);
1461 
1462 			ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
1463 					      is_invalid_userptr);
1464 			if (ret) {
1465 				pr_err("Failed to map bo to gpuvm\n");
1466 				goto map_bo_to_gpuvm_failed;
1467 			}
1468 
1469 			ret = vm_update_pds(vm, ctx.sync);
1470 			if (ret) {
1471 				pr_err("Failed to update page directories\n");
1472 				goto map_bo_to_gpuvm_failed;
1473 			}
1474 
1475 			entry->is_mapped = true;
1476 			mem->mapped_to_gpu_memory++;
1477 			pr_debug("\t INC mapping count %d\n",
1478 					mem->mapped_to_gpu_memory);
1479 		}
1480 	}
1481 
1482 	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
1483 		amdgpu_bo_fence(bo,
1484 				&avm->process_info->eviction_fence->base,
1485 				true);
1486 	ret = unreserve_bo_and_vms(&ctx, false, false);
1487 
1488 	goto out;
1489 
1490 map_bo_to_gpuvm_failed:
1491 	if (bo_va_entry_aql)
1492 		remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
1493 add_bo_to_vm_failed_aql:
1494 	if (bo_va_entry)
1495 		remove_bo_from_vm(adev, bo_va_entry, bo_size);
1496 add_bo_to_vm_failed:
1497 	unreserve_bo_and_vms(&ctx, false, false);
1498 out:
1499 	mutex_unlock(&mem->process_info->lock);
1500 	mutex_unlock(&mem->lock);
1501 	return ret;
1502 }
1503 
1504 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1505 		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1506 {
1507 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1508 	struct amdkfd_process_info *process_info =
1509 		((struct amdgpu_vm *)vm)->process_info;
1510 	unsigned long bo_size = mem->bo->tbo.mem.size;
1511 	struct kfd_bo_va_list *entry;
1512 	struct bo_vm_reservation_context ctx;
1513 	int ret;
1514 
1515 	mutex_lock(&mem->lock);
1516 
1517 	ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1518 	if (unlikely(ret))
1519 		goto out;
1520 	/* If no VMs were reserved, it means the BO wasn't actually mapped */
1521 	if (ctx.n_vms == 0) {
1522 		ret = -EINVAL;
1523 		goto unreserve_out;
1524 	}
1525 
1526 	ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
1527 	if (unlikely(ret))
1528 		goto unreserve_out;
1529 
1530 	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1531 		mem->va,
1532 		mem->va + bo_size * (1 + mem->aql_queue),
1533 		vm);
1534 
1535 	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1536 		if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1537 			pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1538 					entry->va,
1539 					entry->va + bo_size,
1540 					entry);
1541 
1542 			ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1543 			if (ret == 0) {
1544 				entry->is_mapped = false;
1545 			} else {
1546 				pr_err("failed to unmap VA 0x%llx\n",
1547 						mem->va);
1548 				goto unreserve_out;
1549 			}
1550 
1551 			mem->mapped_to_gpu_memory--;
1552 			pr_debug("\t DEC mapping count %d\n",
1553 					mem->mapped_to_gpu_memory);
1554 		}
1555 	}
1556 
1557 	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
1558 	 * required.
1559 	 */
1560 	if (mem->mapped_to_gpu_memory == 0 &&
1561 	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
1562 		amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1563 						process_info->eviction_fence);
1564 
1565 unreserve_out:
1566 	unreserve_bo_and_vms(&ctx, false, false);
1567 out:
1568 	mutex_unlock(&mem->lock);
1569 	return ret;
1570 }
1571 
1572 int amdgpu_amdkfd_gpuvm_sync_memory(
1573 		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1574 {
1575 	struct amdgpu_sync sync;
1576 	int ret;
1577 
1578 	amdgpu_sync_create(&sync);
1579 
1580 	mutex_lock(&mem->lock);
1581 	amdgpu_sync_clone(&mem->sync, &sync);
1582 	mutex_unlock(&mem->lock);
1583 
1584 	ret = amdgpu_sync_wait(&sync, intr);
1585 	amdgpu_sync_free(&sync);
1586 	return ret;
1587 }
1588 
1589 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1590 		struct kgd_mem *mem, void **kptr, uint64_t *size)
1591 {
1592 	int ret;
1593 	struct amdgpu_bo *bo = mem->bo;
1594 
1595 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1596 		pr_err("userptr can't be mapped to kernel\n");
1597 		return -EINVAL;
1598 	}
1599 
1600 	/* Remove the kgd_mem from the kfd_bo_list so that this BO is not
1601 	 * re-validated when BOs are restored after an eviction.
1602 	 */
1603 	mutex_lock(&mem->process_info->lock);
1604 
1605 	ret = amdgpu_bo_reserve(bo, true);
1606 	if (ret) {
1607 		pr_err("Failed to reserve bo. ret %d\n", ret);
1608 		goto bo_reserve_failed;
1609 	}
1610 
1611 	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1612 	if (ret) {
1613 		pr_err("Failed to pin bo. ret %d\n", ret);
1614 		goto pin_failed;
1615 	}
1616 
1617 	ret = amdgpu_bo_kmap(bo, kptr);
1618 	if (ret) {
1619 		pr_err("Failed to map bo to kernel. ret %d\n", ret);
1620 		goto kmap_failed;
1621 	}
1622 
1623 	amdgpu_amdkfd_remove_eviction_fence(
1624 		bo, mem->process_info->eviction_fence);
1625 	list_del_init(&mem->validate_list.head);
1626 
1627 	if (size)
1628 		*size = amdgpu_bo_size(bo);
1629 
1630 	amdgpu_bo_unreserve(bo);
1631 
1632 	mutex_unlock(&mem->process_info->lock);
1633 	return 0;
1634 
1635 kmap_failed:
1636 	amdgpu_bo_unpin(bo);
1637 pin_failed:
1638 	amdgpu_bo_unreserve(bo);
1639 bo_reserve_failed:
1640 	mutex_unlock(&mem->process_info->lock);
1641 
1642 	return ret;
1643 }
1644 
1645 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1646 					      struct kfd_vm_fault_info *mem)
1647 {
1648 	struct amdgpu_device *adev;
1649 
1650 	adev = (struct amdgpu_device *)kgd;
1651 	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1652 		*mem = *adev->gmc.vm_fault_info;
1653 		mb();
1654 		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1655 	}
1656 	return 0;
1657 }
1658 
1659 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1660 				      struct dma_buf *dma_buf,
1661 				      uint64_t va, void *vm,
1662 				      struct kgd_mem **mem, uint64_t *size,
1663 				      uint64_t *mmap_offset)
1664 {
1665 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1666 	struct drm_gem_object *obj;
1667 	struct amdgpu_bo *bo;
1668 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1669 
1670 	if (dma_buf->ops != &amdgpu_dmabuf_ops)
1671 		/* Can't handle non-graphics buffers */
1672 		return -EINVAL;
1673 
1674 	obj = dma_buf->priv;
1675 	if (drm_to_adev(obj->dev) != adev)
1676 		/* Can't handle buffers from other devices */
1677 		return -EINVAL;
1678 
1679 	bo = gem_to_amdgpu_bo(obj);
1680 	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1681 				    AMDGPU_GEM_DOMAIN_GTT)))
1682 		/* Only VRAM and GTT BOs are supported */
1683 		return -EINVAL;
1684 
1685 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1686 	if (!*mem)
1687 		return -ENOMEM;
1688 
1689 	if (size)
1690 		*size = amdgpu_bo_size(bo);
1691 
1692 	if (mmap_offset)
1693 		*mmap_offset = amdgpu_bo_mmap_offset(bo);
1694 
1695 	INIT_LIST_HEAD(&(*mem)->bo_va_list);
1696 	mutex_init(&(*mem)->lock);
1697 
1698 	(*mem)->alloc_flags =
1699 		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1700 		KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
1701 		| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
1702 		| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1703 
1704 	drm_gem_object_get(&bo->tbo.base);
1705 	(*mem)->bo = bo;
1706 	(*mem)->va = va;
1707 	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1708 		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1709 	(*mem)->mapped_to_gpu_memory = 0;
1710 	(*mem)->process_info = avm->process_info;
1711 	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1712 	amdgpu_sync_create(&(*mem)->sync);
1713 	(*mem)->is_imported = true;
1714 
1715 	return 0;
1716 }
1717 
1718 /* Evict a userptr BO by stopping the queues if necessary
1719  *
1720  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1721  * cannot do any memory allocations, and cannot take any locks that
1722  * are held elsewhere while allocating memory. Therefore this is as
1723  * simple as possible, using atomic counters.
1724  *
1725  * It doesn't do anything to the BO itself. The real work happens in
1726  * restore, where we get updated page addresses. This function only
1727  * ensures that GPU access to the BO is stopped.
1728  */
1729 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1730 				struct mm_struct *mm)
1731 {
1732 	struct amdkfd_process_info *process_info = mem->process_info;
1733 	int evicted_bos;
1734 	int r = 0;
1735 
1736 	atomic_inc(&mem->invalid);
1737 	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1738 	if (evicted_bos == 1) {
1739 		/* First eviction, stop the queues */
1740 		r = kgd2kfd_quiesce_mm(mm);
1741 		if (r)
1742 			pr_err("Failed to quiesce KFD\n");
1743 		schedule_delayed_work(&process_info->restore_userptr_work,
1744 			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1745 	}
1746 
1747 	return r;
1748 }
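
/* Bookkeeping summary for the eviction above (no additional logic):
 *
 *   mem->invalid              - per-BO counter, bumped on every eviction
 *   process_info->evicted_bos - per-process count of pending evictions
 *
 * Only the 0 -> 1 transition of evicted_bos quiesces the queues and
 * schedules the restore worker; the worker later clears evicted_bos with
 * a cmpxchg, so any concurrent eviction forces another restore attempt.
 */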
1749 
1750 /* Update invalid userptr BOs
1751  *
1752  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1753  * userptr_inval_list and updates user pages for all BOs that have
1754  * been invalidated since their last update.
1755  */
1756 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1757 				     struct mm_struct *mm)
1758 {
1759 	struct kgd_mem *mem, *tmp_mem;
1760 	struct amdgpu_bo *bo;
1761 	struct ttm_operation_ctx ctx = { false, false };
1762 	int invalid, ret;
1763 
1764 	/* Move all invalidated BOs to the userptr_inval_list and
1765 	 * release their user pages by migration to the CPU domain
1766 	 */
1767 	list_for_each_entry_safe(mem, tmp_mem,
1768 				 &process_info->userptr_valid_list,
1769 				 validate_list.head) {
1770 		if (!atomic_read(&mem->invalid))
1771 			continue; /* BO is still valid */
1772 
1773 		bo = mem->bo;
1774 
1775 		if (amdgpu_bo_reserve(bo, true))
1776 			return -EAGAIN;
1777 		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1778 		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1779 		amdgpu_bo_unreserve(bo);
1780 		if (ret) {
1781 			pr_err("%s: Failed to invalidate userptr BO\n",
1782 			       __func__);
1783 			return -EAGAIN;
1784 		}
1785 
1786 		list_move_tail(&mem->validate_list.head,
1787 			       &process_info->userptr_inval_list);
1788 	}
1789 
1790 	if (list_empty(&process_info->userptr_inval_list))
1791 		return 0; /* All evicted userptr BOs were freed */
1792 
1793 	/* Go through userptr_inval_list and update any invalid user_pages */
1794 	list_for_each_entry(mem, &process_info->userptr_inval_list,
1795 			    validate_list.head) {
1796 		invalid = atomic_read(&mem->invalid);
1797 		if (!invalid)
1798 			/* BO wasn't invalidated since the last revalidation
1799 			 * attempt; its user pages are already up to date.
1800 			 */
1801 			continue;
1802 
1803 		bo = mem->bo;
1804 
1805 		/* Get updated user pages */
1806 		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
1807 		if (ret) {
1808 			pr_debug("%s: Failed to get user pages: %d\n",
1809 				__func__, ret);
1810 
1811 			/* Return error -EBUSY or -ENOMEM, retry restore */
1812 			return ret;
1813 		}
1814 
1815 		/*
1816 		 * FIXME: Cannot ignore the return code, must hold
1817 		 * notifier_lock
1818 		 */
1819 		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1820 
1821 		/* Mark the BO as valid unless it was invalidated
1822 		 * again concurrently.
1823 		 */
1824 		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
1825 			return -EAGAIN;
1826 	}
1827 
1828 	return 0;
1829 }
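
/* A non-zero return here (-EAGAIN, -EBUSY, -ENOMEM, ...) leaves
 * process_info->evicted_bos untouched, so the restore worker below will
 * simply reschedule itself and retry later.
 */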
1830 
1831 /* Validate invalid userptr BOs
1832  *
1833  * Validates BOs on the userptr_inval_list, and moves them back to the
1834  * userptr_valid_list. Also updates GPUVM page tables with new page
1835  * addresses and waits for the page table updates to complete.
1836  */
1837 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1838 {
1839 	struct amdgpu_bo_list_entry *pd_bo_list_entries;
1840 	struct list_head resv_list, duplicates;
1841 	struct ww_acquire_ctx ticket;
1842 	struct amdgpu_sync sync;
1843 
1844 	struct amdgpu_vm *peer_vm;
1845 	struct kgd_mem *mem, *tmp_mem;
1846 	struct amdgpu_bo *bo;
1847 	struct ttm_operation_ctx ctx = { false, false };
1848 	int i, ret;
1849 
1850 	pd_bo_list_entries = kcalloc(process_info->n_vms,
1851 				     sizeof(struct amdgpu_bo_list_entry),
1852 				     GFP_KERNEL);
1853 	if (!pd_bo_list_entries) {
1854 		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1855 		ret = -ENOMEM;
1856 		goto out_no_mem;
1857 	}
1858 
1859 	INIT_LIST_HEAD(&resv_list);
1860 	INIT_LIST_HEAD(&duplicates);
1861 
1862 	/* Get all the page directory BOs that need to be reserved */
1863 	i = 0;
1864 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
1865 			    vm_list_node)
1866 		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1867 				    &pd_bo_list_entries[i++]);
1868 	/* Add the userptr_inval_list entries to resv_list */
1869 	list_for_each_entry(mem, &process_info->userptr_inval_list,
1870 			    validate_list.head) {
1871 		list_add_tail(&mem->resv_list.head, &resv_list);
1872 		mem->resv_list.bo = mem->validate_list.bo;
1873 		mem->resv_list.num_shared = mem->validate_list.num_shared;
1874 	}
1875 
1876 	/* Reserve all BOs and page tables for validation */
1877 	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
1878 	WARN(!list_empty(&duplicates), "Duplicates should be empty");
1879 	if (ret)
1880 		goto out_free;
1881 
1882 	amdgpu_sync_create(&sync);
1883 
1884 	ret = process_validate_vms(process_info);
1885 	if (ret)
1886 		goto unreserve_out;
1887 
1888 	/* Validate BOs and update GPUVM page tables */
1889 	list_for_each_entry_safe(mem, tmp_mem,
1890 				 &process_info->userptr_inval_list,
1891 				 validate_list.head) {
1892 		struct kfd_bo_va_list *bo_va_entry;
1893 
1894 		bo = mem->bo;
1895 
1896 		/* Validate the BO if we got user pages */
1897 		if (bo->tbo.ttm->pages[0]) {
1898 			amdgpu_bo_placement_from_domain(bo, mem->domain);
1899 			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1900 			if (ret) {
1901 				pr_err("%s: failed to validate BO\n", __func__);
1902 				goto unreserve_out;
1903 			}
1904 		}
1905 
1906 		list_move_tail(&mem->validate_list.head,
1907 			       &process_info->userptr_valid_list);
1908 
1909 		/* Update mapping. If the BO was not validated
1910 		 * (because we couldn't get user pages), this will
1911 		 * clear the page table entries, which will result in
1912 		 * VM faults if the GPU tries to access the invalid
1913 		 * memory.
1914 		 */
1915 		list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1916 			if (!bo_va_entry->is_mapped)
1917 				continue;
1918 
1919 			ret = update_gpuvm_pte((struct amdgpu_device *)
1920 					       bo_va_entry->kgd_dev,
1921 					       bo_va_entry, &sync);
1922 			if (ret) {
1923 				pr_err("%s: update PTE failed\n", __func__);
1924 				/* make sure this gets validated again */
1925 				atomic_inc(&mem->invalid);
1926 				goto unreserve_out;
1927 			}
1928 		}
1929 	}
1930 
1931 	/* Update page directories */
1932 	ret = process_update_pds(process_info, &sync);
1933 
1934 unreserve_out:
1935 	ttm_eu_backoff_reservation(&ticket, &resv_list);
1936 	amdgpu_sync_wait(&sync, false);
1937 	amdgpu_sync_free(&sync);
1938 out_free:
1939 	kfree(pd_bo_list_entries);
1940 out_no_mem:
1941 
1942 	return ret;
1943 }
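
/* Reservation pattern used above: the page directory BOs of all peer VMs
 * and every BO on userptr_inval_list are collected into one resv_list and
 * reserved together by a single ttm_eu_reserve_buffers() call, so BO
 * validation and page table updates happen under one ww_acquire_ctx
 * ticket and are synced through the same amdgpu_sync object.
 */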
1944 
1945 /* Worker callback to restore evicted userptr BOs
1946  *
1947  * Tries to update and validate all userptr BOs. If successful and no
1948  * concurrent evictions happened, the queues are restarted. Otherwise,
1949  * reschedule for another attempt later.
1950  */
1951 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1952 {
1953 	struct delayed_work *dwork = to_delayed_work(work);
1954 	struct amdkfd_process_info *process_info =
1955 		container_of(dwork, struct amdkfd_process_info,
1956 			     restore_userptr_work);
1957 	struct task_struct *usertask;
1958 	struct mm_struct *mm;
1959 	int evicted_bos;
1960 
1961 	evicted_bos = atomic_read(&process_info->evicted_bos);
1962 	if (!evicted_bos)
1963 		return;
1964 
1965 	/* Reference task and mm in case of concurrent process termination */
1966 	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
1967 	if (!usertask)
1968 		return;
1969 	mm = get_task_mm(usertask);
1970 	if (!mm) {
1971 		put_task_struct(usertask);
1972 		return;
1973 	}
1974 
1975 	mutex_lock(&process_info->lock);
1976 
1977 	if (update_invalid_user_pages(process_info, mm))
1978 		goto unlock_out;
1979 	/* userptr_inval_list can be empty if all evicted userptr BOs
1980 	 * have been freed. In that case there is nothing to validate
1981 	 * and we can just restart the queues.
1982 	 */
1983 	if (!list_empty(&process_info->userptr_inval_list)) {
1984 		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
1985 			goto unlock_out; /* Concurrent eviction, try again */
1986 
1987 		if (validate_invalid_user_pages(process_info))
1988 			goto unlock_out;
1989 	}
1990 	/* Final check for concurrent eviction and atomic update. If
1991 	 * another eviction happens after successful update, it will
1992 	 * be a first eviction that calls quiesce_mm. The eviction
1993 	 * reference counting inside KFD will handle this case.
1994 	 */
1995 	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
1996 	    evicted_bos)
1997 		goto unlock_out;
1998 	evicted_bos = 0;
1999 	if (kgd2kfd_resume_mm(mm)) {
2000 		pr_err("%s: Failed to resume KFD\n", __func__);
2001 		/* No recovery from this failure. Probably the CP is
2002 		 * hanging. No point trying again.
2003 		 */
2004 	}
2005 
2006 unlock_out:
2007 	mutex_unlock(&process_info->lock);
2008 	mmput(mm);
2009 	put_task_struct(usertask);
2010 
2011 	/* If validation failed, reschedule another attempt */
2012 	if (evicted_bos)
2013 		schedule_delayed_work(&process_info->restore_userptr_work,
2014 			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2015 }
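
/* The worker above snapshots evicted_bos before doing any work and only
 * resumes the queues if a cmpxchg from that snapshot back to 0 succeeds.
 * If another eviction raced in, the cmpxchg fails, the queues stay
 * quiesced and the work is rescheduled, so user pages are never declared
 * valid while a newer invalidation is still pending.
 */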
2016 
2017 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2018  *   KFD process identified by process_info
2019  *
2020  * @process_info: amdkfd_process_info of the KFD process
2021  *
2022  * After memory eviction, the restore thread calls this function. It must
2023  * be called while the process is still valid. BO restore involves:
2024  *
2025  * 1.  Release the old eviction fence and create a new one
2026  * 2.  Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
2027  * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
2028  *     BOs that need to be reserved
2029  * 4.  Reserve all the BOs
2030  * 5.  Validate PD and PT BOs
2031  * 6.  Validate all KFD BOs using kfd_bo_list, map them and add the new fence
2032  * 7.  Add the fence to all PD and PT BOs
2033  * 8.  Unreserve all BOs
2034  */
2035 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2036 {
2037 	struct amdgpu_bo_list_entry *pd_bo_list;
2038 	struct amdkfd_process_info *process_info = info;
2039 	struct amdgpu_vm *peer_vm;
2040 	struct kgd_mem *mem;
2041 	struct bo_vm_reservation_context ctx;
2042 	struct amdgpu_amdkfd_fence *new_fence;
2043 	int ret = 0, i;
2044 	struct list_head duplicate_save;
2045 	struct amdgpu_sync sync_obj;
2046 
2047 	INIT_LIST_HEAD(&duplicate_save);
2048 	INIT_LIST_HEAD(&ctx.list);
2049 	INIT_LIST_HEAD(&ctx.duplicates);
2050 
2051 	pd_bo_list = kcalloc(process_info->n_vms,
2052 			     sizeof(struct amdgpu_bo_list_entry),
2053 			     GFP_KERNEL);
2054 	if (!pd_bo_list)
2055 		return -ENOMEM;
2056 
2057 	i = 0;
2058 	mutex_lock(&process_info->lock);
2059 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2060 			vm_list_node)
2061 		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2062 
2063 	/* Reserve all BOs and page tables/directory. Add all BOs from
2064 	 * kfd_bo_list to ctx.list
2065 	 */
2066 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2067 			    validate_list.head) {
2068 
2069 		list_add_tail(&mem->resv_list.head, &ctx.list);
2070 		mem->resv_list.bo = mem->validate_list.bo;
2071 		mem->resv_list.num_shared = mem->validate_list.num_shared;
2072 	}
2073 
2074 	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2075 				     false, &duplicate_save);
2076 	if (ret) {
2077 		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2078 		goto ttm_reserve_fail;
2079 	}
2080 
2081 	amdgpu_sync_create(&sync_obj);
2082 
2083 	/* Validate PDs and PTs */
2084 	ret = process_validate_vms(process_info);
2085 	if (ret)
2086 		goto validate_map_fail;
2087 
2088 	ret = process_sync_pds_resv(process_info, &sync_obj);
2089 	if (ret) {
2090 		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2091 		goto validate_map_fail;
2092 	}
2093 
2094 	/* Validate BOs and map them to GPUVM (update VM page tables). */
2095 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2096 			    validate_list.head) {
2097 
2098 		struct amdgpu_bo *bo = mem->bo;
2099 		uint32_t domain = mem->domain;
2100 		struct kfd_bo_va_list *bo_va_entry;
2101 
2102 		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2103 		if (ret) {
2104 			pr_debug("Memory eviction: Validate BOs failed. Try again\n");
2105 			goto validate_map_fail;
2106 		}
2107 		ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
2108 		if (ret) {
2109 			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2110 			goto validate_map_fail;
2111 		}
2112 		list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2113 				    bo_list) {
2114 			ret = update_gpuvm_pte((struct amdgpu_device *)
2115 					      bo_va_entry->kgd_dev,
2116 					      bo_va_entry,
2117 					      &sync_obj);
2118 			if (ret) {
2119 				pr_debug("Memory eviction: update PTE failed. Try again\n");
2120 				goto validate_map_fail;
2121 			}
2122 		}
2123 	}
2124 
2125 	/* Update page directories */
2126 	ret = process_update_pds(process_info, &sync_obj);
2127 	if (ret) {
2128 		pr_debug("Memory eviction: update PDs failed. Try again\n");
2129 		goto validate_map_fail;
2130 	}
2131 
2132 	/* Wait for validate and PT updates to finish */
2133 	amdgpu_sync_wait(&sync_obj, false);
2134 
2135 	/* Release the old eviction fence and create a new one. Because a
2136 	 * fence only goes from unsignaled to signaled, it cannot be reused.
2137 	 * Use the context and mm from the old fence.
2138 	 */
2139 	new_fence = amdgpu_amdkfd_fence_create(
2140 				process_info->eviction_fence->base.context,
2141 				process_info->eviction_fence->mm);
2142 	if (!new_fence) {
2143 		pr_err("Failed to create eviction fence\n");
2144 		ret = -ENOMEM;
2145 		goto validate_map_fail;
2146 	}
2147 	dma_fence_put(&process_info->eviction_fence->base);
2148 	process_info->eviction_fence = new_fence;
2149 	*ef = dma_fence_get(&new_fence->base);
2150 
2151 	/* Attach new eviction fence to all BOs */
2152 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2153 		validate_list.head)
2154 		amdgpu_bo_fence(mem->bo,
2155 			&process_info->eviction_fence->base, true);
2156 
2157 	/* Attach eviction fence to PD / PT BOs */
2158 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2159 			    vm_list_node) {
2160 		struct amdgpu_bo *bo = peer_vm->root.base.bo;
2161 
2162 		amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2163 	}
2164 
2165 validate_map_fail:
2166 	ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2167 	amdgpu_sync_free(&sync_obj);
2168 ttm_reserve_fail:
2169 	mutex_unlock(&process_info->lock);
2170 	kfree(pd_bo_list);
2171 	return ret;
2172 }
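
/* A minimal caller sketch (an illustration, not code from this file):
 * the KFD process restore path is expected to use this roughly as
 *
 *	struct dma_fence *ef;
 *	int r = amdgpu_amdkfd_gpuvm_restore_process_bos(process_info, &ef);
 *	if (r)
 *		goto retry_later;	// old eviction fence stays in place
 *	// success: *ef references the newly created eviction fence
 *
 * On success the caller owns one reference to *ef and must eventually
 * drop it with dma_fence_put().
 */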
2173 
2174 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2175 {
2176 	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2177 	struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2178 	int ret;
2179 
2180 	if (!info || !gws)
2181 		return -EINVAL;
2182 
2183 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2184 	if (!*mem)
2185 		return -ENOMEM;
2186 
2187 	mutex_init(&(*mem)->lock);
2188 	INIT_LIST_HEAD(&(*mem)->bo_va_list);
2189 	(*mem)->bo = amdgpu_bo_ref(gws_bo);
2190 	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2191 	(*mem)->process_info = process_info;
2192 	add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2193 	amdgpu_sync_create(&(*mem)->sync);
2194 
2196 	/* Validate the GWS BO the first time it is added to the process */
2197 	mutex_lock(&(*mem)->process_info->lock);
2198 	ret = amdgpu_bo_reserve(gws_bo, false);
2199 	if (unlikely(ret)) {
2200 		pr_err("Reserve gws bo failed %d\n", ret);
2201 		goto bo_reservation_failure;
2202 	}
2203 
2204 	ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2205 	if (ret) {
2206 		pr_err("GWS BO validate failed %d\n", ret);
2207 		goto bo_validation_failure;
2208 	}
2209 	/* The GWS resource is shared between amdgpu and amdkfd.
2210 	 * Add the process eviction fence to the BO so they can
2211 	 * evict each other.
2212 	 */
2213 	ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2214 	if (ret)
2215 		goto reserve_shared_fail;
2216 	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2217 	amdgpu_bo_unreserve(gws_bo);
2218 	mutex_unlock(&(*mem)->process_info->lock);
2219 
2220 	return ret;
2221 
2222 reserve_shared_fail:
2223 bo_validation_failure:
2224 	amdgpu_bo_unreserve(gws_bo);
2225 bo_reservation_failure:
2226 	mutex_unlock(&(*mem)->process_info->lock);
2227 	amdgpu_sync_free(&(*mem)->sync);
2228 	remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2229 	amdgpu_bo_unref(&gws_bo);
2230 	mutex_destroy(&(*mem)->lock);
2231 	kfree(*mem);
2232 	*mem = NULL;
2233 	return ret;
2234 }
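
/* The GWS buffer registered above is shared between amdgpu and amdkfd;
 * attaching the process eviction fence lets either side evict the
 * other's work. The extra amdgpu_bo_ref() taken there is dropped again in
 * amdgpu_amdkfd_remove_gws_from_process().
 */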
2235 
2236 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2237 {
2238 	int ret;
2239 	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2240 	struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2241 	struct amdgpu_bo *gws_bo = kgd_mem->bo;
2242 
2243 	/* Remove the BO from the process's validate list so the restore
2244 	 * worker won't touch it anymore.
2245 	 */
2246 	remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2247 
2248 	ret = amdgpu_bo_reserve(gws_bo, false);
2249 	if (unlikely(ret)) {
2250 		pr_err("Reserve gws bo failed %d\n", ret);
2251 		//TODO add BO back to validate_list?
2252 		return ret;
2253 	}
2254 	amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2255 			process_info->eviction_fence);
2256 	amdgpu_bo_unreserve(gws_bo);
2257 	amdgpu_sync_free(&kgd_mem->sync);
2258 	amdgpu_bo_unref(&gws_bo);
2259 	mutex_destroy(&kgd_mem->lock);
2260 	kfree(mem);
2261 	return 0;
2262 }
2263 
2264 /* Returns GPU-specific tiling mode information */
2265 int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
2266 				struct tile_config *config)
2267 {
2268 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
2269 
2270 	config->gb_addr_config = adev->gfx.config.gb_addr_config;
2271 	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
2272 	config->num_tile_configs =
2273 			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2274 	config->macro_tile_config_ptr =
2275 			adev->gfx.config.macrotile_mode_array;
2276 	config->num_macro_tile_configs =
2277 			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2278 
2279 	/* These values are not set from GFX9 onwards */
2280 	config->num_banks = adev->gfx.config.num_banks;
2281 	config->num_ranks = adev->gfx.config.num_ranks;
2282 
2283 	return 0;
2284 }
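
/* num_banks and num_ranks above are only populated for pre-GFX9 ASICs;
 * on GFX9 and later they read back as zero and userspace is presumably
 * expected to derive what it needs from gb_addr_config instead.
 */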
2285