/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu.h"

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_dbgmgr.h"
#include "kfd_iommu.h"

/*
 * List of struct kfd_process (field kfd_processes).
 * Unique/indexed by mm_struct*
 */
DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_SRCU(kfd_processes_srcu);

/* For process termination handling */
static struct workqueue_struct *kfd_process_wq;

/* Ordered, single-threaded workqueue for restoring evicted
 * processes. Restoring multiple processes concurrently under memory
 * pressure can lead to processes blocking each other from validating
 * their BOs and result in a live-lock situation where processes
 * remain evicted indefinitely.
 */
static struct workqueue_struct *kfd_restore_wq;

static struct kfd_process *find_process(const struct task_struct *thread);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread);
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);

static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);

struct kfd_procfs_tree {
	struct kobject *kobj;
};

static struct kfd_procfs_tree procfs;

/*
 * Structure for SDMA activity tracking
 */
struct kfd_sdma_activity_handler_workarea {
	struct work_struct sdma_activity_work;
	struct kfd_process_device *pdd;
	uint64_t sdma_activity_counter;
};

struct temp_sdma_queue_list {
	uint64_t __user *rptr;
	uint64_t sdma_val;
	unsigned int queue_id;
	struct list_head list;
};

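/*
 * Worker that computes the total SDMA activity (past + current) of one
 * process-device and reports it through workarea->sdma_activity_counter.
 * It runs from a workqueue and temporarily adopts the process mm
 * (kthread_use_mm) so that the user-space read pointers of the SDMA
 * queues can be sampled with get_user().
 */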
static void kfd_sdma_activity_worker(struct work_struct *work)
{
	struct kfd_sdma_activity_handler_workarea *workarea;
	struct kfd_process_device *pdd;
	uint64_t val;
	struct mm_struct *mm;
	struct queue *q;
	struct qcm_process_device *qpd;
	struct device_queue_manager *dqm;
	int ret = 0;
	struct temp_sdma_queue_list sdma_q_list;
	struct temp_sdma_queue_list *sdma_q, *next;

	workarea = container_of(work, struct kfd_sdma_activity_handler_workarea,
				sdma_activity_work);
	if (!workarea)
		return;

	pdd = workarea->pdd;
	if (!pdd)
		return;
	dqm = pdd->dev->dqm;
	qpd = &pdd->qpd;
	if (!dqm || !qpd)
		return;
	/*
	 * Total SDMA activity is current SDMA activity + past SDMA activity.
	 * The past SDMA count is stored in pdd.
	 * To get the current activity counters for all active SDMA queues,
	 * we loop over all SDMA queues and get their counts from user-space.
	 *
	 * We cannot call get_user() with dqm_lock held as it can cause
	 * a circular lock dependency situation. To read the SDMA stats,
	 * we need to do the following:
	 *
	 * 1. Create a temporary list of SDMA queue nodes from qpd->queues_list,
	 *    with dqm_lock/dqm_unlock().
	 * 2. Call get_user() for each node in the temporary list without
	 *    dqm_lock. Save the SDMA count for each node and also add the
	 *    count to the total SDMA count.
	 *    It's possible that a few SDMA queue nodes get deleted from
	 *    qpd->queues_list during this step.
	 * 3. Do a second pass over qpd->queues_list to check if any nodes
	 *    got deleted. If any node got deleted, its SDMA count would be
	 *    captured in the SDMA past activity counter. So subtract the
	 *    SDMA counter stored in step 2 for this node from the total
	 *    SDMA count.
	 */
	INIT_LIST_HEAD(&sdma_q_list.list);

	/*
	 * Create the temp list of all SDMA queues
	 */
	dqm_lock(dqm);

	list_for_each_entry(q, &qpd->queues_list, list) {
		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
			continue;

		sdma_q = kzalloc(sizeof(struct temp_sdma_queue_list), GFP_KERNEL);
		if (!sdma_q) {
			dqm_unlock(dqm);
			goto cleanup;
		}

		INIT_LIST_HEAD(&sdma_q->list);
		sdma_q->rptr = (uint64_t __user *)q->properties.read_ptr;
		sdma_q->queue_id = q->properties.queue_id;
		list_add_tail(&sdma_q->list, &sdma_q_list.list);
	}

	/*
	 * If the temp list is empty, then no SDMA queue nodes were found in
	 * qpd->queues_list. Return the past activity count as the total SDMA
	 * count.
	 */
	if (list_empty(&sdma_q_list.list)) {
		workarea->sdma_activity_counter = pdd->sdma_past_activity_counter;
		dqm_unlock(dqm);
		return;
	}

	dqm_unlock(dqm);

	/*
	 * Get the usage count for each SDMA queue in temp_list.
	 */
	mm = get_task_mm(pdd->process->lead_thread);
	if (!mm)
		goto cleanup;

	kthread_use_mm(mm);

	list_for_each_entry(sdma_q, &sdma_q_list.list, list) {
		val = 0;
		ret = read_sdma_queue_counter(sdma_q->rptr, &val);
		if (ret) {
			pr_debug("Failed to read SDMA queue active counter for queue id: %d\n",
				 sdma_q->queue_id);
		} else {
			sdma_q->sdma_val = val;
			workarea->sdma_activity_counter += val;
		}
	}

	kthread_unuse_mm(mm);
	mmput(mm);

	/*
	 * Do a second iteration over qpd->queues_list to check if any SDMA
	 * nodes got deleted while fetching the SDMA counters.
	 */
	dqm_lock(dqm);

	workarea->sdma_activity_counter += pdd->sdma_past_activity_counter;

	list_for_each_entry(q, &qpd->queues_list, list) {
		if (list_empty(&sdma_q_list.list))
			break;

		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
			continue;

		list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
			if (((uint64_t __user *)q->properties.read_ptr == sdma_q->rptr) &&
			    (sdma_q->queue_id == q->properties.queue_id)) {
				list_del(&sdma_q->list);
				kfree(sdma_q);
				break;
			}
		}
	}

	dqm_unlock(dqm);

	/*
	 * If the temp list is not empty, it implies some queues got deleted
	 * from qpd->queues_list during the SDMA usage read. Subtract the
	 * SDMA count for each such node from the total SDMA count.
	 */
	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
		workarea->sdma_activity_counter -= sdma_q->sdma_val;
		list_del(&sdma_q->list);
		kfree(sdma_q);
	}

	return;

cleanup:
	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
		list_del(&sdma_q->list);
		kfree(sdma_q);
	}
}

/**
 * kfd_get_cu_occupancy - Collect number of waves in flight on this device
 * by current process. Translates acquired wave count into number of compute
 * units that are occupied.
 *
 * @attr: Handle of attribute that allows reporting of wave count. The
 * attribute handle encapsulates the GPU device it is associated with,
 * thereby allowing collection of waves in flight, etc.
 *
 * @buffer: Handle of user provided buffer updated with wave count
 *
 * Return: Number of bytes written to user buffer or an error value
 */
static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
{
	int cu_cnt;
	int wave_cnt;
	int max_waves_per_cu;
	struct kfd_dev *dev = NULL;
	struct kfd_process *proc = NULL;
	struct kfd_process_device *pdd = NULL;

	pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
	dev = pdd->dev;
	if (dev->kfd2kgd->get_cu_occupancy == NULL)
		return -EINVAL;

	cu_cnt = 0;
	proc = pdd->process;
	if (pdd->qpd.queue_count == 0) {
		pr_debug("Gpu-Id: %d has no active queues for process %d\n",
			 dev->id, proc->pasid);
		return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
	}

	/* Collect wave count from device if it supports it */
	wave_cnt = 0;
	max_waves_per_cu = 0;
	dev->kfd2kgd->get_cu_occupancy(dev->kgd, proc->pasid, &wave_cnt,
				       &max_waves_per_cu);

	/* Translate wave count to number of compute units */
	cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
	return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
}

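/*
 * Common show() handler for the per-process sysfs attributes "pasid",
 * "vram_<gpuid>" and "sdma_<gpuid>". The SDMA counter is gathered
 * synchronously by scheduling kfd_sdma_activity_worker() and flushing it.
 */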
static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
			       char *buffer)
{
	if (strcmp(attr->name, "pasid") == 0) {
		struct kfd_process *p = container_of(attr, struct kfd_process,
						     attr_pasid);

		return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
	} else if (strncmp(attr->name, "vram_", 5) == 0) {
		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
							      attr_vram);
		return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
	} else if (strncmp(attr->name, "sdma_", 5) == 0) {
		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
							      attr_sdma);
		struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;

		INIT_WORK(&sdma_activity_work_handler.sdma_activity_work,
			  kfd_sdma_activity_worker);

		sdma_activity_work_handler.pdd = pdd;
		sdma_activity_work_handler.sdma_activity_counter = 0;

		schedule_work(&sdma_activity_work_handler.sdma_activity_work);

		flush_work(&sdma_activity_work_handler.sdma_activity_work);

		return snprintf(buffer, PAGE_SIZE, "%llu\n",
				(sdma_activity_work_handler.sdma_activity_counter)/
				 SDMA_ACTIVITY_DIVISOR);
	} else {
		pr_err("Invalid attribute\n");
		return -EINVAL;
	}

	return 0;
}

static void kfd_procfs_kobj_release(struct kobject *kobj)
{
	kfree(kobj);
}

static const struct sysfs_ops kfd_procfs_ops = {
	.show = kfd_procfs_show,
};

static struct kobj_type procfs_type = {
	.release = kfd_procfs_kobj_release,
	.sysfs_ops = &kfd_procfs_ops,
};

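/*
 * Create the top-level "proc" kobject under the KFD device sysfs node.
 * Per-process subdirectories are added beneath it by kfd_create_process().
 */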
void kfd_procfs_init(void)
{
	int ret = 0;

	procfs.kobj = kfd_alloc_struct(procfs.kobj);
	if (!procfs.kobj)
		return;

	ret = kobject_init_and_add(procfs.kobj, &procfs_type,
				   &kfd_device->kobj, "proc");
	if (ret) {
		pr_warn("Could not create procfs proc folder\n");
		/* If we fail to create the procfs, clean up */
		kfd_procfs_shutdown();
	}
}

void kfd_procfs_shutdown(void)
{
	if (procfs.kobj) {
		kobject_del(procfs.kobj);
		kobject_put(procfs.kobj);
		procfs.kobj = NULL;
	}
}

static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
				     struct attribute *attr, char *buffer)
{
	struct queue *q = container_of(kobj, struct queue, kobj);

	if (!strcmp(attr->name, "size"))
		return snprintf(buffer, PAGE_SIZE, "%llu",
				q->properties.queue_size);
	else if (!strcmp(attr->name, "type"))
		return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
	else if (!strcmp(attr->name, "gpuid"))
		return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
	else
		pr_err("Invalid attribute\n");

	return 0;
}

static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
				     struct attribute *attr, char *buffer)
{
	if (strcmp(attr->name, "evicted_ms") == 0) {
		struct kfd_process_device *pdd = container_of(attr,
				struct kfd_process_device,
				attr_evict);
		uint64_t evict_jiffies;

		evict_jiffies = atomic64_read(&pdd->evict_duration_counter);

		return snprintf(buffer,
				PAGE_SIZE,
				"%llu\n",
				jiffies64_to_msecs(evict_jiffies));

	/* Sysfs handle that gets CU occupancy is per device */
	} else if (strcmp(attr->name, "cu_occupancy") == 0) {
		return kfd_get_cu_occupancy(attr, buffer);
	} else {
		pr_err("Invalid attribute\n");
	}

	return 0;
}

static struct attribute attr_queue_size = {
	.name = "size",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute attr_queue_type = {
	.name = "type",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute attr_queue_gpuid = {
	.name = "gpuid",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute *procfs_queue_attrs[] = {
	&attr_queue_size,
	&attr_queue_type,
	&attr_queue_gpuid,
	NULL
};

static const struct sysfs_ops procfs_queue_ops = {
	.show = kfd_procfs_queue_show,
};

static struct kobj_type procfs_queue_type = {
	.sysfs_ops = &procfs_queue_ops,
	.default_attrs = procfs_queue_attrs,
};

static const struct sysfs_ops procfs_stats_ops = {
	.show = kfd_procfs_stats_show,
};

static struct attribute *procfs_stats_attrs[] = {
	NULL
};

static struct kobj_type procfs_stats_type = {
	.sysfs_ops = &procfs_stats_ops,
	.default_attrs = procfs_stats_attrs,
};

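/*
 * Create the proc/<pid>/queues/<queue id> directory for a newly created
 * queue. The parent "queues" kobject must already exist.
 */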
int kfd_procfs_add_queue(struct queue *q)
{
	struct kfd_process *proc;
	int ret;

	if (!q || !q->process)
		return -EINVAL;
	proc = q->process;

	/* Create proc/<pid>/queues/<queue id> folder */
	if (!proc->kobj_queues)
		return -EFAULT;
	ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
				   proc->kobj_queues, "%u", q->properties.queue_id);
	if (ret < 0) {
		pr_warn("Creating proc/<pid>/queues/%u failed\n",
			q->properties.queue_id);
		kobject_put(&q->kobj);
		return ret;
	}

	return 0;
}

static int kfd_sysfs_create_file(struct kfd_process *p, struct attribute *attr,
				 char *name)
{
	int ret = 0;

	if (!p || !attr || !name)
		return -EINVAL;

	attr->name = name;
	attr->mode = KFD_SYSFS_FILE_MODE;
	sysfs_attr_init(attr);

	ret = sysfs_create_file(p->kobj, attr);

	return ret;
}

static int kfd_procfs_add_sysfs_stats(struct kfd_process *p)
{
	int ret = 0;
	struct kfd_process_device *pdd;
	char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];

	if (!p)
		return -EINVAL;

	if (!p->kobj)
		return -EFAULT;

	/*
	 * Create sysfs files for each GPU:
	 * - proc/<pid>/stats_<gpuid>/
	 * - proc/<pid>/stats_<gpuid>/evicted_ms
	 * - proc/<pid>/stats_<gpuid>/cu_occupancy
	 */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		struct kobject *kobj_stats;

		snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
			 "stats_%u", pdd->dev->id);
		kobj_stats = kfd_alloc_struct(kobj_stats);
		if (!kobj_stats)
			return -ENOMEM;

		ret = kobject_init_and_add(kobj_stats,
					   &procfs_stats_type,
					   p->kobj,
					   stats_dir_filename);

		if (ret) {
			pr_warn("Creating KFD proc/%s folder failed\n",
				stats_dir_filename);
			kobject_put(kobj_stats);
			goto err;
		}

		pdd->kobj_stats = kobj_stats;
		pdd->attr_evict.name = "evicted_ms";
		pdd->attr_evict.mode = KFD_SYSFS_FILE_MODE;
		sysfs_attr_init(&pdd->attr_evict);
		ret = sysfs_create_file(kobj_stats, &pdd->attr_evict);
		if (ret)
			pr_warn("Creating eviction stats for gpuid %d failed\n",
				(int)pdd->dev->id);

		/* Add sysfs file to report compute unit occupancy */
		if (pdd->dev->kfd2kgd->get_cu_occupancy != NULL) {
			pdd->attr_cu_occupancy.name = "cu_occupancy";
			pdd->attr_cu_occupancy.mode = KFD_SYSFS_FILE_MODE;
			sysfs_attr_init(&pdd->attr_cu_occupancy);
			ret = sysfs_create_file(kobj_stats,
						&pdd->attr_cu_occupancy);
			if (ret)
				pr_warn("Creating %s failed for gpuid: %d\n",
					pdd->attr_cu_occupancy.name,
					(int)pdd->dev->id);
		}
	}
err:
	return ret;
}

static int kfd_procfs_add_sysfs_files(struct kfd_process *p)
{
	int ret = 0;
	struct kfd_process_device *pdd;

	if (!p)
		return -EINVAL;

	if (!p->kobj)
		return -EFAULT;

	/*
	 * Create sysfs files for each GPU:
	 * - proc/<pid>/vram_<gpuid>
	 * - proc/<pid>/sdma_<gpuid>
	 */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
			 pdd->dev->id);
		ret = kfd_sysfs_create_file(p, &pdd->attr_vram, pdd->vram_filename);
		if (ret)
			pr_warn("Creating vram usage for gpu id %d failed\n",
				(int)pdd->dev->id);

		snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
			 pdd->dev->id);
		ret = kfd_sysfs_create_file(p, &pdd->attr_sdma, pdd->sdma_filename);
		if (ret)
			pr_warn("Creating sdma usage for gpu id %d failed\n",
				(int)pdd->dev->id);
	}

	return ret;
}

void kfd_procfs_del_queue(struct queue *q)
{
	if (!q)
		return;

	kobject_del(&q->kobj);
	kobject_put(&q->kobj);
}

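/*
 * Allocate the KFD workqueues: a general one for process termination work
 * and an ordered, single-threaded one for restores (see the comment above
 * kfd_restore_wq for why restores must not run concurrently).
 */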
int kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
	if (!kfd_restore_wq)
		kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);

	if (!kfd_process_wq || !kfd_restore_wq) {
		kfd_process_destroy_wq();
		return -ENOMEM;
	}

	return 0;
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
	if (kfd_restore_wq) {
		destroy_workqueue(kfd_restore_wq);
		kfd_restore_wq = NULL;
	}
}

static void kfd_process_free_gpuvm(struct kgd_mem *mem,
				   struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;

	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, NULL);
}

/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
 * This function should only be called right after the process
 * is created and while kfd_processes_mutex is still held,
 * to avoid concurrency. Because of that exclusiveness, we do
 * not need to take p->mutex.
 */
static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
				   uint64_t gpu_va, uint32_t size,
				   uint32_t flags, void **kptr)
{
	struct kfd_dev *kdev = pdd->dev;
	struct kgd_mem *mem = NULL;
	int handle;
	int err;

	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
						      pdd->vm, &mem, NULL, flags);
	if (err)
		goto err_alloc_mem;

	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
	if (err)
		goto err_map_mem;

	err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true);
	if (err) {
		pr_debug("Sync memory failed, wait interrupted by user signal\n");
		goto sync_memory_failed;
	}

	/* Create an obj handle so kfd_process_device_remove_obj_handle
	 * will take care of the bo removal when the process finishes.
	 * We do not need to take p->mutex, because the process is just
	 * created and the ioctls have not had the chance to run.
	 */
	handle = kfd_process_device_create_obj_handle(pdd, mem);

	if (handle < 0) {
		err = handle;
		goto free_gpuvm;
	}

	if (kptr) {
		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd,
				(struct kgd_mem *)mem, kptr, NULL);
		if (err) {
			pr_debug("Map GTT BO to kernel failed\n");
			goto free_obj_handle;
		}
	}

	return err;

free_obj_handle:
	kfd_process_device_remove_obj_handle(pdd, handle);
free_gpuvm:
sync_memory_failed:
	kfd_process_free_gpuvm(mem, pdd);
	return err;

err_map_mem:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem, NULL);
err_alloc_mem:
	*kptr = NULL;
	return err;
}

/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
 * process for IB usage. The memory reserved is for KFD to submit
 * IBs to AMDGPU from the kernel. If the memory is reserved
 * successfully, ib_kaddr will have the CPU/kernel
 * address. Check ib_kaddr before accessing the memory.
 */
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
			 KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
			 KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
			 KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
	void *kaddr;
	int ret;

	if (qpd->ib_kaddr || !qpd->ib_base)
		return 0;

	/* ib_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
				      &kaddr);
	if (ret)
		return ret;

	qpd->ib_kaddr = kaddr;

	return 0;
}

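/*
 * Create, or look up, the kfd_process for the calling task and take a
 * reference on it. On first creation this also initializes CWSR and the
 * per-process sysfs entries.
 */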
struct kfd_process *kfd_create_process(struct file *filep)
{
	struct kfd_process *process;
	struct task_struct *thread = current;
	int ret;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/*
	 * Take the KFD processes mutex before starting process creation
	 * so there won't be a case where two threads of the same process
	 * create two kfd_process structures.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread);
	if (process) {
		pr_debug("Process already found\n");
	} else {
		process = create_process(thread);
		if (IS_ERR(process))
			goto out;

		ret = kfd_process_init_cwsr_apu(process, filep);
		if (ret) {
			process = ERR_PTR(ret);
			goto out;
		}

		if (!procfs.kobj)
			goto out;

		process->kobj = kfd_alloc_struct(process->kobj);
		if (!process->kobj) {
			pr_warn("Creating procfs kobject failed\n");
			goto out;
		}
		ret = kobject_init_and_add(process->kobj, &procfs_type,
					   procfs.kobj, "%d",
					   (int)process->lead_thread->pid);
		if (ret) {
			pr_warn("Creating procfs pid directory failed\n");
			kobject_put(process->kobj);
			goto out;
		}

		process->attr_pasid.name = "pasid";
		process->attr_pasid.mode = KFD_SYSFS_FILE_MODE;
		sysfs_attr_init(&process->attr_pasid);
		ret = sysfs_create_file(process->kobj, &process->attr_pasid);
		if (ret)
			pr_warn("Creating pasid for pid %d failed\n",
				(int)process->lead_thread->pid);

		process->kobj_queues = kobject_create_and_add("queues",
							      process->kobj);
		if (!process->kobj_queues)
			pr_warn("Creating KFD proc/queues folder failed\n");

		ret = kfd_procfs_add_sysfs_stats(process);
		if (ret)
			pr_warn("Creating sysfs stats dir for pid %d failed\n",
				(int)process->lead_thread->pid);

		ret = kfd_procfs_add_sysfs_files(process);
		if (ret)
			pr_warn("Creating sysfs usage file for pid %d failed\n",
				(int)process->lead_thread->pid);
	}
out:
	if (!IS_ERR(process))
		kref_get(&process->ref);
	mutex_unlock(&kfd_processes_mutex);

	return process;
}

struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread);
	if (!process)
		return ERR_PTR(-EINVAL);

	return process;
}

static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
				   kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

void kfd_unref_process(struct kfd_process *p)
{
	kref_put(&p->ref, kfd_process_ref_release);
}

static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
{
	struct kfd_process *p = pdd->process;
	void *mem;
	int id;

	/*
	 * Remove all handles from the idr and release the appropriate
	 * local memory objects.
	 */
	idr_for_each_entry(&pdd->alloc_idr, mem, id) {
		struct kfd_process_device *peer_pdd;

		list_for_each_entry(peer_pdd, &p->per_device_data,
				    per_device_list) {
			if (!peer_pdd->vm)
				continue;
			amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
				peer_pdd->dev->kgd, mem, peer_pdd->vm);
		}

		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem, NULL);
		kfd_process_device_remove_obj_handle(pdd, id);
	}
}

static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
{
	struct kfd_process_device *pdd;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		kfd_process_device_free_bos(pdd);
}

static void kfd_process_destroy_pdds(struct kfd_process *p)
{
	struct kfd_process_device *pdd, *temp;

	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
				 per_device_list) {
		pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
			 pdd->dev->id, p->pasid);

		if (pdd->drm_file) {
			amdgpu_amdkfd_gpuvm_release_process_vm(
					pdd->dev->kgd, pdd->vm);
			fput(pdd->drm_file);
		}
		else if (pdd->vm)
			amdgpu_amdkfd_gpuvm_destroy_process_vm(
				pdd->dev->kgd, pdd->vm);

		list_del(&pdd->per_device_list);

		if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
				   get_order(KFD_CWSR_TBA_TMA_SIZE));

		kfree(pdd->qpd.doorbell_bitmap);
		idr_destroy(&pdd->alloc_idr);

		kfd_free_process_doorbells(pdd->dev, pdd->doorbell_index);

		/*
		 * before destroying pdd, make sure to report availability
		 * for auto suspend
		 */
		if (pdd->runtime_inuse) {
			pm_runtime_mark_last_busy(pdd->dev->ddev->dev);
			pm_runtime_put_autosuspend(pdd->dev->ddev->dev);
			pdd->runtime_inuse = false;
		}

		kfree(pdd);
	}
}

/* No process locking is needed in this function, because the process
 * is not findable any more. We must assume that no other thread is
 * using it any more, otherwise we couldn't safely free the process
 * structure in the end.
 */
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process *p = container_of(work, struct kfd_process,
					     release_work);
	struct kfd_process_device *pdd;

	/* Remove the procfs files */
	if (p->kobj) {
		sysfs_remove_file(p->kobj, &p->attr_pasid);
		kobject_del(p->kobj_queues);
		kobject_put(p->kobj_queues);
		p->kobj_queues = NULL;

		list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
			sysfs_remove_file(p->kobj, &pdd->attr_vram);
			sysfs_remove_file(p->kobj, &pdd->attr_sdma);
			sysfs_remove_file(p->kobj, &pdd->attr_evict);
			if (pdd->dev->kfd2kgd->get_cu_occupancy != NULL)
				sysfs_remove_file(p->kobj, &pdd->attr_cu_occupancy);
			kobject_del(pdd->kobj_stats);
			kobject_put(pdd->kobj_stats);
			pdd->kobj_stats = NULL;
		}

		kobject_del(p->kobj);
		kobject_put(p->kobj);
		p->kobj = NULL;
	}

	kfd_iommu_unbind_process(p);

	kfd_process_free_outstanding_kfd_bos(p);

	kfd_process_destroy_pdds(p);
	dma_fence_put(p->ef);

	kfd_event_free_process(p);

	kfd_pasid_free(p->pasid);
	mutex_destroy(&p->mutex);

	put_task_struct(p->lead_thread);

	kfree(p);
}

static void kfd_process_ref_release(struct kref *ref)
{
	struct kfd_process *p = container_of(ref, struct kfd_process, ref);

	INIT_WORK(&p->release_work, kfd_process_wq_release);
	queue_work(kfd_process_wq, &p->release_work);
}

static void kfd_process_free_notifier(struct mmu_notifier *mn)
{
	kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
}

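/*
 * mmu_notifier release callback, invoked when the process address space
 * is torn down. Removes the process from the lookup table, destroys its
 * user mode queues and drops the notifier reference.
 */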
static void kfd_process_notifier_release(struct mmu_notifier *mn,
					 struct mm_struct *mm)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd = NULL;

	/*
	 * The kfd_process structure cannot be freed because the
	 * mmu_notifier srcu is read locked.
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	if (WARN_ON(p->mm != mm))
		return;

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	cancel_delayed_work_sync(&p->eviction_work);
	cancel_delayed_work_sync(&p->restore_work);

	mutex_lock(&p->mutex);

	/* Iterate over all process device data structures and if the
	 * pdd is in debug mode, we should first force unregistration,
	 * then we will be able to destroy the queues
	 */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		struct kfd_dev *dev = pdd->dev;

		mutex_lock(kfd_get_dbgmgr_mutex());
		if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
			if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
				kfd_dbgmgr_destroy(dev->dbgmgr);
				dev->dbgmgr = NULL;
			}
		}
		mutex_unlock(kfd_get_dbgmgr_mutex());
	}

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	/* Indicate to other users that MM is no longer valid */
	p->mm = NULL;
	/* Signal the eviction fence after user mode queues are
	 * destroyed. This allows any BOs to be freed without
	 * triggering pointless evictions or waiting for fences.
	 */
	dma_fence_signal(p->ef);

	mutex_unlock(&p->mutex);

	mmu_notifier_put(&p->mmu_notifier);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
	.free_notifier = kfd_process_free_notifier,
};

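/*
 * For APUs: map the CWSR trap handler pages into the process address
 * space via the per-device reserved-memory mmap and copy the trap
 * handler ISA into them.
 */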
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
	unsigned long offset;
	struct kfd_process_device *pdd;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		struct kfd_dev *dev = pdd->dev;
		struct qcm_process_device *qpd = &pdd->qpd;

		if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
			continue;

		offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
			MAP_SHARED, offset);

		if (IS_ERR_VALUE(qpd->tba_addr)) {
			int err = qpd->tba_addr;

			pr_err("Failure to set tba address. error %d.\n", err);
			qpd->tba_addr = 0;
			qpd->cwsr_kaddr = NULL;
			return err;
		}

		memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
		pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
			 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
	}

	return 0;
}

static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
			| KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
			| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
	void *kaddr;
	int ret;

	if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
		return 0;

	/* cwsr_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
				      KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr);
	if (ret)
		return ret;

	qpd->cwsr_kaddr = kaddr;
	qpd->tba_addr = qpd->cwsr_base;

	memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

	qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
	pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
		 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);

	return 0;
}

/*
 * On return the kfd_process is fully operational and will be freed when the
 * mm is released.
 */
static struct kfd_process *create_process(const struct task_struct *thread)
{
	struct kfd_process *process;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);
	if (!process)
		goto err_alloc_process;

	kref_init(&process->ref);
	mutex_init(&process->mutex);
	process->mm = thread->mm;
	process->lead_thread = thread->group_leader;
	INIT_LIST_HEAD(&process->per_device_data);
	INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
	INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
	process->last_restore_timestamp = get_jiffies_64();
	kfd_event_init_process(process);
	process->is_32bit_user_mode = in_compat_syscall();

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0)
		goto err_alloc_pasid;

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* init process apertures */
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apertures;

	/* Must be last, have to use release destruction after this */
	process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
	err = mmu_notifier_register(&process->mmu_notifier, process->mm);
	if (err)
		goto err_register_notifier;

	get_task_struct(process->lead_thread);
	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
		     (uintptr_t)process->mm);

	return process;

err_register_notifier:
	kfd_process_free_outstanding_kfd_bos(process);
	kfd_process_destroy_pdds(process);
err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	mutex_destroy(&process->mutex);
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}

static int init_doorbell_bitmap(struct qcm_process_device *qpd,
				struct kfd_dev *dev)
{
	unsigned int i;
	int range_start = dev->shared_resources.non_cp_doorbells_start;
	int range_end = dev->shared_resources.non_cp_doorbells_end;

	if (!KFD_IS_SOC15(dev->device_info->asic_family))
		return 0;

	qpd->doorbell_bitmap =
		kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
				     BITS_PER_BYTE), GFP_KERNEL);
	if (!qpd->doorbell_bitmap)
		return -ENOMEM;

	/* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
	pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);
	pr_debug("reserved doorbell 0x%03x - 0x%03x\n",
		 range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
		 range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);

	for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
		if (i >= range_start && i <= range_end) {
			set_bit(i, qpd->doorbell_bitmap);
			set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
				qpd->doorbell_bitmap);
		}
	}

	return 0;
}

struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		if (pdd->dev == dev)
			return pdd;

	return NULL;
}

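/*
 * Allocate and initialize the per-process-per-device data (pdd):
 * doorbell assignment, queue lists and the IDR used for memory handle
 * translation. Returns NULL on failure.
 */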
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							   struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (!pdd)
		return NULL;

	if (kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) {
		pr_err("Failed to alloc doorbell for pdd\n");
		goto err_free_pdd;
	}

	if (init_doorbell_bitmap(&pdd->qpd, dev)) {
		pr_err("Failed to init doorbell for process\n");
		goto err_free_pdd;
	}

	pdd->dev = dev;
	INIT_LIST_HEAD(&pdd->qpd.queues_list);
	INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
	pdd->qpd.dqm = dev->dqm;
	pdd->qpd.pqm = &p->pqm;
	pdd->qpd.evicted = 0;
	pdd->qpd.mapped_gws_queue = false;
	pdd->process = p;
	pdd->bound = PDD_UNBOUND;
	pdd->already_dequeued = false;
	pdd->runtime_inuse = false;
	pdd->vram_usage = 0;
	pdd->sdma_past_activity_counter = 0;
	atomic64_set(&pdd->evict_duration_counter, 0);
	list_add(&pdd->per_device_list, &p->per_device_data);

	/* Init idr used for memory handle translation */
	idr_init(&pdd->alloc_idr);

	return pdd;

err_free_pdd:
	kfree(pdd);
	return NULL;
}

/**
 * kfd_process_device_init_vm - Initialize a VM for a process-device
 *
 * @pdd: The process-device
 * @drm_file: Optional pointer to a DRM file descriptor
 *
 * If @drm_file is specified, it will be used to acquire the VM from
 * that file descriptor. If successful, the @pdd takes ownership of
 * the file descriptor.
 *
 * If @drm_file is NULL, a new VM is created.
 *
 * Returns 0 on success, -errno on failure.
 */
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file)
{
	struct kfd_process *p;
	struct kfd_dev *dev;
	int ret;

	if (pdd->vm)
		return drm_file ? -EBUSY : 0;

	p = pdd->process;
	dev = pdd->dev;

	if (drm_file)
		ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
			dev->kgd, drm_file, p->pasid,
			&pdd->vm, &p->kgd_process_info, &p->ef);
	else
		ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd, p->pasid,
			&pdd->vm, &p->kgd_process_info, &p->ef);
	if (ret) {
		pr_err("Failed to create process VM object\n");
		return ret;
	}

	amdgpu_vm_set_task_info(pdd->vm);

	ret = kfd_process_device_reserve_ib_mem(pdd);
	if (ret)
		goto err_reserve_ib_mem;
	ret = kfd_process_device_init_cwsr_dgpu(pdd);
	if (ret)
		goto err_init_cwsr;

	pdd->drm_file = drm_file;

	return 0;

err_init_cwsr:
err_reserve_ib_mem:
	kfd_process_device_free_bos(pdd);
	if (!drm_file)
		amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm);
	pdd->vm = NULL;

	return ret;
}

/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
						      struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Signal the runtime-pm system to auto resume and prevent
	 * further runtime suspend once the device pdd is created, until
	 * the pdd is destroyed.
	 */
	if (!pdd->runtime_inuse) {
		err = pm_runtime_get_sync(dev->ddev->dev);
		if (err < 0) {
			pm_runtime_put_autosuspend(dev->ddev->dev);
			return ERR_PTR(err);
		}
	}

	err = kfd_iommu_bind_process_to_device(pdd);
	if (err)
		goto out;

	err = kfd_process_device_init_vm(pdd, NULL);
	if (err)
		goto out;

	/*
	 * Make sure that the runtime_usage counter is incremented just
	 * once per pdd.
	 */
	pdd->runtime_inuse = true;

	return pdd;

out:
	/* balance runpm reference count and exit with error */
	if (!pdd->runtime_inuse) {
		pm_runtime_mark_last_busy(dev->ddev->dev);
		pm_runtime_put_autosuspend(dev->ddev->dev);
	}

	return ERR_PTR(err);
}

struct kfd_process_device *kfd_get_first_process_device_data(
						struct kfd_process *p)
{
	return list_first_entry(&p->per_device_data,
				struct kfd_process_device,
				per_device_list);
}

struct kfd_process_device *kfd_get_next_process_device_data(
						struct kfd_process *p,
						struct kfd_process_device *pdd)
{
	if (list_is_last(&pdd->per_device_list, &p->per_device_data))
		return NULL;
	return list_next_entry(pdd, per_device_list);
}

bool kfd_has_process_device_data(struct kfd_process *p)
{
	return !(list_empty(&p->per_device_data));
}

/* Create specific handle mapped to mem from process local memory idr
 * Assumes that the process lock is held.
 */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					 void *mem)
{
	return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
}

/* Translate specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
					  int handle)
{
	if (handle < 0)
		return NULL;

	return idr_find(&pdd->alloc_idr, handle);
}

/* Remove specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					  int handle)
{
	if (handle >= 0)
		idr_remove(&pdd->alloc_idr, handle);
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid)
{
	struct kfd_process *p, *ret_p = NULL;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (p->pasid == pasid) {
			kref_get(&p->ref);
			ret_p = p;
			break;
		}
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return ret_p;
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *p;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	p = find_process_by_mm(mm);
	if (p)
		kref_get(&p->ref);

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

/* kfd_process_evict_queues - Evict all user queues of a process
 *
 * Eviction is reference-counted per process-device. This means multiple
 * evictions from different sources can be nested safely.
 */
int kfd_process_evict_queues(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int r = 0;
	unsigned int n_evicted = 0;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
							    &pdd->qpd);
		if (r) {
			pr_err("Failed to evict process queues\n");
			goto fail;
		}
		n_evicted++;
	}

	return r;

fail:
	/* To keep state consistent, roll back partial eviction by
	 * restoring queues
	 */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		if (n_evicted == 0)
			break;
		if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd))
			pr_err("Failed to restore queues\n");

		n_evicted--;
	}

	return r;
}

/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int r, ret = 0;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd);
		if (r) {
			pr_err("Failed to restore process queues\n");
			if (!ret)
				ret = r;
		}
	}

	return ret;
}

static void evict_process_worker(struct work_struct *work)
{
	int ret;
	struct kfd_process *p;
	struct delayed_work *dwork;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid
	 */
	p = container_of(dwork, struct kfd_process, eviction_work);
	WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
		  "Eviction fence mismatch\n");

	/* A narrow window of overlap between the restore and evict work
	 * items is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
	 * unreserves the KFD BOs, the process can be evicted again, but
	 * restore still has a few more steps to finish. So let's wait for
	 * any previous restore work to complete.
	 */
	flush_delayed_work(&p->restore_work);

	pr_debug("Started evicting pasid 0x%x\n", p->pasid);
	ret = kfd_process_evict_queues(p);
	if (!ret) {
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
		queue_delayed_work(kfd_restore_wq, &p->restore_work,
				   msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));

		pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
	} else
		pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
}

static void restore_process_worker(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct kfd_process *p;
	int ret = 0;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid
	 */
	p = container_of(dwork, struct kfd_process, restore_work);
	pr_debug("Started restoring pasid 0x%x\n", p->pasid);

	/* Set last_restore_timestamp before attempting the restore.
	 * Otherwise this would have to be set by KGD (restore_process_bos)
	 * before KFD BOs are unreserved. If not, the process can be evicted
	 * again before the timestamp is set.
	 * If restore fails, the timestamp will be set again in the next
	 * attempt. This would mean that the minimum GPU quanta would be
	 * PROCESS_ACTIVE_TIME_MS - (time to execute the following two
	 * functions)
	 */

	p->last_restore_timestamp = get_jiffies_64();
	ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
						      &p->ef);
	if (ret) {
		pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
			 p->pasid, PROCESS_BACK_OFF_TIME_MS);
		ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
				msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
		WARN(!ret, "reschedule restore work failed\n");
		return;
	}

	ret = kfd_process_restore_queues(p);
	if (!ret)
		pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
	else
		pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
}

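/*
 * Evict the queues of every known process and signal their eviction
 * fences. Pending eviction/restore work items are cancelled first.
 */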
void kfd_suspend_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int idx = srcu_read_lock(&kfd_processes_srcu);

	WARN(debug_evictions, "Evicting all processes");
	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		cancel_delayed_work_sync(&p->eviction_work);
		cancel_delayed_work_sync(&p->restore_work);

		if (kfd_process_evict_queues(p))
			pr_err("Failed to suspend process 0x%x\n", p->pasid);
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
}

int kfd_resume_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
			pr_err("Restore process %d failed during resume\n",
			       p->pasid);
			ret = -EFAULT;
		}
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
	return ret;
}

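/*
 * Back the CWSR reserved-memory mapping with freshly allocated, zeroed
 * pages and map them into the user VMA. This serves the vm_mmap() done
 * in kfd_process_init_cwsr_apu() for APUs.
 */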
int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
			  struct vm_area_struct *vma)
{
	struct kfd_process_device *pdd;
	struct qcm_process_device *qpd;

	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
		pr_err("Incorrect CWSR mapping size.\n");
		return -EINVAL;
	}

	pdd = kfd_get_process_device_data(dev, process);
	if (!pdd)
		return -EINVAL;
	qpd = &pdd->qpd;

	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						   get_order(KFD_CWSR_TBA_TMA_SIZE));
	if (!qpd->cwsr_kaddr) {
		pr_err("Error allocating per process CWSR buffer.\n");
		return -ENOMEM;
	}

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
		| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
	/* Mapping pages to user process */
	return remap_pfn_range(vma, vma->vm_start,
			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}

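/*
 * Flush GPU TLBs for this process-device. Without HWS the flush is per
 * VMID (and only once a VMID has been assigned); with HWS it is per
 * PASID.
 */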
void kfd_flush_tlb(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;

	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
		/* Nothing to flush until a VMID is assigned, which
		 * only happens when the first queue is created.
		 */
		if (pdd->qpd.vmid)
			amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->kgd,
							 pdd->qpd.vmid);
	} else {
		amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->kgd,
						  pdd->process->pasid);
	}
}

#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
	struct kfd_process *p;
	unsigned int temp;
	int r = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		seq_printf(m, "Process %d PASID 0x%x:\n",
			   p->lead_thread->tgid, p->pasid);

		mutex_lock(&p->mutex);
		r = pqm_debugfs_mqds(m, &p->pqm);
		mutex_unlock(&p->mutex);

		if (r)
			break;
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return r;
}

#endif