Lines matching refs: pdd

85 struct kfd_process_device *pdd; member
99 struct kfd_process_device *pdd; in kfd_sdma_activity_worker() local
114 pdd = workarea->pdd; in kfd_sdma_activity_worker()
115 if (!pdd) in kfd_sdma_activity_worker()
117 dqm = pdd->dev->dqm; in kfd_sdma_activity_worker()
118 qpd = &pdd->qpd; in kfd_sdma_activity_worker()
173 workarea->sdma_activity_counter = pdd->sdma_past_activity_counter; in kfd_sdma_activity_worker()
183 mm = get_task_mm(pdd->process->lead_thread); in kfd_sdma_activity_worker()
210 workarea->sdma_activity_counter += pdd->sdma_past_activity_counter; in kfd_sdma_activity_worker()
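
The block above is the SDMA activity worker: a workarea struct (the member at line 85) carries the pdd into deferred work, and the handler reaches the device queue manager through pdd->dev->dqm and the queue properties through &pdd->qpd. A minimal sketch of that hand-off, assuming a hypothetical sdma_workarea type and handler body; only the pdd plumbing visible in the listing is taken as given:

#include <linux/workqueue.h>
#include <linux/types.h>

struct kfd_process_device;			/* opaque here */

struct sdma_workarea {				/* hypothetical stand-in */
	struct work_struct sdma_activity_work;
	struct kfd_process_device *pdd;		/* the member at line 85 */
	uint64_t sdma_activity_counter;
};

static void sdma_activity_worker(struct work_struct *work)
{
	/* Recover the workarea that embeds this work item. */
	struct sdma_workarea *workarea =
		container_of(work, struct sdma_workarea, sdma_activity_work);
	struct kfd_process_device *pdd = workarea->pdd;	/* line 114 */

	if (!pdd)					/* line 115 */
		return;
	/* ... accumulate counters via pdd->dev->dqm and &pdd->qpd ... */
}
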
272 struct kfd_process_device *pdd = NULL; in kfd_get_cu_occupancy() local
274 pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy); in kfd_get_cu_occupancy()
275 dev = pdd->dev; in kfd_get_cu_occupancy()
280 proc = pdd->process; in kfd_get_cu_occupancy()
281 if (pdd->qpd.queue_count == 0) { in kfd_get_cu_occupancy()
307 struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device, in kfd_procfs_show() local
309 return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage)); in kfd_procfs_show()
311 struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device, in kfd_procfs_show() local
318 sdma_activity_work_handler.pdd = pdd; in kfd_procfs_show()
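
The show callbacks at lines 274, 307, and 311 all recover their pdd with container_of() on the embedded sysfs attribute, so each per-device file needs nothing more than one struct attribute inside the pdd. A minimal sketch of that recovery, using a hypothetical pdd_sketch type around the attr_vram/vram_usage pair visible at lines 307-309:

#include <linux/kobject.h>
#include <linux/sysfs.h>

struct pdd_sketch {
	struct attribute attr_vram;	/* one embedded attribute per file */
	u64 vram_usage;
};

static ssize_t vram_show(struct kobject *kobj, struct attribute *attr,
			 char *buffer)
{
	/* Walk back from the attribute to the structure that embeds it. */
	struct pdd_sketch *pdd =
		container_of(attr, struct pdd_sketch, attr_vram);

	return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
}
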
398 struct kfd_process_device *pdd = container_of(attr, in kfd_procfs_stats_show() local
403 evict_jiffies = atomic64_read(&pdd->evict_duration_counter); in kfd_procfs_stats_show()
508 struct kfd_process_device *pdd; in kfd_procfs_add_sysfs_stats() local
523 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { in kfd_procfs_add_sysfs_stats()
527 "stats_%u", pdd->dev->id); in kfd_procfs_add_sysfs_stats()
544 pdd->kobj_stats = kobj_stats; in kfd_procfs_add_sysfs_stats()
545 pdd->attr_evict.name = "evicted_ms"; in kfd_procfs_add_sysfs_stats()
546 pdd->attr_evict.mode = KFD_SYSFS_FILE_MODE; in kfd_procfs_add_sysfs_stats()
547 sysfs_attr_init(&pdd->attr_evict); in kfd_procfs_add_sysfs_stats()
548 ret = sysfs_create_file(kobj_stats, &pdd->attr_evict); in kfd_procfs_add_sysfs_stats()
551 (int)pdd->dev->id); in kfd_procfs_add_sysfs_stats()
554 if (pdd->dev->kfd2kgd->get_cu_occupancy != NULL) { in kfd_procfs_add_sysfs_stats()
555 pdd->attr_cu_occupancy.name = "cu_occupancy"; in kfd_procfs_add_sysfs_stats()
556 pdd->attr_cu_occupancy.mode = KFD_SYSFS_FILE_MODE; in kfd_procfs_add_sysfs_stats()
557 sysfs_attr_init(&pdd->attr_cu_occupancy); in kfd_procfs_add_sysfs_stats()
559 &pdd->attr_cu_occupancy); in kfd_procfs_add_sysfs_stats()
562 pdd->attr_cu_occupancy.name, in kfd_procfs_add_sysfs_stats()
563 (int)pdd->dev->id); in kfd_procfs_add_sysfs_stats()
574 struct kfd_process_device *pdd; in kfd_procfs_add_sysfs_files() local
587 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { in kfd_procfs_add_sysfs_files()
588 snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u", in kfd_procfs_add_sysfs_files()
589 pdd->dev->id); in kfd_procfs_add_sysfs_files()
590 ret = kfd_sysfs_create_file(p, &pdd->attr_vram, pdd->vram_filename); in kfd_procfs_add_sysfs_files()
593 (int)pdd->dev->id); in kfd_procfs_add_sysfs_files()
595 snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u", in kfd_procfs_add_sysfs_files()
596 pdd->dev->id); in kfd_procfs_add_sysfs_files()
597 ret = kfd_sysfs_create_file(p, &pdd->attr_sdma, pdd->sdma_filename); in kfd_procfs_add_sysfs_files()
600 (int)pdd->dev->id); in kfd_procfs_add_sysfs_files()
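
Lines 545-548, 555-559, and 588-600 repeat one sysfs idiom per file: set the attribute's name and mode, call sysfs_attr_init() (required for lockdep), then sysfs_create_file() against the owning kobject. A minimal sketch for one file; the 0444 mode is an assumption standing in for KFD_SYSFS_FILE_MODE:

#include <linux/kobject.h>
#include <linux/sysfs.h>

#define SYSFS_FILE_MODE 0444	/* assumption: read-only stats file */

static int add_evicted_ms_file(struct kobject *kobj_stats,
			       struct attribute *attr_evict)
{
	attr_evict->name = "evicted_ms";
	attr_evict->mode = SYSFS_FILE_MODE;
	sysfs_attr_init(attr_evict);	/* init lockdep key before create */
	return sysfs_create_file(kobj_stats, attr_evict);
}
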
643 struct kfd_process_device *pdd) in kfd_process_free_gpuvm() argument
645 struct kfd_dev *dev = pdd->dev; in kfd_process_free_gpuvm()
647 amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm); in kfd_process_free_gpuvm()
657 static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd, in kfd_process_alloc_gpuvm() argument
661 struct kfd_dev *kdev = pdd->dev; in kfd_process_alloc_gpuvm()
667 pdd->vm, &mem, NULL, flags); in kfd_process_alloc_gpuvm()
671 err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm); in kfd_process_alloc_gpuvm()
686 handle = kfd_process_device_create_obj_handle(pdd, mem); in kfd_process_alloc_gpuvm()
705 kfd_process_device_remove_obj_handle(pdd, handle); in kfd_process_alloc_gpuvm()
708 kfd_process_free_gpuvm(mem, pdd); in kfd_process_alloc_gpuvm()
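
Lines 686-708 show kernel-style reverse-order unwinding: each acquired resource gets an err_* label, and a failure releases everything acquired so far, newest first. A compressed sketch of the shape, with hypothetical alloc/map/handle helpers standing in for the amdgpu calls:

/* Hypothetical steps standing in for alloc, map, and handle creation. */
static int  alloc_memory(void)  { return 0; }
static int  map_memory(void)    { return 0; }
static int  create_handle(void) { return 0; }
static void unmap_memory(void)  { }
static void free_memory(void)   { }

static int alloc_gpuvm_sketch(void)
{
	int err, handle;

	err = alloc_memory();
	if (err)
		goto err_alloc;

	err = map_memory();
	if (err)
		goto err_map;

	handle = create_handle();
	if (handle < 0) {
		err = handle;
		goto err_handle;
	}
	return 0;

err_handle:
	unmap_memory();		/* undo map_memory() */
err_map:
	free_memory();		/* undo alloc_memory() */
err_alloc:
	return err;
}
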
724 static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd) in kfd_process_device_reserve_ib_mem() argument
726 struct qcm_process_device *qpd = &pdd->qpd; in kfd_process_device_reserve_ib_mem()
738 ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags, in kfd_process_device_reserve_ib_mem()
878 static void kfd_process_device_free_bos(struct kfd_process_device *pdd) in kfd_process_device_free_bos() argument
880 struct kfd_process *p = pdd->process; in kfd_process_device_free_bos()
888 idr_for_each_entry(&pdd->alloc_idr, mem, id) { in kfd_process_device_free_bos()
899 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem, NULL); in kfd_process_device_free_bos()
900 kfd_process_device_remove_obj_handle(pdd, id); in kfd_process_device_free_bos()
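
Lines 888-900 drain the per-device allocation IDR: idr_for_each_entry() walks every live handle while the body frees the object and retires its id. A minimal sketch, with the amdgpu free call reduced to a comment:

#include <linux/idr.h>

static void free_all_bos_sketch(struct idr *alloc_idr)
{
	void *mem;
	int id;

	/* Visit every live handle; removing the current entry is safe. */
	idr_for_each_entry(alloc_idr, mem, id) {
		/* free_mem(mem); -- hypothetical, models the amdgpu free */
		idr_remove(alloc_idr, id);
	}
}
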
906 struct kfd_process_device *pdd; in kfd_process_free_outstanding_kfd_bos() local
908 list_for_each_entry(pdd, &p->per_device_data, per_device_list) in kfd_process_free_outstanding_kfd_bos()
909 kfd_process_device_free_bos(pdd); in kfd_process_free_outstanding_kfd_bos()
914 struct kfd_process_device *pdd, *temp; in kfd_process_destroy_pdds() local
916 list_for_each_entry_safe(pdd, temp, &p->per_device_data, in kfd_process_destroy_pdds()
919 pdd->dev->id, p->pasid); in kfd_process_destroy_pdds()
921 if (pdd->drm_file) { in kfd_process_destroy_pdds()
923 pdd->dev->kgd, pdd->vm); in kfd_process_destroy_pdds()
924 fput(pdd->drm_file); in kfd_process_destroy_pdds()
926 else if (pdd->vm) in kfd_process_destroy_pdds()
928 pdd->dev->kgd, pdd->vm); in kfd_process_destroy_pdds()
930 list_del(&pdd->per_device_list); in kfd_process_destroy_pdds()
932 if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base) in kfd_process_destroy_pdds()
933 free_pages((unsigned long)pdd->qpd.cwsr_kaddr, in kfd_process_destroy_pdds()
936 kfree(pdd->qpd.doorbell_bitmap); in kfd_process_destroy_pdds()
937 idr_destroy(&pdd->alloc_idr); in kfd_process_destroy_pdds()
939 kfd_free_process_doorbells(pdd->dev, pdd->doorbell_index); in kfd_process_destroy_pdds()
945 if (pdd->runtime_inuse) { in kfd_process_destroy_pdds()
946 pm_runtime_mark_last_busy(pdd->dev->ddev->dev); in kfd_process_destroy_pdds()
947 pm_runtime_put_autosuspend(pdd->dev->ddev->dev); in kfd_process_destroy_pdds()
948 pdd->runtime_inuse = false; in kfd_process_destroy_pdds()
951 kfree(pdd); in kfd_process_destroy_pdds()
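
Lines 916-951 tear the pdd list down with list_for_each_entry_safe(), which caches the next node so the current pdd can be unlinked and kfree()d mid-walk; the plain iterator would touch freed memory here. The shape, reduced to a sketch:

#include <linux/list.h>
#include <linux/slab.h>

struct pdd_sketch {
	struct list_head per_device_list;
	/* ... per-device state ... */
};

static void destroy_pdds_sketch(struct list_head *per_device_data)
{
	struct pdd_sketch *pdd, *temp;

	list_for_each_entry_safe(pdd, temp, per_device_data, per_device_list) {
		/* ... release VM, doorbells, IDR as in lines 921-939 ... */
		list_del(&pdd->per_device_list);	/* unlink first */
		kfree(pdd);				/* then free */
	}
}
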
964 struct kfd_process_device *pdd; in kfd_process_wq_release() local
973 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { in kfd_process_wq_release()
974 sysfs_remove_file(p->kobj, &pdd->attr_vram); in kfd_process_wq_release()
975 sysfs_remove_file(p->kobj, &pdd->attr_sdma); in kfd_process_wq_release()
976 sysfs_remove_file(p->kobj, &pdd->attr_evict); in kfd_process_wq_release()
977 if (pdd->dev->kfd2kgd->get_cu_occupancy != NULL) in kfd_process_wq_release()
978 sysfs_remove_file(p->kobj, &pdd->attr_cu_occupancy); in kfd_process_wq_release()
979 kobject_del(pdd->kobj_stats); in kfd_process_wq_release()
980 kobject_put(pdd->kobj_stats); in kfd_process_wq_release()
981 pdd->kobj_stats = NULL; in kfd_process_wq_release()
1023 struct kfd_process_device *pdd = NULL; in kfd_process_notifier_release() local
1047 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { in kfd_process_notifier_release()
1048 struct kfd_dev *dev = pdd->dev; in kfd_process_notifier_release()
1084 struct kfd_process_device *pdd; in kfd_process_init_cwsr_apu() local
1086 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { in kfd_process_init_cwsr_apu()
1087 struct kfd_dev *dev = pdd->dev; in kfd_process_init_cwsr_apu()
1088 struct qcm_process_device *qpd = &pdd->qpd; in kfd_process_init_cwsr_apu()
1117 static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) in kfd_process_device_init_cwsr_dgpu() argument
1119 struct kfd_dev *dev = pdd->dev; in kfd_process_device_init_cwsr_dgpu()
1120 struct qcm_process_device *qpd = &pdd->qpd; in kfd_process_device_init_cwsr_dgpu()
1131 ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base, in kfd_process_device_init_cwsr_dgpu()
1247 struct kfd_process_device *pdd = NULL; in kfd_get_process_device_data() local
1249 list_for_each_entry(pdd, &p->per_device_data, per_device_list) in kfd_get_process_device_data()
1250 if (pdd->dev == dev) in kfd_get_process_device_data()
1251 return pdd; in kfd_get_process_device_data()
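
Lines 1249-1251 are the canonical linear lookup: walk the process's per-device list and return the pdd whose dev matches, or NULL when the device has no pdd yet. As a self-contained sketch:

#include <linux/list.h>

struct kfd_dev;

struct pdd_sketch {
	struct list_head per_device_list;
	struct kfd_dev *dev;
};

static struct pdd_sketch *get_pdd_sketch(struct list_head *per_device_data,
					 struct kfd_dev *dev)
{
	struct pdd_sketch *pdd;

	list_for_each_entry(pdd, per_device_data, per_device_list)
		if (pdd->dev == dev)
			return pdd;	/* found this device's pdd */

	return NULL;			/* no pdd bound for this device */
}
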
1259 struct kfd_process_device *pdd = NULL; in kfd_create_process_device_data() local
1261 pdd = kzalloc(sizeof(*pdd), GFP_KERNEL); in kfd_create_process_device_data()
1262 if (!pdd) in kfd_create_process_device_data()
1265 if (kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) { in kfd_create_process_device_data()
1270 if (init_doorbell_bitmap(&pdd->qpd, dev)) { in kfd_create_process_device_data()
1275 pdd->dev = dev; in kfd_create_process_device_data()
1276 INIT_LIST_HEAD(&pdd->qpd.queues_list); in kfd_create_process_device_data()
1277 INIT_LIST_HEAD(&pdd->qpd.priv_queue_list); in kfd_create_process_device_data()
1278 pdd->qpd.dqm = dev->dqm; in kfd_create_process_device_data()
1279 pdd->qpd.pqm = &p->pqm; in kfd_create_process_device_data()
1280 pdd->qpd.evicted = 0; in kfd_create_process_device_data()
1281 pdd->qpd.mapped_gws_queue = false; in kfd_create_process_device_data()
1282 pdd->process = p; in kfd_create_process_device_data()
1283 pdd->bound = PDD_UNBOUND; in kfd_create_process_device_data()
1284 pdd->already_dequeued = false; in kfd_create_process_device_data()
1285 pdd->runtime_inuse = false; in kfd_create_process_device_data()
1286 pdd->vram_usage = 0; in kfd_create_process_device_data()
1287 pdd->sdma_past_activity_counter = 0; in kfd_create_process_device_data()
1288 atomic64_set(&pdd->evict_duration_counter, 0); in kfd_create_process_device_data()
1289 list_add(&pdd->per_device_list, &p->per_device_data); in kfd_create_process_device_data()
1292 idr_init(&pdd->alloc_idr); in kfd_create_process_device_data()
1294 return pdd; in kfd_create_process_device_data()
1297 kfree(pdd); in kfd_create_process_device_data()
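
Lines 1261-1297 follow allocate-then-initialize: kzalloc() returns zeroed storage, the fallible doorbell/bitmap steps unwind to kfree() on failure, and the pdd is only published on the process list once its list heads and IDR are set up. A minimal sketch of that ordering:

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/idr.h>

struct pdd_sketch {
	struct list_head per_device_list;
	struct idr alloc_idr;
	bool runtime_inuse;
};

static struct pdd_sketch *create_pdd_sketch(struct list_head *per_device_data)
{
	struct pdd_sketch *pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);

	if (!pdd)
		return NULL;

	/* Zeroed by kzalloc; explicit assignments mirror lines 1275-1288. */
	pdd->runtime_inuse = false;
	idr_init(&pdd->alloc_idr);		/* handle space for BOs */
	list_add(&pdd->per_device_list, per_device_data);	/* publish */
	return pdd;
}
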
1315 int kfd_process_device_init_vm(struct kfd_process_device *pdd, in kfd_process_device_init_vm() argument
1322 if (pdd->vm) in kfd_process_device_init_vm()
1325 p = pdd->process; in kfd_process_device_init_vm()
1326 dev = pdd->dev; in kfd_process_device_init_vm()
1331 &pdd->vm, &p->kgd_process_info, &p->ef); in kfd_process_device_init_vm()
1334 &pdd->vm, &p->kgd_process_info, &p->ef); in kfd_process_device_init_vm()
1340 amdgpu_vm_set_task_info(pdd->vm); in kfd_process_device_init_vm()
1342 ret = kfd_process_device_reserve_ib_mem(pdd); in kfd_process_device_init_vm()
1345 ret = kfd_process_device_init_cwsr_dgpu(pdd); in kfd_process_device_init_vm()
1349 pdd->drm_file = drm_file; in kfd_process_device_init_vm()
1355 kfd_process_device_free_bos(pdd); in kfd_process_device_init_vm()
1357 amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm); in kfd_process_device_init_vm()
1358 pdd->vm = NULL; in kfd_process_device_init_vm()
1373 struct kfd_process_device *pdd; in kfd_bind_process_to_device() local
1376 pdd = kfd_get_process_device_data(dev, p); in kfd_bind_process_to_device()
1377 if (!pdd) { in kfd_bind_process_to_device()
1387 if (!pdd->runtime_inuse) { in kfd_bind_process_to_device()
1395 err = kfd_iommu_bind_process_to_device(pdd); in kfd_bind_process_to_device()
1399 err = kfd_process_device_init_vm(pdd, NULL); in kfd_bind_process_to_device()
1407 pdd->runtime_inuse = true; in kfd_bind_process_to_device()
1409 return pdd; in kfd_bind_process_to_device()
1413 if (!pdd->runtime_inuse) { in kfd_bind_process_to_device()
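
Lines 1387-1413, with the matching release at lines 945-948, bracket the device in runtime PM: the first successful bind takes a runtime-PM reference, guarded by the runtime_inuse flag so it is taken only once, and teardown drops it via mark_last_busy + put_autosuspend. A sketch of the pairing; that the elided block at line 1387 calls pm_runtime_get_sync() is an assumption:

#include <linux/pm_runtime.h>

static int bind_get_sketch(struct device *dev, bool *runtime_inuse)
{
	if (!*runtime_inuse) {
		int err = pm_runtime_get_sync(dev);

		if (err < 0) {
			/* get_sync bumps the usage count even on failure */
			pm_runtime_put_autosuspend(dev);
			return err;
		}
		*runtime_inuse = true;
	}
	return 0;
}

static void unbind_put_sketch(struct device *dev, bool *runtime_inuse)
{
	if (*runtime_inuse) {
		pm_runtime_mark_last_busy(dev);		/* line 946 */
		pm_runtime_put_autosuspend(dev);	/* line 947 */
		*runtime_inuse = false;
	}
}
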
1431 struct kfd_process_device *pdd) in kfd_get_next_process_device_data() argument
1433 if (list_is_last(&pdd->per_device_list, &p->per_device_data)) in kfd_get_next_process_device_data()
1435 return list_next_entry(pdd, per_device_list); in kfd_get_next_process_device_data()
1446 int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd, in kfd_process_device_create_obj_handle() argument
1449 return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL); in kfd_process_device_create_obj_handle()
1455 void *kfd_process_device_translate_handle(struct kfd_process_device *pdd, in kfd_process_device_translate_handle() argument
1461 return idr_find(&pdd->alloc_idr, handle); in kfd_process_device_translate_handle()
1467 void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd, in kfd_process_device_remove_obj_handle() argument
1471 idr_remove(&pdd->alloc_idr, handle); in kfd_process_device_remove_obj_handle()
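
Lines 1449-1471 wrap the IDR in a create/translate/remove triple: idr_alloc() with start=0, end=0 hands out the smallest free non-negative handle, idr_find() maps a handle back to its pointer, and idr_remove() retires it. A small demo of the trio:

#include <linux/idr.h>
#include <linux/bug.h>

static int idr_handle_demo(struct idr *alloc_idr, void *mem)
{
	/* start=0, end=0: any non-negative id is acceptable */
	int handle = idr_alloc(alloc_idr, mem, 0, 0, GFP_KERNEL);

	if (handle < 0)
		return handle;				/* -ENOMEM/-ENOSPC */

	WARN_ON(idr_find(alloc_idr, handle) != mem);	/* translate back */
	idr_remove(alloc_idr, handle);			/* retire handle */
	return 0;
}
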
1518 struct kfd_process_device *pdd; in kfd_process_evict_queues() local
1522 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { in kfd_process_evict_queues()
1523 r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm, in kfd_process_evict_queues()
1524 &pdd->qpd); in kfd_process_evict_queues()
1538 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { in kfd_process_evict_queues()
1541 if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, in kfd_process_evict_queues()
1542 &pdd->qpd)) in kfd_process_evict_queues()
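
Lines 1522-1542 evict with rollback: queues are evicted device by device, and if one device fails, the devices already evicted get their queues restored so the process is not left half-suspended. A sketch of that shape, with hypothetical per-device ops in place of the dqm callbacks:

#include <linux/list.h>

struct pdd_sketch {
	struct list_head per_device_list;
};

/* Hypothetical per-device ops standing in for the dqm callbacks. */
static int evict_queues(struct pdd_sketch *pdd)   { return 0; }
static int restore_queues(struct pdd_sketch *pdd) { return 0; }

static int evict_all_sketch(struct list_head *per_device_data)
{
	struct pdd_sketch *pdd;
	int r = 0, n_evicted = 0;

	list_for_each_entry(pdd, per_device_data, per_device_list) {
		r = evict_queues(pdd);
		if (r)			/* stop at the first failure */
			goto fail;
		n_evicted++;
	}
	return 0;

fail:
	/* Roll back: restore only the devices evicted so far. */
	list_for_each_entry(pdd, per_device_data, per_device_list) {
		if (n_evicted-- == 0)
			break;
		restore_queues(pdd);
	}
	return r;
}
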
1554 struct kfd_process_device *pdd; in kfd_process_restore_queues() local
1557 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { in kfd_process_restore_queues()
1558 r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, in kfd_process_restore_queues()
1559 &pdd->qpd); in kfd_process_restore_queues()
1690 struct kfd_process_device *pdd; in kfd_reserved_mem_mmap() local
1698 pdd = kfd_get_process_device_data(dev, process); in kfd_reserved_mem_mmap()
1699 if (!pdd) in kfd_reserved_mem_mmap()
1701 qpd = &pdd->qpd; in kfd_reserved_mem_mmap()
1718 void kfd_flush_tlb(struct kfd_process_device *pdd) in kfd_flush_tlb() argument
1720 struct kfd_dev *dev = pdd->dev; in kfd_flush_tlb()
1726 if (pdd->qpd.vmid) in kfd_flush_tlb()
1728 pdd->qpd.vmid); in kfd_flush_tlb()
1731 pdd->process->pasid); in kfd_flush_tlb()
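
Lines 1726-1731 select the TLB flush key: when the queue-properties VMID is set (e.g. when VMIDs are statically assigned rather than scheduler-managed), the flush goes by VMID, otherwise by the process PASID. A sketch of the branch, with hypothetical flush helpers in place of the kfd2kgd callbacks:

/* Hypothetical flush helpers modelling the kfd2kgd callbacks. */
static void flush_by_vmid(unsigned int vmid)   { }
static void flush_by_pasid(unsigned int pasid) { }

static void flush_tlb_sketch(unsigned int vmid, unsigned int pasid)
{
	if (vmid)			/* pdd->qpd.vmid, line 1726 */
		flush_by_vmid(vmid);
	else				/* pdd->process->pasid, line 1731 */
		flush_by_pasid(pasid);
}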