Lines matching refs: pdd

162 			struct kfd_process_device *pdd)  in kfd_process_free_gpuvm()  argument
164 struct kfd_dev *dev = pdd->dev; in kfd_process_free_gpuvm()
166 amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm); in kfd_process_free_gpuvm()
176 static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd, in kfd_process_alloc_gpuvm() argument
180 struct kfd_dev *kdev = pdd->dev; in kfd_process_alloc_gpuvm()
186 pdd->vm, &mem, NULL, flags); in kfd_process_alloc_gpuvm()
190 err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm); in kfd_process_alloc_gpuvm()
205 handle = kfd_process_device_create_obj_handle(pdd, mem); in kfd_process_alloc_gpuvm()
224 kfd_process_device_remove_obj_handle(pdd, handle); in kfd_process_alloc_gpuvm()
227 kfd_process_free_gpuvm(mem, pdd); in kfd_process_alloc_gpuvm()
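The matches above cover the paired helpers that free and allocate per-device GPUVM memory: allocate a buffer in the process VM, map it to the GPU, then record it under a handle in the pdd's allocation IDR, unwinding in reverse on failure. A condensed sketch of that flow; the allocation call's name and argument order are inferred from the continuation arguments at line 186 and the driver's amdgpu_amdkfd naming, and the sync/kernel-mapping steps of the real helper are omitted:

static int alloc_gpuvm_sketch(struct kfd_process_device *pdd,
			      uint64_t gpu_va, uint32_t size, uint32_t flags)
{
	struct kfd_dev *kdev = pdd->dev;
	struct kgd_mem *mem;
	int handle, err;

	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
						      pdd->vm, &mem, NULL, flags);
	if (err)
		return err;

	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
	if (err)
		goto err_map;

	/* Remember the buffer in the per-device IDR so later teardown
	 * (kfd_process_device_free_bos) can find and release it. */
	handle = kfd_process_device_create_obj_handle(pdd, mem);
	if (handle < 0) {
		err = handle;
		goto err_handle;
	}

	return 0;

err_handle:
	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->kgd, mem, pdd->vm);
err_map:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem);
	return err;
}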
243 static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd) in kfd_process_device_reserve_ib_mem() argument
245 struct qcm_process_device *qpd = &pdd->qpd; in kfd_process_device_reserve_ib_mem()
257 ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags, in kfd_process_device_reserve_ib_mem()
379 static void kfd_process_device_free_bos(struct kfd_process_device *pdd) in kfd_process_device_free_bos() argument
381 struct kfd_process *p = pdd->process; in kfd_process_device_free_bos()
389 idr_for_each_entry(&pdd->alloc_idr, mem, id) { in kfd_process_device_free_bos()
400 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem); in kfd_process_device_free_bos()
401 kfd_process_device_remove_obj_handle(pdd, id); in kfd_process_device_free_bos()
407 struct kfd_process_device *pdd; in kfd_process_free_outstanding_kfd_bos() local
409 list_for_each_entry(pdd, &p->per_device_data, per_device_list) in kfd_process_free_outstanding_kfd_bos()
410 kfd_process_device_free_bos(pdd); in kfd_process_free_outstanding_kfd_bos()
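Per-device cleanup walks the pdd's allocation IDR and releases every buffer object still tracked there, and kfd_process_free_outstanding_kfd_bos() simply repeats that for each device on the process list. A minimal sketch of the per-device loop, assuming the IDR entries are the same pointers stored by the handle-create helper:

static void free_bos_sketch(struct kfd_process_device *pdd)
{
	void *mem;
	int id;

	/* Free every BO this process still owns on the device and drop
	 * its handle; idr_get_next-based iteration tolerates the removal. */
	idr_for_each_entry(&pdd->alloc_idr, mem, id) {
		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem);
		kfd_process_device_remove_obj_handle(pdd, id);
	}
}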
415 struct kfd_process_device *pdd, *temp; in kfd_process_destroy_pdds() local
417 list_for_each_entry_safe(pdd, temp, &p->per_device_data, in kfd_process_destroy_pdds()
420 pdd->dev->id, p->pasid); in kfd_process_destroy_pdds()
422 if (pdd->drm_file) { in kfd_process_destroy_pdds()
424 pdd->dev->kgd, pdd->vm); in kfd_process_destroy_pdds()
425 fput(pdd->drm_file); in kfd_process_destroy_pdds()
427 else if (pdd->vm) in kfd_process_destroy_pdds()
429 pdd->dev->kgd, pdd->vm); in kfd_process_destroy_pdds()
431 list_del(&pdd->per_device_list); in kfd_process_destroy_pdds()
433 if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base) in kfd_process_destroy_pdds()
434 free_pages((unsigned long)pdd->qpd.cwsr_kaddr, in kfd_process_destroy_pdds()
437 kfree(pdd->qpd.doorbell_bitmap); in kfd_process_destroy_pdds()
438 idr_destroy(&pdd->alloc_idr); in kfd_process_destroy_pdds()
440 kfree(pdd); in kfd_process_destroy_pdds()
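kfd_process_destroy_pdds() tears each pdd down with the _safe list iterator because the entry is unlinked and freed inside the loop. The VM release differs by origin: a VM acquired from a DRM render-node file is released and the file reference dropped, while a VM created by KFD itself is destroyed. A condensed sketch; the release/destroy helper names and the KFD_CWSR_TBA_TMA_SIZE constant are not in the matches above and are my assumptions from the surrounding driver code:

static void destroy_pdds_sketch(struct kfd_process *p)
{
	struct kfd_process_device *pdd, *temp;

	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
				 per_device_list) {
		if (pdd->drm_file) {
			/* VM came from a render-node fd: release it and
			 * drop our reference on the file. */
			amdgpu_amdkfd_gpuvm_release_process_vm(pdd->dev->kgd,
							       pdd->vm);
			fput(pdd->drm_file);
		} else if (pdd->vm) {
			/* VM was created by KFD: destroy it outright. */
			amdgpu_amdkfd_gpuvm_destroy_process_vm(pdd->dev->kgd,
							       pdd->vm);
		}

		list_del(&pdd->per_device_list);

		/* A CWSR area KFD allocated itself (no user cwsr_base) was
		 * obtained from the page allocator and must go back to it. */
		if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
				   get_order(KFD_CWSR_TBA_TMA_SIZE));

		kfree(pdd->qpd.doorbell_bitmap);
		idr_destroy(&pdd->alloc_idr);
		kfree(pdd);
	}
}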
498 struct kfd_process_device *pdd = NULL; in kfd_process_notifier_release() local
522 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { in kfd_process_notifier_release()
523 struct kfd_dev *dev = pdd->dev; in kfd_process_notifier_release()
554 struct kfd_process_device *pdd; in kfd_process_init_cwsr_apu() local
556 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { in kfd_process_init_cwsr_apu()
557 struct kfd_dev *dev = pdd->dev; in kfd_process_init_cwsr_apu()
558 struct qcm_process_device *qpd = &pdd->qpd; in kfd_process_init_cwsr_apu()
588 static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) in kfd_process_device_init_cwsr_dgpu() argument
590 struct kfd_dev *dev = pdd->dev; in kfd_process_device_init_cwsr_dgpu()
591 struct qcm_process_device *qpd = &pdd->qpd; in kfd_process_device_init_cwsr_dgpu()
601 ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base, in kfd_process_device_init_cwsr_dgpu()
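CWSR (compute wave save/restore) setup has two variants: on APUs the trap-handler pages live in system memory that user space later mmaps, while on dGPUs kfd_process_device_init_cwsr_dgpu() places them in the process GPUVM at qpd->cwsr_base via kfd_process_alloc_gpuvm(). A sketch of the dGPU path; the size, flags and kernel-mapping arguments of the allocation call are cut off in the match at line 601, so those, and the memcpy of the trap-handler binary, are assumptions:

static int init_cwsr_dgpu_sketch(struct kfd_process_device *pdd, uint32_t flags)
{
	struct kfd_dev *dev = pdd->dev;
	struct qcm_process_device *qpd = &pdd->qpd;
	void *kaddr;
	int ret;

	/* Nothing to do if CWSR is off, already set up, or no area reserved. */
	if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
		return 0;

	/* 'flags' would select a GTT-backed, executable allocation in the
	 * real driver; the exact flag set is not shown in the matches. */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
				      KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr);
	if (ret)
		return ret;

	/* Keep the kernel mapping and copy the trap-handler code into it. */
	qpd->cwsr_kaddr = kaddr;
	memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

	return 0;
}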
718 struct kfd_process_device *pdd = NULL; in kfd_get_process_device_data() local
720 list_for_each_entry(pdd, &p->per_device_data, per_device_list) in kfd_get_process_device_data()
721 if (pdd->dev == dev) in kfd_get_process_device_data()
722 return pdd; in kfd_get_process_device_data()
730 struct kfd_process_device *pdd = NULL; in kfd_create_process_device_data() local
732 pdd = kzalloc(sizeof(*pdd), GFP_KERNEL); in kfd_create_process_device_data()
733 if (!pdd) in kfd_create_process_device_data()
736 if (init_doorbell_bitmap(&pdd->qpd, dev)) { in kfd_create_process_device_data()
738 kfree(pdd); in kfd_create_process_device_data()
742 pdd->dev = dev; in kfd_create_process_device_data()
743 INIT_LIST_HEAD(&pdd->qpd.queues_list); in kfd_create_process_device_data()
744 INIT_LIST_HEAD(&pdd->qpd.priv_queue_list); in kfd_create_process_device_data()
745 pdd->qpd.dqm = dev->dqm; in kfd_create_process_device_data()
746 pdd->qpd.pqm = &p->pqm; in kfd_create_process_device_data()
747 pdd->qpd.evicted = 0; in kfd_create_process_device_data()
748 pdd->process = p; in kfd_create_process_device_data()
749 pdd->bound = PDD_UNBOUND; in kfd_create_process_device_data()
750 pdd->already_dequeued = false; in kfd_create_process_device_data()
751 list_add(&pdd->per_device_list, &p->per_device_data); in kfd_create_process_device_data()
754 idr_init(&pdd->alloc_idr); in kfd_create_process_device_data()
756 return pdd; in kfd_create_process_device_data()
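kfd_get_process_device_data() and kfd_create_process_device_data() form the usual lookup/create pair: the first scans the process's per_device_data list for the pdd bound to a given device, the second kzalloc's a new pdd, initialises its queue lists and allocation IDR, and links it onto that list. The lookup side is short enough to restate as a sketch grounded directly in the matches:

struct kfd_process_device *get_pdd_sketch(struct kfd_dev *dev,
					  struct kfd_process *p)
{
	struct kfd_process_device *pdd;

	/* Exactly one pdd exists per (process, device) pair. */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		if (pdd->dev == dev)
			return pdd;

	return NULL;
}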
773 int kfd_process_device_init_vm(struct kfd_process_device *pdd, in kfd_process_device_init_vm() argument
780 if (pdd->vm) in kfd_process_device_init_vm()
783 p = pdd->process; in kfd_process_device_init_vm()
784 dev = pdd->dev; in kfd_process_device_init_vm()
789 &pdd->vm, &p->kgd_process_info, &p->ef); in kfd_process_device_init_vm()
792 &pdd->vm, &p->kgd_process_info, &p->ef); in kfd_process_device_init_vm()
798 amdgpu_vm_set_task_info(pdd->vm); in kfd_process_device_init_vm()
800 ret = kfd_process_device_reserve_ib_mem(pdd); in kfd_process_device_init_vm()
803 ret = kfd_process_device_init_cwsr_dgpu(pdd); in kfd_process_device_init_vm()
807 pdd->drm_file = drm_file; in kfd_process_device_init_vm()
813 kfd_process_device_free_bos(pdd); in kfd_process_device_init_vm()
815 amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm); in kfd_process_device_init_vm()
816 pdd->vm = NULL; in kfd_process_device_init_vm()
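kfd_process_device_init_vm() is effectively idempotent (it returns early once pdd->vm is set) and has two paths: with a DRM render-node file it acquires the amdgpu VM behind that file, otherwise it creates a KFD-owned process VM; either way it then reserves the IB page and the dGPU CWSR area, freeing the BOs and destroying the VM on failure. A sketch of that branch structure; the acquire/create helper names, the -EBUSY early return, and the destroy-only-if-created error handling are inferred rather than shown, and the amdgpu_vm_set_task_info() call at line 798 is omitted:

static int init_vm_sketch(struct kfd_process_device *pdd, struct file *drm_file)
{
	struct kfd_process *p = pdd->process;
	struct kfd_dev *dev = pdd->dev;
	int ret;

	if (pdd->vm)
		return drm_file ? -EBUSY : 0;

	if (drm_file)
		/* Reuse the amdgpu VM behind the render-node fd. */
		ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->kgd, drm_file,
				&pdd->vm, &p->kgd_process_info, &p->ef);
	else
		/* No fd supplied: create a VM owned by KFD. */
		ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd,
				&pdd->vm, &p->kgd_process_info, &p->ef);
	if (ret)
		return ret;

	ret = kfd_process_device_reserve_ib_mem(pdd);
	if (ret)
		goto err;
	ret = kfd_process_device_init_cwsr_dgpu(pdd);
	if (ret)
		goto err;

	pdd->drm_file = drm_file;
	return 0;

err:
	kfd_process_device_free_bos(pdd);
	if (!drm_file)
		amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm);
	pdd->vm = NULL;
	return ret;
}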
831 struct kfd_process_device *pdd; in kfd_bind_process_to_device() local
834 pdd = kfd_get_process_device_data(dev, p); in kfd_bind_process_to_device()
835 if (!pdd) { in kfd_bind_process_to_device()
840 err = kfd_iommu_bind_process_to_device(pdd); in kfd_bind_process_to_device()
844 err = kfd_process_device_init_vm(pdd, NULL); in kfd_bind_process_to_device()
848 return pdd; in kfd_bind_process_to_device()
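kfd_bind_process_to_device() is where the pieces meet: it looks up the pdd, binds the process on the IOMMU (a no-op on devices without one), and makes sure the per-device VM exists. A short sketch of that sequence; the exact error values returned by the real function are not in the matches:

struct kfd_process_device *bind_sketch(struct kfd_dev *dev,
				       struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd)
		return ERR_PTR(-ENOMEM);

	/* APU path: attach the process address space to the IOMMU. */
	err = kfd_iommu_bind_process_to_device(pdd);
	if (err)
		return ERR_PTR(err);

	/* Make sure a GPUVM exists even when no DRM file was supplied. */
	err = kfd_process_device_init_vm(pdd, NULL);
	if (err)
		return ERR_PTR(err);

	return pdd;
}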
861 struct kfd_process_device *pdd) in kfd_get_next_process_device_data() argument
863 if (list_is_last(&pdd->per_device_list, &p->per_device_data)) in kfd_get_next_process_device_data()
865 return list_next_entry(pdd, per_device_list); in kfd_get_next_process_device_data()
876 int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd, in kfd_process_device_create_obj_handle() argument
879 return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL); in kfd_process_device_create_obj_handle()
885 void *kfd_process_device_translate_handle(struct kfd_process_device *pdd, in kfd_process_device_translate_handle() argument
891 return idr_find(&pdd->alloc_idr, handle); in kfd_process_device_translate_handle()
897 void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd, in kfd_process_device_remove_obj_handle() argument
901 idr_remove(&pdd->alloc_idr, handle); in kfd_process_device_remove_obj_handle()
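The three object-handle helpers are thin wrappers around the pdd's per-device IDR: create stores a driver-private pointer and hands back the new id, translate maps an id back to the pointer, remove forgets it. A sketch that mirrors those wrappers (the negative-handle guards sit in the elided lines and are assumed):

/* Create: store mem in the pdd's IDR; returns the handle or a negative errno. */
int create_handle_sketch(struct kfd_process_device *pdd, void *mem)
{
	return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
}

/* Translate: look a handle back up; NULL means stale or invalid. */
void *translate_handle_sketch(struct kfd_process_device *pdd, int handle)
{
	if (handle < 0)
		return NULL;
	return idr_find(&pdd->alloc_idr, handle);
}

/* Remove: drop the handle; the caller frees the underlying buffer. */
void remove_handle_sketch(struct kfd_process_device *pdd, int handle)
{
	if (handle >= 0)
		idr_remove(&pdd->alloc_idr, handle);
}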
948 struct kfd_process_device *pdd; in kfd_process_evict_queues() local
952 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { in kfd_process_evict_queues()
953 r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm, in kfd_process_evict_queues()
954 &pdd->qpd); in kfd_process_evict_queues()
968 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { in kfd_process_evict_queues()
971 if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, in kfd_process_evict_queues()
972 &pdd->qpd)) in kfd_process_evict_queues()
984 struct kfd_process_device *pdd; in kfd_process_restore_queues() local
987 list_for_each_entry(pdd, &p->per_device_data, per_device_list) { in kfd_process_restore_queues()
988 r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, in kfd_process_restore_queues()
989 &pdd->qpd); in kfd_process_restore_queues()
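Eviction asks every device queue manager the process uses to evict that process's queues; if one device fails, the devices already evicted are restored so the process is not left half-stopped. kfd_process_restore_queues() is the mirror image without the rollback. A sketch of the evict-with-rollback shape; counting evicted devices in n_evicted is my reading of the unshown lines between the matches:

int evict_queues_sketch(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	unsigned int n_evicted = 0;
	int r = 0;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
							    &pdd->qpd);
		if (r)
			goto fail;
		n_evicted++;
	}

	return 0;

fail:
	/* Roll back: restore queues on the devices evicted so far. */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		if (n_evicted == 0)
			break;
		if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd))
			pr_err("Failed to restore queues\n");
		n_evicted--;
	}

	return r;
}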
1119 struct kfd_process_device *pdd; in kfd_reserved_mem_mmap() local
1127 pdd = kfd_get_process_device_data(dev, process); in kfd_reserved_mem_mmap()
1128 if (!pdd) in kfd_reserved_mem_mmap()
1130 qpd = &pdd->qpd; in kfd_reserved_mem_mmap()
1147 void kfd_flush_tlb(struct kfd_process_device *pdd) in kfd_flush_tlb() argument
1149 struct kfd_dev *dev = pdd->dev; in kfd_flush_tlb()
1156 if (pdd->qpd.vmid) in kfd_flush_tlb()
1157 f2g->invalidate_tlbs_vmid(dev->kgd, pdd->qpd.vmid); in kfd_flush_tlb()
1159 f2g->invalidate_tlbs(dev->kgd, pdd->process->pasid); in kfd_flush_tlb()
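kfd_flush_tlb() picks its invalidation scope from how VMIDs are assigned: a process that owns a hardware VMID (qpd.vmid non-zero, i.e. no hardware scheduler) is flushed by VMID, otherwise the flush goes by PASID and the hardware scheduler resolves the mapping. A sketch, assuming f2g is the device's kfd2kgd function table as the matched lines suggest:

void flush_tlb_sketch(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	const struct kfd2kgd_calls *f2g = dev->kfd2kgd;

	if (pdd->qpd.vmid)
		/* Per-process VMID assigned: flush just that VMID. */
		f2g->invalidate_tlbs_vmid(dev->kgd, pdd->qpd.vmid);
	else
		/* HWS-managed VMIDs: flush all mappings for the PASID. */
		f2g->invalidate_tlbs(dev->kgd, pdd->process->pasid);
}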