/Linux-v5.15/drivers/gpu/drm/amd/amdkfd/
D  kfd_device_queue_manager.c
      42  static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
      45  static int execute_queues_cpsch(struct device_queue_manager *dqm,
      48  static int unmap_queues_cpsch(struct device_queue_manager *dqm,
      52  static int map_queues_cpsch(struct device_queue_manager *dqm);
      54  static void deallocate_sdma_queue(struct device_queue_manager *dqm,
      57  static inline void deallocate_hqd(struct device_queue_manager *dqm,
      59  static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
      60  static int allocate_sdma_queue(struct device_queue_manager *dqm,
      72  static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)    in is_pipe_enabled() argument
      75  int pipe_offset = (mec * dqm->dev->shared_resources.num_pipe_per_mec    in is_pipe_enabled()
      [all …]
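
The fragment at line 75 shows how is_pipe_enabled() flattens an (mec, pipe) pair into a first-queue index. A minimal standalone sketch of that arithmetic follows; the num_queue_per_pipe scaling, the cp_queue_bitmap test, and all constants are assumptions about the elided body, not taken from the listing above.

```c
/*
 * Self-contained model of the pipe-offset arithmetic visible at line 75.
 * The queue-per-pipe scaling and the bitmap test are assumed, not shown
 * in the fragment above.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_PIPE_PER_MEC   4   /* hypothetical shared_resources values */
#define NUM_QUEUE_PER_PIPE 8

static bool is_pipe_enabled_model(const unsigned long *cp_queue_bitmap,
				  int mec, int pipe)
{
	/* Flatten (mec, pipe) into a first-queue index, as at line 75. */
	int pipe_offset = (mec * NUM_PIPE_PER_MEC + pipe) * NUM_QUEUE_PER_PIPE;
	int i;

	/* Assumed: a pipe counts as enabled if any of its queues is usable. */
	for (i = 0; i < NUM_QUEUE_PER_PIPE; i++)
		if (cp_queue_bitmap[(pipe_offset + i) / (8 * sizeof(long))] &
		    (1UL << ((pipe_offset + i) % (8 * sizeof(long)))))
			return true;
	return false;
}

int main(void)
{
	unsigned long bitmap[2] = { 0x0000ff00UL, 0 }; /* mec 0, pipe 1 usable */

	printf("mec 0 pipe 0: %d\n", is_pipe_enabled_model(bitmap, 0, 0));
	printf("mec 0 pipe 1: %d\n", is_pipe_enabled_model(bitmap, 0, 1));
	return 0;
}
```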
|
D  kfd_device_queue_manager.h
      87  int (*create_queue)(struct device_queue_manager *dqm,
      91  int (*destroy_queue)(struct device_queue_manager *dqm,
      95  int (*update_queue)(struct device_queue_manager *dqm,
      98  int (*register_process)(struct device_queue_manager *dqm,
     101  int (*unregister_process)(struct device_queue_manager *dqm,
     104  int (*initialize)(struct device_queue_manager *dqm);
     105  int (*start)(struct device_queue_manager *dqm);
     106  int (*stop)(struct device_queue_manager *dqm);
     107  void (*pre_reset)(struct device_queue_manager *dqm);
     108  void (*uninitialize)(struct device_queue_manager *dqm);
      [all …]
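
These matches are the hooks of the dqm ops table. A reduced, compilable model of that dispatch pattern is sketched below; the struct contents and callback bodies are placeholders, and only the call shape, mirroring dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd) from kfd_process_queue_manager.c, is taken from the listings.

```c
/* Minimal model of the ops-table dispatch implied by the header above,
 * reduced to two of the listed hooks. */
#include <stdio.h>

struct queue { int id; };                   /* placeholder contents */
struct qcm_process_device { int pasid; };   /* placeholder contents */
struct device_queue_manager;

struct device_queue_manager_ops {
	int (*create_queue)(struct device_queue_manager *dqm,
			    struct queue *q,
			    struct qcm_process_device *qpd);
	int (*start)(struct device_queue_manager *dqm);
};

struct device_queue_manager {
	struct device_queue_manager_ops ops;
};

static int create_queue_model(struct device_queue_manager *dqm,
			      struct queue *q,
			      struct qcm_process_device *qpd)
{
	(void)dqm;
	printf("create queue %d for pasid %d\n", q->id, qpd->pasid);
	return 0;
}

static int start_model(struct device_queue_manager *dqm)
{
	(void)dqm;
	return 0;
}

int main(void)
{
	/* Which implementations land in .ops depends on the scheduling mode. */
	struct device_queue_manager dqm = {
		.ops = { .create_queue = create_queue_model,
			 .start = start_model },
	};
	struct queue q = { .id = 1 };
	struct qcm_process_device qpd = { .pasid = 42 };

	/* Same call shape as pqm_create_queue() at line 256 below. */
	return dqm.ops.create_queue(&dqm, &q, &qpd);
}
```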
|
D  kfd_process_queue_manager.c
      74  dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);    in kfd_process_dequeue_from_device()
     123  return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,    in pqm_set_gws()
     234  dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);    in pqm_create_queue()
     256  retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);    in pqm_create_queue()
     262  if ((dev->dqm->sched_policy ==    in pqm_create_queue()
     264  ((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||    in pqm_create_queue()
     265  (dev->dqm->active_queue_count >= get_cp_queues_num(dev->dqm)))) {    in pqm_create_queue()
     276  retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);    in pqm_create_queue()
     288  retval = dev->dqm->ops.create_kernel_queue(dev->dqm,    in pqm_create_queue()
     336  dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd);    in pqm_create_queue()
     [all …]
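
Lines 262-265 show pqm_create_queue() refusing a new queue when a no-oversubscription scheduling policy is active and either VMIDs or CP queue slots are exhausted. A sketch of that admission predicate, with the policy constant and struct names invented for illustration:

```c
/* Admission check modeled on lines 262-265; the policy constant's name
 * and value are placeholders, the comparisons follow the fragment. */
#include <stdbool.h>
#include <stdio.h>

#define SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION 1  /* placeholder value */

struct dqm_limits {
	int sched_policy;
	int processes_count;
	int active_queue_count;
	int vmid_num_kfd;   /* dev->vm_info.vmid_num_kfd in the fragment */
	int cp_queues_num;  /* get_cp_queues_num(dev->dqm) in the fragment */
};

static bool queue_would_oversubscribe(const struct dqm_limits *l)
{
	return l->sched_policy == SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION &&
	       (l->processes_count >= l->vmid_num_kfd ||
		l->active_queue_count >= l->cp_queues_num);
}

int main(void)
{
	struct dqm_limits l = {
		.sched_policy = SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION,
		.processes_count = 8, .vmid_num_kfd = 8,   /* VMIDs full */
		.active_queue_count = 3, .cp_queues_num = 24,
	};

	printf("reject: %d\n", queue_would_oversubscribe(&l));
	return 0;
}
```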
|
D  kfd_device_queue_manager_vi.c
      29  static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
      35  static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm,
      41  static int update_qpd_vi(struct device_queue_manager *dqm,
      43  static int update_qpd_vi_tonga(struct device_queue_manager *dqm,
      45  static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
      47  static void init_sdma_vm_tonga(struct device_queue_manager *dqm,
      97  static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,    in set_cache_memory_policy_vi() argument
     126  static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm,    in set_cache_memory_policy_vi_tonga() argument
     153  static int update_qpd_vi(struct device_queue_manager *dqm,    in update_qpd_vi() argument
     194  static int update_qpd_vi_tonga(struct device_queue_manager *dqm,    in update_qpd_vi_tonga() argument
     [all …]
|
D  kfd_device_queue_manager_v9.c
      30  static int update_qpd_v9(struct device_queue_manager *dqm,
      32  static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
      52  static int update_qpd_v9(struct device_queue_manager *dqm,    in update_qpd_v9() argument
      65  if (dqm->dev->device_info->asic_family == CHIP_ALDEBARAN) {    in update_qpd_v9()
      72  } else if (dqm->dev->noretry &&    in update_qpd_v9()
      73  !dqm->dev->use_iommu_v2) {    in update_qpd_v9()
      89  static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,    in init_sdma_vm_v9() argument
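
Lines 65-73 show update_qpd_v9() special-casing Aldebaran and otherwise keying on noretry && !use_iommu_v2. A sketch of that branch shape follows; the SH_MEM_CONFIG bit names and values are placeholders for the elided assignments:

```c
/* Branch shape of update_qpd_v9() as visible at lines 65-73. */
#include <stdbool.h>
#include <stdio.h>

#define SH_MEM_CONFIG_BASE      0x1  /* hypothetical encodings */
#define SH_MEM_CONFIG_NO_RETRY  0x2

enum asic_family { CHIP_VEGA10, CHIP_ALDEBARAN };

struct dev_model {
	enum asic_family asic_family;
	bool noretry;
	bool use_iommu_v2;
};

static unsigned int qpd_sh_mem_config_model(const struct dev_model *dev)
{
	unsigned int config = SH_MEM_CONFIG_BASE;

	if (dev->asic_family == CHIP_ALDEBARAN) {
		/* Aldebaran-specific setup, elided in the fragment. */
	} else if (dev->noretry && !dev->use_iommu_v2) {
		/* Assumed: non-IOMMUv2 devices with noretry disable
		 * XNACK retry in the per-process SH_MEM config. */
		config |= SH_MEM_CONFIG_NO_RETRY;
	}
	return config;
}

int main(void)
{
	struct dev_model dev = { CHIP_VEGA10, true, false };

	printf("sh_mem_config: 0x%x\n", qpd_sh_mem_config_model(&dev));
	return 0;
}
```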
|
D  kfd_device_queue_manager_cik.c
      29  static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
      35  static int update_qpd_cik(struct device_queue_manager *dqm,
      37  static int update_qpd_cik_hawaii(struct device_queue_manager *dqm,
      39  static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
      41  static void init_sdma_vm_hawaii(struct device_queue_manager *dqm,
      90  static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,    in set_cache_memory_policy_cik() argument
     116  static int update_qpd_cik(struct device_queue_manager *dqm,    in update_qpd_cik() argument
     150  static int update_qpd_cik_hawaii(struct device_queue_manager *dqm,    in update_qpd_cik_hawaii() argument
     180  static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,    in init_sdma_vm() argument
     196  static void init_sdma_vm_hawaii(struct device_queue_manager *dqm,    in init_sdma_vm_hawaii() argument
|
D  kfd_packet_manager.c
      47  struct kfd_dev *dev = pm->dqm->dev;    in pm_calc_rlib_size()
      49  process_count = pm->dqm->processes_count;    in pm_calc_rlib_size()
      50  queue_count = pm->dqm->active_queue_count;    in pm_calc_rlib_size()
      51  compute_queue_count = pm->dqm->active_cp_queue_count;    in pm_calc_rlib_size()
      52  gws_queue_count = pm->dqm->gws_queue_count;    in pm_calc_rlib_size()
      65  compute_queue_count > get_cp_queues_num(pm->dqm) ||    in pm_calc_rlib_size()
     101  retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,    in pm_allocate_runlist_ib()
     145  pm->dqm->processes_count, pm->dqm->active_queue_count);    in pm_create_runlist_ib()
     151  if (processes_mapped >= pm->dqm->processes_count) {    in pm_create_runlist_ib()
     224  int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)    in pm_init() argument
     [all …]
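
The matches in pm_calc_rlib_size() gather process and queue counters from the dqm (lines 49-52) and compare the compute-queue count against get_cp_queues_num() (line 65). A standalone sizing model follows; the per-packet sizes, the other two oversubscription terms, and the chained-runlist padding are assumptions about the elided arithmetic:

```c
/* Sizing model for the runlist IB, following the counters at lines 49-52
 * and the comparison at line 65; everything else is assumed. */
#include <stdbool.h>
#include <stdio.h>

struct rlib_params {
	unsigned int process_count, queue_count;
	unsigned int compute_queue_count, gws_queue_count;
	unsigned int cp_queues_num;        /* get_cp_queues_num(pm->dqm) */
	unsigned int map_process_size;     /* per-process packet bytes */
	unsigned int map_queues_size;      /* per-queue packet bytes */
	unsigned int runlist_size;         /* chained-runlist packet bytes */
	unsigned int max_proc_per_quantum;
};

static unsigned int calc_rlib_size_model(const struct rlib_params *p,
					 bool *over_subscription)
{
	unsigned int size;

	/* Assumed: oversubscribed when processes, compute queues, or GWS
	 * queues exceed what the HWS can schedule in one runlist pass. */
	*over_subscription = p->process_count > p->max_proc_per_quantum ||
			     p->compute_queue_count > p->cp_queues_num ||
			     p->gws_queue_count > 1;

	size = p->process_count * p->map_process_size +
	       p->queue_count * p->map_queues_size;
	if (*over_subscription)
		size += p->runlist_size;  /* room for a chained runlist */
	return size;
}

int main(void)
{
	struct rlib_params p = {
		.process_count = 2, .queue_count = 5,
		.compute_queue_count = 4, .gws_queue_count = 0,
		.cp_queues_num = 24, .map_process_size = 80,
		.map_queues_size = 64, .runlist_size = 16,
		.max_proc_per_quantum = 1,
	};
	bool over;

	printf("rlib bytes: %u, oversubscribed: %d\n",
	       calc_rlib_size_model(&p, &over), over);
	return 0;
}
```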
|
D  kfd_mqd_manager.c
      56  mqd_mem_obj->gtt_mem = dev->dqm->hiq_sdma_mqd.gtt_mem;    in allocate_hiq_mqd()
      57  mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr;    in allocate_hiq_mqd()
      58  mqd_mem_obj->cpu_ptr = dev->dqm->hiq_sdma_mqd.cpu_ptr;    in allocate_hiq_mqd()
      76  dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size;    in allocate_sdma_mqd()
      78  offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;    in allocate_sdma_mqd()
      80  mqd_mem_obj->gtt_mem = (void *)((uint64_t)dev->dqm->hiq_sdma_mqd.gtt_mem    in allocate_sdma_mqd()
      82  mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr + offset;    in allocate_sdma_mqd()
      84  dev->dqm->hiq_sdma_mqd.cpu_ptr + offset);    in allocate_sdma_mqd()
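
Lines 76-84 show allocate_sdma_mqd() carving each SDMA MQD out of the shared hiq_sdma_mqd allocation: an offset of sdma_id times the SDMA MQD size, plus the HIQ MQD size, applied to the gtt_mem/gpu_addr/cpu_ptr base. A standalone model of that arithmetic, with illustrative sizes:

```c
/* The carve-out arithmetic at lines 76-84: SDMA MQDs share one
 * allocation, placed after the HIQ MQD at sdma_id * mqd_size. */
#include <stdint.h>
#include <stdio.h>

struct mqd_slice {
	uint64_t gpu_addr;
	char *cpu_ptr;
};

static struct mqd_slice sdma_mqd_slice(uint64_t base_gpu, char *base_cpu,
				       unsigned int hiq_mqd_size,
				       unsigned int sdma_mqd_size,
				       unsigned int sdma_id)
{
	/* offset = q->sdma_id * sdma mqd_size, then skip the HIQ MQD */
	uint64_t offset = (uint64_t)sdma_id * sdma_mqd_size + hiq_mqd_size;

	return (struct mqd_slice){ base_gpu + offset, base_cpu + offset };
}

int main(void)
{
	static char backing[4096];
	struct mqd_slice s = sdma_mqd_slice(0x1000, backing, 512, 256, 3);

	printf("gpu 0x%llx, cpu +%td\n",
	       (unsigned long long)s.gpu_addr, s.cpu_ptr - backing);
	return 0;
}
```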
|
D  kfd_device_queue_manager_v10.c
      29  static int update_qpd_v10(struct device_queue_manager *dqm,
      31  static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
      51  static int update_qpd_v10(struct device_queue_manager *dqm,    in update_qpd_v10() argument
      84  static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,    in init_sdma_vm_v10() argument
|
D  kfd_device.c
     775  if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)    in kfd_gws_init()
     900  kfd->dqm = device_queue_manager_init(kfd);    in kgd2kfd_device_init()
     901  if (!kfd->dqm) {    in kgd2kfd_device_init()
     944  kfd->dqm->sched_policy);    in kgd2kfd_device_init()
     952  device_queue_manager_uninit(kfd->dqm);    in kgd2kfd_device_init()
     974  device_queue_manager_uninit(kfd->dqm);    in kgd2kfd_device_exit()
     995  kfd->dqm->ops.pre_reset(kfd->dqm);    in kgd2kfd_pre_reset()
    1045  kfd->dqm->ops.stop(kfd->dqm);    in kgd2kfd_suspend()
    1087  err = kfd->dqm->ops.start(kfd->dqm);    in kfd_resume()
    1443  if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {    in kfd_debugfs_hang_hws()
    [all …]
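
Taken together, these matches trace the dqm lifecycle: created in kgd2kfd_device_init(), stopped on suspend, started on resume, and uninitialized on device exit. A condensed model of that ordering, with all bodies reduced to placeholders:

```c
/* Lifecycle ordering implied by the matches above; every function here
 * is a stand-in, not the driver's implementation. */
#include <stdio.h>
#include <stdlib.h>

struct dqm_model { int running; };

static struct dqm_model *device_queue_manager_init_model(void)
{
	return calloc(1, sizeof(struct dqm_model));
}

static void device_queue_manager_uninit_model(struct dqm_model *dqm)
{
	free(dqm);
}

static void dqm_stop_model(struct dqm_model *dqm)  { dqm->running = 0; }
static void dqm_start_model(struct dqm_model *dqm) { dqm->running = 1; }

int main(void)
{
	struct dqm_model *dqm = device_queue_manager_init_model();

	if (!dqm)               /* kgd2kfd_device_init() bails on failure */
		return 1;
	dqm_start_model(dqm);   /* kfd_resume() */
	dqm_stop_model(dqm);    /* kgd2kfd_suspend() */
	device_queue_manager_uninit_model(dqm); /* kgd2kfd_device_exit() */
	printf("lifecycle complete\n");
	return 0;
}
```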
|
D  kfd_process.c
     105  struct device_queue_manager *dqm;    in kfd_sdma_activity_worker() local
     116  dqm = pdd->dev->dqm;    in kfd_sdma_activity_worker()
     118  if (!dqm || !qpd)    in kfd_sdma_activity_worker()
     147  dqm_lock(dqm);    in kfd_sdma_activity_worker()
     156  dqm_unlock(dqm);    in kfd_sdma_activity_worker()
     173  dqm_unlock(dqm);    in kfd_sdma_activity_worker()
     177  dqm_unlock(dqm);    in kfd_sdma_activity_worker()
     207  dqm_lock(dqm);    in kfd_sdma_activity_worker()
     229  dqm_unlock(dqm);    in kfd_sdma_activity_worker()
    1445  pdd->qpd.dqm = dev->dqm;    in kfd_create_process_device_data()
    [all …]
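
kfd_sdma_activity_worker() brackets every read of dqm state with dqm_lock()/dqm_unlock(), unlocking on each early exit (lines 147-177). A minimal pthread model of that discipline; the counter and the queue-active check stand in for the elided qpd traversal:

```c
/* Lock/unlock discipline modeled on lines 147-177; the guarded state
 * is a placeholder counter. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dqm_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long sdma_activity_counter; /* stands in for qpd state */

static int read_sdma_activity(unsigned long long *out, int queue_active)
{
	pthread_mutex_lock(&dqm_mutex);            /* dqm_lock(dqm) */

	if (!queue_active) {
		pthread_mutex_unlock(&dqm_mutex);  /* early exits unlock too */
		return -1;
	}
	*out = sdma_activity_counter;

	pthread_mutex_unlock(&dqm_mutex);          /* dqm_unlock(dqm) */
	return 0;
}

int main(void)
{
	unsigned long long v;

	sdma_activity_counter = 123;
	if (!read_sdma_activity(&v, 1))
		printf("sdma activity: %llu\n", v);
	return 0;
}
```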
|
D  kfd_priv.h
     275  struct device_queue_manager *dqm;    member
     573  struct device_queue_manager *dqm;    member
    1010  void device_queue_manager_uninit(struct device_queue_manager *dqm);
    1014  int kfd_process_vm_fault(struct device_queue_manager *dqm, u32 pasid);
    1060  struct device_queue_manager *dqm;    member
    1104  int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
    1199  int dqm_debugfs_hang_hws(struct device_queue_manager *dqm);
|
D  kfd_int_process_v9.c
     133  if (!pasid && dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {    in event_interrupt_isr_v9()
     140  pasid = dev->dqm->vmid_pasid[vmid];    in event_interrupt_isr_v9()
     274  kfd_process_vm_fault(dev->dqm, pasid);    in event_interrupt_wq_v9()
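
Lines 133-140 show the v9 interrupt path recovering a missing PASID from dqm->vmid_pasid[vmid] when the scheduler runs without HWS. A standalone model; the table size and function shape are illustrative:

```c
/* PASID fallback modeled on lines 133-140. */
#include <stdint.h>
#include <stdio.h>

#define NUM_VMID 16  /* illustrative table size */

static uint32_t vmid_pasid[NUM_VMID];  /* stands in for dqm->vmid_pasid */

static uint32_t resolve_pasid(uint32_t irq_pasid, unsigned int vmid,
			      int hws_enabled)
{
	/* Without HWS, a fault interrupt may carry no PASID; recover it
	 * from the per-VMID table the driver maintains. */
	if (!irq_pasid && !hws_enabled && vmid < NUM_VMID)
		return vmid_pasid[vmid];
	return irq_pasid;
}

int main(void)
{
	vmid_pasid[3] = 0x8001;
	printf("pasid: 0x%x\n", resolve_pasid(0, 3, /*hws_enabled=*/0));
	return 0;
}
```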
|
D  kfd_dbgmgr.c
      87  if (pdev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)    in kfd_dbgmgr_create()
|
D  kfd_packet_manager_vi.c
      79  struct kfd_dev *kfd = pm->dqm->dev;    in pm_runlist_vi()
      93  concurrent_proc_cnt = min(pm->dqm->processes_count,    in pm_runlist_vi()
|
D  cik_event_interrupt.c
     113  kfd_process_vm_fault(dev->dqm, pasid);    in cik_event_interrupt_wq()
|
D  kfd_kernel_queue.c
      64  kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_DIQ];    in kq_initialize()
      67  kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];    in kq_initialize()
|
D  kfd_packet_manager_v9.c
     121  struct kfd_dev *kfd = pm->dqm->dev;    in pm_runlist_v9()
     132  concurrent_proc_cnt = min(pm->dqm->processes_count,    in pm_runlist_v9()
|
D  kfd_topology.c
    1388  dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?    in kfd_topology_add_device()
    1390  dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);    in kfd_topology_add_device()
    1612  r = dqm_debugfs_hqds(m, dev->gpu->dqm);    in kfd_debugfs_hqds_by_device()
    1637  r = pm_debugfs_runlist(m, &dev->gpu->dqm->packet_mgr);    in kfd_debugfs_rls_by_device()
|
D  kfd_chardev.c
     526  if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm,    in kfd_ioctl_set_memory_policy()
    1130  if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&    in kfd_ioctl_set_scratch_backing_va()
    1629  if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {    in kfd_ioctl_alloc_queue_gws()
|