
Searched refs:dqm (Results 1 – 21 of 21) sorted by relevance

/Linux-v5.4/drivers/gpu/drm/amd/amdkfd/
kfd_device_queue_manager.c
42 static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
45 static int execute_queues_cpsch(struct device_queue_manager *dqm,
48 static int unmap_queues_cpsch(struct device_queue_manager *dqm,
52 static int map_queues_cpsch(struct device_queue_manager *dqm);
54 static void deallocate_sdma_queue(struct device_queue_manager *dqm,
57 static inline void deallocate_hqd(struct device_queue_manager *dqm,
59 static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
60 static int allocate_sdma_queue(struct device_queue_manager *dqm,
72 static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe) in is_pipe_enabled() argument
75 int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec in is_pipe_enabled()
[all …]
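
The is_pipe_enabled() hit above tests a flat per-queue bitmap indexed by MEC and pipe; the continuation of the pipe_offset expression is truncated in the results. A minimal userspace sketch of that kind of indexing, assuming a layout of num_pipe_per_mec pipes per MEC and num_queue_per_pipe queue slots per pipe (the struct and all constants here are stand-ins, not the real kfd types):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for dqm->dev->shared_resources; illustrative only. */
struct shared_resources {
	unsigned int num_pipe_per_mec;
	unsigned int num_queue_per_pipe;
	uint64_t queue_bitmap;		/* bit set => HQD slot usable by KFD */
};

static bool is_pipe_enabled(const struct shared_resources *res, int mec, int pipe)
{
	unsigned int pipe_offset = (mec * res->num_pipe_per_mec + pipe)
				   * res->num_queue_per_pipe;

	/* The pipe is usable if any of its queue slots belongs to KFD. */
	for (unsigned int i = 0; i < res->num_queue_per_pipe; i++)
		if (res->queue_bitmap & (1ULL << (pipe_offset + i)))
			return true;
	return false;
}

int main(void)
{
	struct shared_resources res = {
		.num_pipe_per_mec = 4,
		.num_queue_per_pipe = 8,
		.queue_bitmap = 0xff00,	/* only MEC 0, pipe 1 enabled */
	};

	printf("mec 0 pipe 0: %d\n", is_pipe_enabled(&res, 0, 0));	/* 0 */
	printf("mec 0 pipe 1: %d\n", is_pipe_enabled(&res, 0, 1));	/* 1 */
	return 0;
}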
kfd_device_queue_manager.h
85 int (*create_queue)(struct device_queue_manager *dqm,
89 int (*destroy_queue)(struct device_queue_manager *dqm,
93 int (*update_queue)(struct device_queue_manager *dqm,
96 int (*register_process)(struct device_queue_manager *dqm,
99 int (*unregister_process)(struct device_queue_manager *dqm,
102 int (*initialize)(struct device_queue_manager *dqm);
103 int (*start)(struct device_queue_manager *dqm);
104 int (*stop)(struct device_queue_manager *dqm);
105 void (*uninitialize)(struct device_queue_manager *dqm);
106 int (*create_kernel_queue)(struct device_queue_manager *dqm,
[all …]
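
The header hits above are the device_queue_manager ops function-pointer table: every hook takes the dqm itself as its first argument, so the same call sites serve whichever back end was installed at init time. A minimal sketch of that dispatch pattern with stub types; any name not shown in the results (the _cpsch suffixes, the pasid field) is invented for illustration:

#include <stdio.h>

struct device_queue_manager;			/* forward-declared, as in the header */
struct queue { int id; };			/* stub, not the real struct queue */
struct qcm_process_device { int pasid; };	/* stub */

struct device_queue_manager_ops {
	int (*create_queue)(struct device_queue_manager *dqm, struct queue *q,
			    struct qcm_process_device *qpd);
	int (*start)(struct device_queue_manager *dqm);
	int (*stop)(struct device_queue_manager *dqm);
};

struct device_queue_manager {
	struct device_queue_manager_ops ops;
};

/* One back end; the real driver installs a table per scheduling mode. */
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			      struct qcm_process_device *qpd)
{
	(void)dqm;
	printf("create queue %d for pasid 0x%x\n", q->id, qpd->pasid);
	return 0;
}

static int start_cpsch(struct device_queue_manager *dqm) { (void)dqm; return 0; }
static int stop_cpsch(struct device_queue_manager *dqm)  { (void)dqm; return 0; }

int main(void)
{
	struct device_queue_manager dqm = { .ops = {
		.create_queue = create_queue_cpsch,
		.start = start_cpsch,
		.stop = stop_cpsch,
	} };
	struct queue q = { .id = 1 };
	struct qcm_process_device qpd = { .pasid = 0x8000 };

	dqm.ops.start(&dqm);
	dqm.ops.create_queue(&dqm, &q, &qpd);	/* mirrors dev->dqm->ops.create_queue(dev->dqm, ...) */
	dqm.ops.stop(&dqm);
	return 0;
}

Passing the dqm as the explicit first argument is what lets callers such as kfd_process_dequeue_from_device() stay back-end agnostic.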
kfd_process_queue_manager.c
74 dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd); in kfd_process_dequeue_from_device()
123 return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm, in pqm_set_gws()
232 dev->dqm->ops.register_process(dev->dqm, &pdd->qpd); in pqm_create_queue()
243 if ((type == KFD_QUEUE_TYPE_SDMA && dev->dqm->sdma_queue_count in pqm_create_queue()
244 >= get_num_sdma_queues(dev->dqm)) || in pqm_create_queue()
246 dev->dqm->xgmi_sdma_queue_count in pqm_create_queue()
247 >= get_num_xgmi_sdma_queues(dev->dqm))) { in pqm_create_queue()
258 retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd); in pqm_create_queue()
265 if ((dev->dqm->sched_policy == in pqm_create_queue()
267 ((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) || in pqm_create_queue()
[all …]
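
The pqm_create_queue() hits show creation being rejected once the per-device SDMA counters reach the totals reported by get_num_sdma_queues()/get_num_xgmi_sdma_queues(). A hedged sketch of that admission check; the struct, limits, and enum names are placeholders:

#include <stdbool.h>
#include <stdio.h>

enum queue_type { QUEUE_TYPE_COMPUTE, QUEUE_TYPE_SDMA, QUEUE_TYPE_SDMA_XGMI };

/* Placeholder counters; the real ones live on the dqm, with limits from
 * get_num_sdma_queues()/get_num_xgmi_sdma_queues(). */
struct sdma_counts {
	unsigned int sdma_queue_count, num_sdma_queues;
	unsigned int xgmi_sdma_queue_count, num_xgmi_sdma_queues;
};

static bool sdma_slot_available(const struct sdma_counts *c, enum queue_type type)
{
	if (type == QUEUE_TYPE_SDMA)
		return c->sdma_queue_count < c->num_sdma_queues;
	if (type == QUEUE_TYPE_SDMA_XGMI)
		return c->xgmi_sdma_queue_count < c->num_xgmi_sdma_queues;
	return true;	/* compute queues are bounded elsewhere */
}

int main(void)
{
	struct sdma_counts c = {
		.sdma_queue_count = 8,      .num_sdma_queues = 8,	/* full */
		.xgmi_sdma_queue_count = 0, .num_xgmi_sdma_queues = 8,
	};

	printf("SDMA:      %s\n", sdma_slot_available(&c, QUEUE_TYPE_SDMA) ? "ok" : "reject");
	printf("XGMI SDMA: %s\n", sdma_slot_available(&c, QUEUE_TYPE_SDMA_XGMI) ? "ok" : "reject");
	return 0;
}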
kfd_device_queue_manager_vi.c
29 static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
35 static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm,
41 static int update_qpd_vi(struct device_queue_manager *dqm,
43 static int update_qpd_vi_tonga(struct device_queue_manager *dqm,
45 static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
47 static void init_sdma_vm_tonga(struct device_queue_manager *dqm,
97 static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm, in set_cache_memory_policy_vi() argument
126 static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm, in set_cache_memory_policy_vi_tonga() argument
153 static int update_qpd_vi(struct device_queue_manager *dqm, in update_qpd_vi() argument
194 static int update_qpd_vi_tonga(struct device_queue_manager *dqm, in update_qpd_vi_tonga() argument
[all …]
kfd_device_queue_manager_cik.c
29 static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
35 static int update_qpd_cik(struct device_queue_manager *dqm,
37 static int update_qpd_cik_hawaii(struct device_queue_manager *dqm,
39 static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
41 static void init_sdma_vm_hawaii(struct device_queue_manager *dqm,
90 static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm, in set_cache_memory_policy_cik() argument
116 static int update_qpd_cik(struct device_queue_manager *dqm, in update_qpd_cik() argument
150 static int update_qpd_cik_hawaii(struct device_queue_manager *dqm, in update_qpd_cik_hawaii() argument
180 static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, in init_sdma_vm() argument
196 static void init_sdma_vm_hawaii(struct device_queue_manager *dqm, in init_sdma_vm_hawaii() argument
kfd_mqd_manager.c
56 mqd_mem_obj->gtt_mem = dev->dqm->hiq_sdma_mqd.gtt_mem; in allocate_hiq_mqd()
57 mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr; in allocate_hiq_mqd()
58 mqd_mem_obj->cpu_ptr = dev->dqm->hiq_sdma_mqd.cpu_ptr; in allocate_hiq_mqd()
76 dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size; in allocate_sdma_mqd()
78 offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size; in allocate_sdma_mqd()
80 mqd_mem_obj->gtt_mem = (void *)((uint64_t)dev->dqm->hiq_sdma_mqd.gtt_mem in allocate_sdma_mqd()
82 mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr + offset; in allocate_sdma_mqd()
84 dev->dqm->hiq_sdma_mqd.cpu_ptr + offset); in allocate_sdma_mqd()
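
allocate_hiq_mqd() and allocate_sdma_mqd() above hand out slices of one pre-allocated GTT buffer (dqm->hiq_sdma_mqd): the HIQ MQD sits at the base, and each SDMA MQD at a fixed offset behind it. A sketch of that carve-out arithmetic, assuming a per-engine sdma_id index; the sizes and types are invented:

#include <stdint.h>
#include <stdio.h>

struct mqd_slice {
	uint64_t gpu_addr;
	char *cpu_ptr;
};

/* One buffer laid out as [HIQ MQD][SDMA MQD 0][SDMA MQD 1]...,
 * mirroring the offset math in allocate_sdma_mqd(); sizes are made up. */
static struct mqd_slice carve_sdma_mqd(uint64_t base_gpu, char *base_cpu,
				       size_t hiq_mqd_size, size_t sdma_mqd_size,
				       unsigned int sdma_id)
{
	size_t offset = hiq_mqd_size + sdma_id * sdma_mqd_size;

	return (struct mqd_slice){
		.gpu_addr = base_gpu + offset,
		.cpu_ptr  = base_cpu + offset,
	};
}

int main(void)
{
	static char buffer[4096];	/* stand-in for the shared GTT allocation */
	struct mqd_slice s = carve_sdma_mqd(0x100000, buffer, 512, 256, 2);

	printf("SDMA MQD 2 at gpu 0x%llx, cpu offset %td\n",
	       (unsigned long long)s.gpu_addr, s.cpu_ptr - buffer);
	return 0;
}

Sub-allocating from one buffer keeps every privileged MQD in GTT memory that was reserved once at init, rather than allocating per queue.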
kfd_packet_manager.c
47 struct kfd_dev *dev = pm->dqm->dev; in pm_calc_rlib_size()
49 process_count = pm->dqm->processes_count; in pm_calc_rlib_size()
50 queue_count = pm->dqm->queue_count; in pm_calc_rlib_size()
51 compute_queue_count = queue_count - pm->dqm->sdma_queue_count - in pm_calc_rlib_size()
52 pm->dqm->xgmi_sdma_queue_count; in pm_calc_rlib_size()
65 compute_queue_count > get_queues_num(pm->dqm)) { in pm_calc_rlib_size()
100 retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size, in pm_allocate_runlist_ib()
144 pm->dqm->processes_count, pm->dqm->queue_count); in pm_create_runlist_ib()
150 if (proccesses_mapped >= pm->dqm->processes_count) { in pm_create_runlist_ib()
223 int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) in pm_init() argument
[all …]
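
pm_calc_rlib_size() above sizes the runlist indirect buffer from the dqm counters: SDMA and XGMI-SDMA queues are subtracted to get the compute-queue count, and exceeding get_queues_num() flags over-subscription. A simplified model of that sizing under those assumptions; the packet sizes are placeholders, not real PM4 sizes:

#include <stdbool.h>
#include <stdio.h>

struct rlib_inputs {
	unsigned int processes_count, queue_count;
	unsigned int sdma_queue_count, xgmi_sdma_queue_count;
	unsigned int hqd_slots;		/* stand-in for get_queues_num() */
	unsigned int map_process_size, map_queue_size, runlist_size;
};

static unsigned int calc_rlib_size(const struct rlib_inputs *in, bool *over_subscription)
{
	unsigned int compute_queue_count = in->queue_count
		- in->sdma_queue_count - in->xgmi_sdma_queue_count;
	unsigned int size;

	/* More compute queues than available HQD slots? */
	*over_subscription = compute_queue_count > in->hqd_slots;

	size = in->processes_count * in->map_process_size
	     + in->queue_count * in->map_queue_size;
	if (*over_subscription)
		size += in->runlist_size;	/* room for a chained runlist packet */
	return size;
}

int main(void)
{
	struct rlib_inputs in = {
		.processes_count = 2, .queue_count = 72,
		.sdma_queue_count = 4, .xgmi_sdma_queue_count = 2,
		.hqd_slots = 64,
		.map_process_size = 60, .map_queue_size = 24, .runlist_size = 20,
	};
	bool over;

	/* 72 - 4 - 2 = 66 compute queues > 64 slots => over-subscribed */
	printf("rlib size %u bytes, over-subscribed: %d\n",
	       calc_rlib_size(&in, &over), over);
	return 0;
}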
kfd_device_queue_manager_v9.c
30 static int update_qpd_v9(struct device_queue_manager *dqm,
32 static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
52 static int update_qpd_v9(struct device_queue_manager *dqm, in update_qpd_v9() argument
65 !dqm->dev->device_info->needs_iommu_device) in update_qpd_v9()
80 static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q, in init_sdma_vm_v9() argument
kfd_device_queue_manager_v10.c
29 static int update_qpd_v10(struct device_queue_manager *dqm,
31 static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
51 static int update_qpd_v10(struct device_queue_manager *dqm, in update_qpd_v10() argument
83 static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q, in init_sdma_vm_v10() argument
kfd_device.c
677 kfd->dqm = device_queue_manager_init(kfd); in kgd2kfd_device_init()
678 if (!kfd->dqm) { in kgd2kfd_device_init()
705 kfd->dqm->sched_policy); in kgd2kfd_device_init()
712 device_queue_manager_uninit(kfd->dqm); in kgd2kfd_device_init()
735 device_queue_manager_uninit(kfd->dqm); in kgd2kfd_device_exit()
755 dqm_lock(kfd->dqm); in kgd2kfd_pre_reset()
774 dqm_unlock(kfd->dqm); in kgd2kfd_post_reset()
800 kfd->dqm->ops.stop(kfd->dqm); in kgd2kfd_suspend()
836 err = kfd->dqm->ops.start(kfd->dqm); in kfd_resume()
1176 if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) { in kfd_debugfs_hang_hws()
[all …]
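
Taken together, the kfd_device.c hits trace the dqm lifetime: device_queue_manager_init() at device bring-up, ops.stop()/ops.start() across suspend and resume, dqm_lock()/dqm_unlock() bracketing a GPU reset, and device_queue_manager_uninit() on teardown or init failure. A stubbed sketch of that ordering; every body here is a placeholder, only the sequencing is taken from the results:

#include <stdio.h>

static void *device_queue_manager_init(void)      { puts("dqm init");   return (void *)1; }
static void device_queue_manager_uninit(void *d)  { (void)d; puts("dqm uninit"); }
static void dqm_start(void *d)  { (void)d; puts("dqm start (resume)"); }
static void dqm_stop(void *d)   { (void)d; puts("dqm stop (suspend)"); }
static void dqm_lock(void *d)   { (void)d; puts("dqm lock (pre-reset)"); }
static void dqm_unlock(void *d) { (void)d; puts("dqm unlock (post-reset)"); }

int main(void)
{
	void *dqm = device_queue_manager_init();	/* kgd2kfd_device_init() */
	if (!dqm)
		return 1;				/* init failure unwinds here */

	dqm_start(dqm);					/* kfd_resume() */
	dqm_lock(dqm);					/* kgd2kfd_pre_reset() */
	dqm_unlock(dqm);				/* kgd2kfd_post_reset() */
	dqm_stop(dqm);					/* kgd2kfd_suspend() */

	device_queue_manager_uninit(dqm);		/* kgd2kfd_device_exit() */
	return 0;
}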
kfd_priv.h
271 struct device_queue_manager *dqm; member
545 struct device_queue_manager *dqm; member
885 void device_queue_manager_uninit(struct device_queue_manager *dqm);
889 int kfd_process_vm_fault(struct device_queue_manager *dqm, unsigned int pasid);
932 struct device_queue_manager *dqm; member
976 int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
1057 int dqm_debugfs_execute_queues(struct device_queue_manager *dqm);
kfd_process.c
745 pdd->qpd.dqm = dev->dqm; in kfd_create_process_device_data()
953 r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm, in kfd_process_evict_queues()
971 if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, in kfd_process_evict_queues()
988 r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, in kfd_process_restore_queues()
1152 if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { in kfd_flush_tlb()
kfd_int_process_v9.c
50 if (!pasid && dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { in event_interrupt_isr_v9()
121 kfd_process_vm_fault(dev->dqm, pasid); in event_interrupt_wq_v9()
kfd_dbgmgr.c
87 if (pdev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) in kfd_dbgmgr_create()
kfd_kernel_queue_v10.c
119 struct kfd_dev *kfd = pm->dqm->dev; in pm_runlist_v10()
130 concurrent_proc_cnt = min(pm->dqm->processes_count, in pm_runlist_v10()
cik_event_interrupt.c
108 kfd_process_vm_fault(dev->dqm, pasid); in cik_event_interrupt_wq()
kfd_kernel_queue.c
61 kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_DIQ]; in initialize()
64 kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]; in initialize()
kfd_kernel_queue_vi.c
120 struct kfd_dev *kfd = pm->dqm->dev; in pm_runlist_vi()
134 concurrent_proc_cnt = min(pm->dqm->processes_count, in pm_runlist_vi()
kfd_kernel_queue_v9.c
115 struct kfd_dev *kfd = pm->dqm->dev; in pm_runlist_v9()
126 concurrent_proc_cnt = min(pm->dqm->processes_count, in pm_runlist_v9()
kfd_chardev.c
504 if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm, in kfd_ioctl_set_memory_policy()
538 if (dev->dqm->ops.set_trap_handler(dev->dqm, in kfd_ioctl_set_trap_handler()
1130 if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS && in kfd_ioctl_set_scratch_backing_va()
kfd_topology.c
1291 dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ? in kfd_topology_add_device()
1479 r = dqm_debugfs_hqds(m, dev->gpu->dqm); in kfd_debugfs_hqds_by_device()
1504 r = pm_debugfs_runlist(m, &dev->gpu->dqm->packets); in kfd_debugfs_rls_by_device()