Searched refs:dqm (Results 1 – 21 of 21) sorted by relevance

/Linux-v6.1/drivers/gpu/drm/amd/amdkfd/
kfd_device_queue_manager.c
44 static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
47 static int execute_queues_cpsch(struct device_queue_manager *dqm,
50 static int unmap_queues_cpsch(struct device_queue_manager *dqm,
54 static int map_queues_cpsch(struct device_queue_manager *dqm);
56 static void deallocate_sdma_queue(struct device_queue_manager *dqm,
59 static inline void deallocate_hqd(struct device_queue_manager *dqm,
61 static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
62 static int allocate_sdma_queue(struct device_queue_manager *dqm,
74 static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe) in is_pipe_enabled() argument
77 int pipe_offset = (mec * dqm->dev->shared_resources.num_pipe_per_mec in is_pipe_enabled()
[all …]
kfd_device_queue_manager.h
131 int (*create_queue)(struct device_queue_manager *dqm,
138 int (*destroy_queue)(struct device_queue_manager *dqm,
142 int (*update_queue)(struct device_queue_manager *dqm,
145 int (*register_process)(struct device_queue_manager *dqm,
148 int (*unregister_process)(struct device_queue_manager *dqm,
151 int (*initialize)(struct device_queue_manager *dqm);
152 int (*start)(struct device_queue_manager *dqm);
153 int (*stop)(struct device_queue_manager *dqm);
154 void (*pre_reset)(struct device_queue_manager *dqm);
155 void (*uninitialize)(struct device_queue_manager *dqm);
[all …]
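
The kfd_device_queue_manager.h hits above list the device_queue_manager ops table: a function-pointer vtable through which the rest of amdkfd drives the queue manager. A minimal sketch of the pattern, with simplified signatures (the real members take additional parameters such as the qpd and queue data; this is illustrative, not the kernel declaration):

    /* Minimal sketch of the ops-table pattern listed above; signatures are
     * simplified, not the actual kfd_device_queue_manager.h declarations. */
    struct device_queue_manager;
    struct queue;

    struct device_queue_manager_ops {
        int  (*create_queue)(struct device_queue_manager *dqm, struct queue *q);
        int  (*destroy_queue)(struct device_queue_manager *dqm, struct queue *q);
        int  (*initialize)(struct device_queue_manager *dqm);
        int  (*start)(struct device_queue_manager *dqm);
        int  (*stop)(struct device_queue_manager *dqm);
        void (*uninitialize)(struct device_queue_manager *dqm);
    };

    struct device_queue_manager {
        /* Embedded by value: call sites use dqm->ops.xxx(dqm, ...). */
        struct device_queue_manager_ops ops;
    };

Call sites elsewhere in these results, e.g. dev->dqm->ops.create_queue(dev->dqm, ...) in kfd_process_queue_manager.c, go through this table, which lets different scheduling back ends supply their own implementations behind one interface.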
kfd_process_queue_manager.c
89 dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd); in kfd_process_dequeue_from_device()
138 return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm, in pqm_set_gws()
277 dev->dqm->ops.register_process(dev->dqm, &pdd->qpd); in pqm_create_queue()
299 retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data, in pqm_create_queue()
306 if ((dev->dqm->sched_policy == in pqm_create_queue()
308 ((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) || in pqm_create_queue()
309 (dev->dqm->active_queue_count >= get_cp_queues_num(dev->dqm)))) { in pqm_create_queue()
320 retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data, in pqm_create_queue()
333 retval = dev->dqm->ops.create_kernel_queue(dev->dqm, in pqm_create_queue()
381 dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd); in pqm_create_queue()
[all …]
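
The pqm_create_queue() hits at lines 306-309 show an admission check: when the scheduler policy forbids over-subscription, queue creation is refused once the process count reaches the number of VMIDs reserved for KFD, or the active queue count reaches the CP queue capacity (the policy constant itself sits on the elided line 307). A standalone, hedged restatement of that predicate:

    #include <stdbool.h>

    /* Illustrative restatement of the check visible above, not kernel code;
     * parameter names follow the fields shown in the listing. */
    static bool queue_slots_available(unsigned int processes_count,
                                      unsigned int vmid_num_kfd,
                                      unsigned int active_queue_count,
                                      unsigned int cp_queues_num)
    {
        /* Inverse of the rejection condition at lines 308-309. */
        return processes_count < vmid_num_kfd &&
               active_queue_count < cp_queues_num;
    }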
kfd_device_queue_manager_vi.c
30 static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
36 static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm,
42 static int update_qpd_vi(struct device_queue_manager *dqm,
44 static int update_qpd_vi_tonga(struct device_queue_manager *dqm,
46 static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
48 static void init_sdma_vm_tonga(struct device_queue_manager *dqm,
98 static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm, in set_cache_memory_policy_vi() argument
127 static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm, in set_cache_memory_policy_vi_tonga() argument
154 static int update_qpd_vi(struct device_queue_manager *dqm, in update_qpd_vi() argument
195 static int update_qpd_vi_tonga(struct device_queue_manager *dqm, in update_qpd_vi_tonga() argument
[all …]
kfd_device_queue_manager_v9.c
31 static int update_qpd_v9(struct device_queue_manager *dqm,
33 static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
53 static int update_qpd_v9(struct device_queue_manager *dqm, in update_qpd_v9() argument
66 if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2)) { in update_qpd_v9()
73 } else if (dqm->dev->noretry && in update_qpd_v9()
74 !dqm->dev->use_iommu_v2) { in update_qpd_v9()
90 static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q, in init_sdma_vm_v9() argument
kfd_packet_manager.c
48 struct kfd_dev *dev = pm->dqm->dev; in pm_calc_rlib_size()
50 process_count = pm->dqm->processes_count; in pm_calc_rlib_size()
51 queue_count = pm->dqm->active_queue_count; in pm_calc_rlib_size()
52 compute_queue_count = pm->dqm->active_cp_queue_count; in pm_calc_rlib_size()
53 gws_queue_count = pm->dqm->gws_queue_count; in pm_calc_rlib_size()
66 compute_queue_count > get_cp_queues_num(pm->dqm) || in pm_calc_rlib_size()
102 retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size, in pm_allocate_runlist_ib()
146 pm->dqm->processes_count, pm->dqm->active_queue_count); in pm_create_runlist_ib()
152 if (processes_mapped >= pm->dqm->processes_count) { in pm_create_runlist_ib()
225 int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) in pm_init() argument
[all …]
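
pm_calc_rlib_size() above sizes the runlist indirect buffer from the dqm's process and queue counts. A standalone sketch of that arithmetic; the packet sizes are placeholders, and the doubling for over-subscription (to leave room for a chained runlist) is an assumption about this kernel's behavior:

    #include <stdbool.h>
    #include <stddef.h>

    /* Illustrative sizing only; not the real PM4 packet sizes. */
    static size_t calc_rlib_size(unsigned int process_count,
                                 unsigned int queue_count,
                                 bool over_subscription)
    {
        const size_t map_process_size = 64;  /* placeholder packet size */
        const size_t map_queues_size = 32;   /* placeholder packet size */
        size_t size = process_count * map_process_size +
                      queue_count * map_queues_size;

        /* Assumed: an over-subscribed runlist chains to a second copy of
         * itself, so the allocation is doubled. */
        if (over_subscription)
            size *= 2;
        return size;
    }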
kfd_device_queue_manager_cik.c
30 static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
36 static int update_qpd_cik(struct device_queue_manager *dqm,
38 static int update_qpd_cik_hawaii(struct device_queue_manager *dqm,
40 static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
42 static void init_sdma_vm_hawaii(struct device_queue_manager *dqm,
91 static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm, in set_cache_memory_policy_cik() argument
117 static int update_qpd_cik(struct device_queue_manager *dqm, in update_qpd_cik() argument
151 static int update_qpd_cik_hawaii(struct device_queue_manager *dqm, in update_qpd_cik_hawaii() argument
181 static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, in init_sdma_vm() argument
197 static void init_sdma_vm_hawaii(struct device_queue_manager *dqm, in init_sdma_vm_hawaii() argument
kfd_mqd_manager.c
57 mqd_mem_obj->gtt_mem = dev->dqm->hiq_sdma_mqd.gtt_mem; in allocate_hiq_mqd()
58 mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr; in allocate_hiq_mqd()
59 mqd_mem_obj->cpu_ptr = dev->dqm->hiq_sdma_mqd.cpu_ptr; in allocate_hiq_mqd()
77 dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size; in allocate_sdma_mqd()
79 offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size; in allocate_sdma_mqd()
81 mqd_mem_obj->gtt_mem = (void *)((uint64_t)dev->dqm->hiq_sdma_mqd.gtt_mem in allocate_sdma_mqd()
83 mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr + offset; in allocate_sdma_mqd()
85 dev->dqm->hiq_sdma_mqd.cpu_ptr + offset); in allocate_sdma_mqd()
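
allocate_hiq_mqd() and allocate_sdma_mqd() above both carve their MQDs out of one shared GTT allocation, dev->dqm->hiq_sdma_mqd: the HIQ MQD sits at the front and each SDMA queue's MQD is addressed by offset behind it. A sketch of that sub-allocation; the helper and its index math are simplified stand-ins for the engine/queue indexing in the real code:

    #include <stdint.h>

    /* Illustrative sub-allocation helper, not kernel code; casts and index
     * math simplified relative to allocate_sdma_mqd() above. */
    static void sdma_mqd_slot(uint64_t base_gpu_addr, void *base_cpu_ptr,
                              uint64_t hiq_mqd_size, uint64_t sdma_mqd_size,
                              unsigned int sdma_queue_index,
                              uint64_t *gpu_addr, void **cpu_ptr)
    {
        uint64_t offset = hiq_mqd_size +                     /* HIQ MQD at the front */
                          sdma_queue_index * sdma_mqd_size;  /* this queue's slot */

        *gpu_addr = base_gpu_addr + offset;
        *cpu_ptr = (void *)((uintptr_t)base_cpu_ptr + offset);
    }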
kfd_device_queue_manager_v10.c
30 static int update_qpd_v10(struct device_queue_manager *dqm,
32 static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
52 static int update_qpd_v10(struct device_queue_manager *dqm, in update_qpd_v10() argument
76 static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q, in init_sdma_vm_v10() argument
kfd_device_queue_manager_v11.c
29 static int update_qpd_v11(struct device_queue_manager *dqm,
31 static void init_sdma_vm_v11(struct device_queue_manager *dqm, struct queue *q,
51 static int update_qpd_v11(struct device_queue_manager *dqm, in update_qpd_v11() argument
76 static void init_sdma_vm_v11(struct device_queue_manager *dqm, struct queue *q, in init_sdma_vm_v11() argument
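
The per-generation files above (cik, vi, v9, v10, v11) all follow the same specialization pattern: each supplies its own update_qpd and init_sdma_vm variants and plugs them into a per-ASIC hook table. A sketch of that shape; the struct name and exact member list are assumptions inferred from the listing:

    struct device_queue_manager;
    struct qcm_process_device;
    struct queue;

    /* Assumed shape of the per-ASIC hook table the files above populate;
     * member signatures mirror the declarations in the listing. */
    struct device_queue_manager_asic_ops {
        int  (*update_qpd)(struct device_queue_manager *dqm,
                           struct qcm_process_device *qpd);
        void (*init_sdma_vm)(struct device_queue_manager *dqm,
                             struct queue *q,
                             struct qcm_process_device *qpd);
    };

update_qpd_v9() above, for instance, branches on KFD_GC_VERSION(dqm->dev) and the device's noretry setting, keeping generation-specific policy out of the shared queue-manager core.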
kfd_device.c
489 if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) in kfd_gws_init()
609 kfd->dqm = device_queue_manager_init(kfd); in kgd2kfd_device_init()
610 if (!kfd->dqm) { in kgd2kfd_device_init()
657 kfd->dqm->sched_policy); in kgd2kfd_device_init()
665 device_queue_manager_uninit(kfd->dqm); in kgd2kfd_device_init()
687 device_queue_manager_uninit(kfd->dqm); in kgd2kfd_device_exit()
708 kfd->dqm->ops.pre_reset(kfd->dqm); in kgd2kfd_pre_reset()
758 kfd->dqm->ops.stop(kfd->dqm); in kgd2kfd_suspend()
800 err = kfd->dqm->ops.start(kfd->dqm); in kfd_resume()
1164 if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) { in kfd_debugfs_hang_hws()
[all …]
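
Read in lifecycle order rather than line order, the kfd_device.c hits above trace the dqm through the device's life; a condensed sketch (error handling and surrounding code trimmed):

    /* Condensed from the hits above; not complete kernel code. */
    kfd->dqm = device_queue_manager_init(kfd);   /* kgd2kfd_device_init(), 609 */
    kfd->dqm->ops.start(kfd->dqm);               /* kfd_resume(), 800 */
    kfd->dqm->ops.stop(kfd->dqm);                /* kgd2kfd_suspend(), 758 */
    kfd->dqm->ops.pre_reset(kfd->dqm);           /* kgd2kfd_pre_reset(), 708 */
    device_queue_manager_uninit(kfd->dqm);       /* kgd2kfd_device_exit(), 687 */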
kfd_process.c
109 struct device_queue_manager *dqm; in kfd_sdma_activity_worker() local
120 dqm = pdd->dev->dqm; in kfd_sdma_activity_worker()
122 if (!dqm || !qpd) in kfd_sdma_activity_worker()
151 dqm_lock(dqm); in kfd_sdma_activity_worker()
160 dqm_unlock(dqm); in kfd_sdma_activity_worker()
177 dqm_unlock(dqm); in kfd_sdma_activity_worker()
181 dqm_unlock(dqm); in kfd_sdma_activity_worker()
211 dqm_lock(dqm); in kfd_sdma_activity_worker()
233 dqm_unlock(dqm); in kfd_sdma_activity_worker()
1510 pdd->qpd.dqm = dev->dqm; in kfd_create_process_device_data()
[all …]
kfd_int_process_v9.c
114 ret = kfd_dqm_evict_pasid(dev->dqm, pasid); in event_interrupt_poison_consumption_v9()
204 if (!pasid && dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { in event_interrupt_isr_v9()
211 pasid = dev->dqm->vmid_pasid[vmid]; in event_interrupt_isr_v9()
364 kfd_dqm_evict_pasid(dev->dqm, pasid); in event_interrupt_wq_v9()
kfd_int_process_v11.c
210 if (dev->dqm->ops.reset_queues) in event_interrupt_poison_consumption_v11()
211 ret = dev->dqm->ops.reset_queues(dev->dqm, pasid); in event_interrupt_poison_consumption_v11()
kfd_priv.h
299 struct device_queue_manager *dqm; member
611 struct device_queue_manager *dqm; member
1189 void device_queue_manager_uninit(struct device_queue_manager *dqm);
1193 int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid);
1247 struct device_queue_manager *dqm; member
1289 int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
1392 int dqm_debugfs_hang_hws(struct device_queue_manager *dqm);
kfd_packet_manager_v9.c
122 struct kfd_dev *kfd = pm->dqm->dev; in pm_runlist_v9()
133 concurrent_proc_cnt = min(pm->dqm->processes_count, in pm_runlist_v9()
266 packet->bitfields2.extended_engine_sel = pm_use_ext_eng(pm->dqm->dev) ? in pm_unmap_queues_v9()
kfd_packet_manager_vi.c
80 struct kfd_dev *kfd = pm->dqm->dev; in pm_runlist_vi()
94 concurrent_proc_cnt = min(pm->dqm->processes_count, in pm_runlist_vi()
cik_event_interrupt.c
113 kfd_dqm_evict_pasid(dev->dqm, pasid); in cik_event_interrupt_wq()
kfd_kernel_queue.c
65 kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_DIQ]; in kq_initialize()
68 kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]; in kq_initialize()
kfd_topology.c
1713 dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ? in kfd_topology_add_device()
1715 dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm); in kfd_topology_add_device()
1989 r = dqm_debugfs_hqds(m, dev->gpu->dqm); in kfd_debugfs_hqds_by_device()
2014 r = pm_debugfs_runlist(m, &dev->gpu->dqm->packet_mgr); in kfd_debugfs_rls_by_device()
kfd_chardev.c
597 if (!pdd->dev->dqm->ops.set_cache_memory_policy(pdd->dev->dqm, in kfd_ioctl_set_memory_policy()
911 if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS && in kfd_ioctl_set_scratch_backing_va()
1442 if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { in kfd_ioctl_alloc_queue_gws()