Lines Matching full:pm

40 static void pm_calc_rlib_size(struct packet_manager *pm, in pm_calc_rlib_size() argument
47 struct kfd_dev *dev = pm->dqm->dev; in pm_calc_rlib_size()
49 process_count = pm->dqm->processes_count; in pm_calc_rlib_size()
50 queue_count = pm->dqm->active_queue_count; in pm_calc_rlib_size()
51 compute_queue_count = pm->dqm->active_cp_queue_count; in pm_calc_rlib_size()
52 gws_queue_count = pm->dqm->gws_queue_count; in pm_calc_rlib_size()
65 compute_queue_count > get_cp_queues_num(pm->dqm) || in pm_calc_rlib_size()
71 map_queue_size = pm->pmf->map_queues_size; in pm_calc_rlib_size()
73 *rlib_size = process_count * pm->pmf->map_process_size + in pm_calc_rlib_size()
81 *rlib_size += pm->pmf->runlist_size; in pm_calc_rlib_size()
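
The pm_calc_rlib_size() matches above size the runlist indirect buffer (IB): one map-process packet per process, one map-queues packet per active queue, plus room for a chained runlist packet when the list is over-subscribed. Below is a minimal stand-alone sketch of that arithmetic; the struct layouts, field names such as cp_queues_num, and the exact over-subscription test are simplified stand-ins for what the kernel reads through pm->dqm and pm->pmf, not the driver's real definitions.

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the per-packet sizes exposed through pm->pmf. */
struct pmf_sizes {
	unsigned int map_process_size;  /* bytes per MAP_PROCESS packet */
	unsigned int map_queues_size;   /* bytes per MAP_QUEUES packet  */
	unsigned int runlist_size;      /* bytes per RUN_LIST packet    */
};

/* Counters the real code reads from pm->dqm. */
struct dqm_counters {
	unsigned int processes_count;
	unsigned int active_queue_count;
	unsigned int active_cp_queue_count;
	unsigned int gws_queue_count;
	unsigned int cp_queues_num;        /* what get_cp_queues_num() returns */
	unsigned int max_proc_per_quantum;
};

static unsigned int calc_rlib_size(const struct pmf_sizes *pmf,
				   const struct dqm_counters *c,
				   bool *over_subscription)
{
	unsigned int size;

	/* Over-subscription test, paraphrased from the matched lines: too many
	 * processes per scheduling quantum, more compute queues than hardware
	 * queue slots, or more than one queue needing GWS. */
	*over_subscription = c->processes_count > c->max_proc_per_quantum ||
			     c->active_cp_queue_count > c->cp_queues_num ||
			     c->gws_queue_count > 1;

	/* One map_process packet per process, one map_queues packet per
	 * active queue. */
	size = c->processes_count * pmf->map_process_size +
	       c->active_queue_count * pmf->map_queues_size;

	/* Reserve room for a chained RUN_LIST packet at the end of the IB. */
	if (*over_subscription)
		size += pmf->runlist_size;

	return size;
}

int main(void)
{
	struct pmf_sizes pmf = { .map_process_size = 80,
				 .map_queues_size = 64,
				 .runlist_size = 24 };
	struct dqm_counters c = { .processes_count = 2,
				  .active_queue_count = 6,
				  .active_cp_queue_count = 6,
				  .cp_queues_num = 24,
				  .max_proc_per_quantum = 1 };
	bool over;

	printf("rlib size = %u bytes, over-subscribed = %d\n",
	       calc_rlib_size(&pmf, &c, &over), over);
	return 0;
}
```

With two processes and six queues against max_proc_per_quantum = 1, the example reports an over-subscribed list and adds the extra runlist packet to the allocation.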
86 static int pm_allocate_runlist_ib(struct packet_manager *pm, in pm_allocate_runlist_ib() argument
94 if (WARN_ON(pm->allocated)) in pm_allocate_runlist_ib()
97 pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription); in pm_allocate_runlist_ib()
99 mutex_lock(&pm->lock); in pm_allocate_runlist_ib()
101 retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size, in pm_allocate_runlist_ib()
102 &pm->ib_buffer_obj); in pm_allocate_runlist_ib()
109 *(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr; in pm_allocate_runlist_ib()
110 *rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr; in pm_allocate_runlist_ib()
113 pm->allocated = true; in pm_allocate_runlist_ib()
116 mutex_unlock(&pm->lock); in pm_allocate_runlist_ib()
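
pm_allocate_runlist_ib() then allocates a buffer of that size from the device's GTT sub-allocator under pm->lock, records the CPU and GPU addresses from pm->ib_buffer_obj, and sets pm->allocated; pm_release_ib(), further down in the listing, frees it under the same lock and clears the flag. A rough user-space sketch of that allocate-once/release pattern, with calloc() and a pthread mutex standing in for kfd_gtt_sa_allocate() and the kernel mutex (the struct layout and return codes here are illustrative):

```c
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Toy stand-in for the packet manager's runlist-IB bookkeeping. */
struct ib_state {
	pthread_mutex_t lock;     /* pm->lock                    */
	void           *cpu_ptr;  /* pm->ib_buffer_obj->cpu_ptr  */
	uint64_t        gpu_addr; /* pm->ib_buffer_obj->gpu_addr */
	unsigned int    size;
	bool            allocated;
};

/* Allocate the IB exactly once; a second call before a release is a bug
 * (the kernel flags it via WARN_ON(pm->allocated)). calloc() stands in for
 * kfd_gtt_sa_allocate(), which hands back both CPU and GPU addresses. */
static int allocate_runlist_ib(struct ib_state *s, unsigned int size)
{
	int ret = 0;

	if (s->allocated)
		return -EBUSY;

	pthread_mutex_lock(&s->lock);
	s->cpu_ptr = calloc(1, size);
	if (!s->cpu_ptr) {
		ret = -ENOMEM;
		goto out;
	}
	s->gpu_addr = (uintptr_t)s->cpu_ptr;  /* no real GPU VA in this toy */
	s->size = size;
	s->allocated = true;
out:
	pthread_mutex_unlock(&s->lock);
	return ret;
}

/* Mirror of pm_release_ib(): free under the same lock, clear the flag. */
static void release_runlist_ib(struct ib_state *s)
{
	pthread_mutex_lock(&s->lock);
	if (s->allocated) {
		free(s->cpu_ptr);             /* kfd_gtt_sa_free() in the kernel */
		s->allocated = false;
	}
	pthread_mutex_unlock(&s->lock);
}
```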
120 static int pm_create_runlist_ib(struct packet_manager *pm, in pm_create_runlist_ib() argument
136 retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr, in pm_create_runlist_ib()
142 pm->ib_size_bytes = alloc_size_bytes; in pm_create_runlist_ib()
145 pm->dqm->processes_count, pm->dqm->active_queue_count); in pm_create_runlist_ib()
151 if (processes_mapped >= pm->dqm->processes_count) { in pm_create_runlist_ib()
153 pm_release_ib(pm); in pm_create_runlist_ib()
157 retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd); in pm_create_runlist_ib()
162 inc_wptr(&rl_wptr, pm->pmf->map_process_size, in pm_create_runlist_ib()
172 retval = pm->pmf->map_queues(pm, in pm_create_runlist_ib()
180 pm->pmf->map_queues_size, in pm_create_runlist_ib()
191 retval = pm->pmf->map_queues(pm, in pm_create_runlist_ib()
200 pm->pmf->map_queues_size, in pm_create_runlist_ib()
208 if (!pm->is_over_subscription) in pm_create_runlist_ib()
210 retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr], in pm_create_runlist_ib()
215 pm->is_over_subscription = is_over_subscription; in pm_create_runlist_ib()
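
pm_create_runlist_ib() fills the IB by walking the queue lists handed over by the device queue manager: a map-process packet for each process, then a map-queues packet for each of that process's queues, advancing a write pointer by the per-packet sizes from pm->pmf, and finishing with a runlist packet that chains back to the start of the IB when the list is over-subscribed. The sketch below shows only that build-loop shape; the proc_desc layout, the emit_* helpers, and the packet sizes are hypothetical, not the PM4 formats the driver actually emits.

```c
#include <stdint.h>
#include <string.h>

#define MAP_PROCESS_DW 8u  /* illustrative packet sizes, in dwords */
#define MAP_QUEUES_DW  6u
#define RUN_LIST_DW    4u

struct proc_desc {
	unsigned int pasid;
	unsigned int nqueues;
	uint64_t     mqd_addrs[16];   /* GPU addresses of the queues' MQDs */
};

/* Hypothetical packet writers; each fills `dst` and returns dwords written. */
static unsigned int emit_map_process(uint32_t *dst, const struct proc_desc *p)
{
	memset(dst, 0, MAP_PROCESS_DW * 4);
	dst[1] = p->pasid;
	return MAP_PROCESS_DW;
}

static unsigned int emit_map_queues(uint32_t *dst, uint64_t mqd_addr)
{
	memset(dst, 0, MAP_QUEUES_DW * 4);
	dst[2] = (uint32_t)mqd_addr;
	dst[3] = (uint32_t)(mqd_addr >> 32);
	return MAP_QUEUES_DW;
}

static unsigned int emit_run_list(uint32_t *dst, uint64_t ib_gpu_addr,
				  unsigned int ib_dwords)
{
	memset(dst, 0, RUN_LIST_DW * 4);
	dst[1] = (uint32_t)ib_gpu_addr;
	dst[2] = (uint32_t)(ib_gpu_addr >> 32);
	dst[3] = ib_dwords;
	return RUN_LIST_DW;
}

/* Build the runlist IB: a map_process entry per process, a map_queues entry
 * per queue, and, when over-subscribed, a trailing RUN_LIST packet that
 * chains back to the start of this same IB. Returns dwords written. */
static unsigned int build_runlist_ib(uint32_t *ib, uint64_t ib_gpu_addr,
				     const struct proc_desc *procs, int nprocs,
				     int over_subscription)
{
	unsigned int wptr = 0;

	for (int i = 0; i < nprocs; i++) {
		wptr += emit_map_process(&ib[wptr], &procs[i]);
		for (unsigned int q = 0; q < procs[i].nqueues; q++)
			wptr += emit_map_queues(&ib[wptr], procs[i].mqd_addrs[q]);
	}

	if (over_subscription)
		wptr += emit_run_list(&ib[wptr], ib_gpu_addr, wptr);

	return wptr;
}
```

The trailing chained packet is what lets the hardware scheduler cycle through more processes and queues than it can hold resident at once.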
224 int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) in pm_init() argument
237 pm->pmf = &kfd_vi_pm_funcs; in pm_init()
255 pm->pmf = &kfd_v9_pm_funcs; in pm_init()
258 pm->pmf = &kfd_aldebaran_pm_funcs; in pm_init()
266 pm->dqm = dqm; in pm_init()
267 mutex_init(&pm->lock); in pm_init()
268 pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ); in pm_init()
269 if (!pm->priv_queue) { in pm_init()
270 mutex_destroy(&pm->lock); in pm_init()
273 pm->allocated = false; in pm_init()
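
pm_init() selects the per-generation packet writer table for pm->pmf (kfd_vi_pm_funcs, kfd_v9_pm_funcs, kfd_aldebaran_pm_funcs in the matches above), then wires up the device queue manager back-pointer, the lock, a HIQ-type kernel queue used to submit the packets, and clears pm->allocated. A schematic of that setup follows; the gfx_gen enum and the size-only pm_funcs struct are placeholders for the ASIC-family check and function tables the driver really uses.

```c
#include <stddef.h>

/* Hypothetical generation tag; the driver keys the choice off the device's
 * ASIC family, not an enum like this. */
enum gfx_gen { GEN_VI, GEN_V9, GEN_ALDEBARAN };

/* Per-generation packet writers and sizes (kfd_vi_pm_funcs etc. in the
 * kernel); only size fields are shown here, with illustrative values. */
struct pm_funcs {
	unsigned int map_process_size;
	unsigned int map_queues_size;
	unsigned int runlist_size;
};

static const struct pm_funcs vi_pm_funcs        = { 80, 64, 24 };
static const struct pm_funcs v9_pm_funcs        = { 80, 64, 24 };
static const struct pm_funcs aldebaran_pm_funcs = { 80, 64, 24 };

struct packet_mgr {
	const struct pm_funcs *pmf;
	void *dqm;         /* back-pointer to the device queue manager */
	void *priv_queue;  /* HIQ kernel queue used to submit packets  */
	int   allocated;   /* runlist IB not allocated yet             */
};

/* Schematic of pm_init(): pick the packet-format table, then wire up the
 * rest. The real code also initialises pm->lock and destroys it again if
 * the HIQ kernel queue cannot be created. */
static int packet_mgr_init(struct packet_mgr *pm, void *dqm, enum gfx_gen gen,
			   void *(*hiq_init)(void *dqm))
{
	switch (gen) {
	case GEN_VI:        pm->pmf = &vi_pm_funcs;        break;
	case GEN_V9:        pm->pmf = &v9_pm_funcs;        break;
	case GEN_ALDEBARAN: pm->pmf = &aldebaran_pm_funcs; break;
	default:            return -1;
	}

	pm->dqm = dqm;
	pm->priv_queue = hiq_init(dqm);  /* kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ) */
	if (!pm->priv_queue)
		return -1;

	pm->allocated = 0;
	return 0;
}
```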
278 void pm_uninit(struct packet_manager *pm, bool hanging) in pm_uninit() argument
280 mutex_destroy(&pm->lock); in pm_uninit()
281 kernel_queue_uninit(pm->priv_queue, hanging); in pm_uninit()
282 pm->priv_queue = NULL; in pm_uninit()
285 int pm_send_set_resources(struct packet_manager *pm, in pm_send_set_resources() argument
291 size = pm->pmf->set_resources_size; in pm_send_set_resources()
292 mutex_lock(&pm->lock); in pm_send_set_resources()
293 kq_acquire_packet_buffer(pm->priv_queue, in pm_send_set_resources()
302 retval = pm->pmf->set_resources(pm, buffer, res); in pm_send_set_resources()
304 kq_submit_packet(pm->priv_queue); in pm_send_set_resources()
306 kq_rollback_packet(pm->priv_queue); in pm_send_set_resources()
309 mutex_unlock(&pm->lock); in pm_send_set_resources()
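
pm_send_set_resources() shows the submission pattern that the other pm_send_* helpers in this listing repeat: take pm->lock, reserve pm->pmf->..._size / sizeof(uint32_t) dwords on the HIQ kernel queue with kq_acquire_packet_buffer(), let the pm->pmf callback format the packet in place, then kq_submit_packet() on success or kq_rollback_packet() on failure, and unlock. A self-contained sketch of that acquire/build/submit-or-rollback flow; the toy_kq ring and kq_* helpers below are stand-ins, not the kfd kernel-queue API.

```c
#include <pthread.h>
#include <stdint.h>

/* Toy ring buffer standing in for the HIQ kernel queue (pm->priv_queue). */
struct toy_kq {
	uint32_t     ring[256];
	unsigned int wptr;      /* dwords already submitted           */
	unsigned int pending;   /* dwords reserved, not yet submitted */
};

static int kq_acquire(struct toy_kq *kq, unsigned int ndw, uint32_t **buf)
{
	if (kq->wptr + ndw > 256) {
		*buf = NULL;               /* ring full */
		return -1;
	}
	*buf = &kq->ring[kq->wptr];
	kq->pending = ndw;
	return 0;
}

static void kq_submit(struct toy_kq *kq)   { kq->wptr += kq->pending; kq->pending = 0; }
static void kq_rollback(struct toy_kq *kq) { kq->pending = 0; }

struct pkt_mgr {
	pthread_mutex_t lock;
	struct toy_kq  *priv_queue;
};

/* Generic acquire/build/submit-or-rollback flow shared by the pm_send_*
 * helpers. packet_size is in bytes, like the pm->pmf->*_size fields, and
 * `build` stands in for the pm->pmf callback that formats the packet. */
static int send_packet(struct pkt_mgr *pm, unsigned int packet_size,
		       int (*build)(uint32_t *buf, void *arg), void *arg)
{
	uint32_t *buffer = NULL;
	int ret;

	pthread_mutex_lock(&pm->lock);

	kq_acquire(pm->priv_queue, packet_size / sizeof(uint32_t), &buffer);
	if (!buffer) {
		ret = -1;                        /* nothing reserved, nothing to undo */
		goto out;
	}

	ret = build(buffer, arg);                /* e.g. pm->pmf->set_resources() */
	if (!ret)
		kq_submit(pm->priv_queue);       /* publish the packet            */
	else
		kq_rollback(pm->priv_queue);     /* hand the space back           */
out:
	pthread_mutex_unlock(&pm->lock);
	return ret;
}
```

pm_send_query_status() and pm_send_unmap_queue() below follow exactly this shape, differing only in the packet size and the pm->pmf callback used.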
314 int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues) in pm_send_runlist() argument
321 retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr, in pm_send_runlist()
328 packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t); in pm_send_runlist()
329 mutex_lock(&pm->lock); in pm_send_runlist()
331 retval = kq_acquire_packet_buffer(pm->priv_queue, in pm_send_runlist()
336 retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr, in pm_send_runlist()
341 kq_submit_packet(pm->priv_queue); in pm_send_runlist()
343 mutex_unlock(&pm->lock); in pm_send_runlist()
348 kq_rollback_packet(pm->priv_queue); in pm_send_runlist()
350 mutex_unlock(&pm->lock); in pm_send_runlist()
352 pm_release_ib(pm); in pm_send_runlist()
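
pm_send_runlist() adds two twists to that pattern: it first builds the runlist IB via pm_create_runlist_ib(), and on any failure after that point it must also call pm_release_ib() so the IB is not leaked, which is why the matches show rollback, unlock, and release on separate error paths. A small sketch of just that unwind ordering; every helper here is a toy stand-in for the kernel call named in the comment next to it.

```c
#include <pthread.h>
#include <stdint.h>

/* Toy helpers; in the kernel these are kq_acquire_packet_buffer(),
 * pm->pmf->runlist(), kq_submit_packet(), kq_rollback_packet() and
 * pm_release_ib(). */
static uint32_t hiq_ring[64];
static int  reserve_ring_space(uint32_t **buf)   { *buf = hiq_ring; return 0; }
static int  format_runlist_packet(uint32_t *buf) { buf[0] = 0; return 0; }
static void submit_ring(void)                    { }
static void rollback_ring(void)                  { }

struct runlist_ctx {
	pthread_mutex_t lock;
	int ib_allocated;        /* runlist IB built by pm_create_runlist_ib() */
};

static void release_ib(struct runlist_ctx *c)    { c->ib_allocated = 0; }

/* Error-unwind shape of pm_send_runlist(): each failure point undoes only
 * what already happened, and every failure path ends by releasing the
 * runlist IB so it is not leaked. */
static int send_runlist(struct runlist_ctx *c)
{
	uint32_t *buffer;
	int ret;

	pthread_mutex_lock(&c->lock);

	ret = reserve_ring_space(&buffer);     /* kq_acquire_packet_buffer() */
	if (ret)
		goto fail_acquire;

	ret = format_runlist_packet(buffer);   /* RUN_LIST pointing at the IB */
	if (ret)
		goto fail_format;

	submit_ring();                         /* kq_submit_packet()          */
	pthread_mutex_unlock(&c->lock);
	return 0;

fail_format:
	rollback_ring();                       /* kq_rollback_packet()        */
fail_acquire:
	pthread_mutex_unlock(&c->lock);
	release_ib(c);                         /* pm_release_ib()             */
	return ret;
}
```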
356 int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, in pm_send_query_status() argument
365 size = pm->pmf->query_status_size; in pm_send_query_status()
366 mutex_lock(&pm->lock); in pm_send_query_status()
367 kq_acquire_packet_buffer(pm->priv_queue, in pm_send_query_status()
375 retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value); in pm_send_query_status()
377 kq_submit_packet(pm->priv_queue); in pm_send_query_status()
379 kq_rollback_packet(pm->priv_queue); in pm_send_query_status()
382 mutex_unlock(&pm->lock); in pm_send_query_status()
386 int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, in pm_send_unmap_queue() argument
394 size = pm->pmf->unmap_queues_size; in pm_send_unmap_queue()
395 mutex_lock(&pm->lock); in pm_send_unmap_queue()
396 kq_acquire_packet_buffer(pm->priv_queue, in pm_send_unmap_queue()
404 retval = pm->pmf->unmap_queues(pm, buffer, type, filter, filter_param, in pm_send_unmap_queue()
407 kq_submit_packet(pm->priv_queue); in pm_send_unmap_queue()
409 kq_rollback_packet(pm->priv_queue); in pm_send_unmap_queue()
412 mutex_unlock(&pm->lock); in pm_send_unmap_queue()
416 void pm_release_ib(struct packet_manager *pm) in pm_release_ib() argument
418 mutex_lock(&pm->lock); in pm_release_ib()
419 if (pm->allocated) { in pm_release_ib()
420 kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj); in pm_release_ib()
421 pm->allocated = false; in pm_release_ib()
423 mutex_unlock(&pm->lock); in pm_release_ib()
430 struct packet_manager *pm = data; in pm_debugfs_runlist() local
432 mutex_lock(&pm->lock); in pm_debugfs_runlist()
434 if (!pm->allocated) { in pm_debugfs_runlist()
440 pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false); in pm_debugfs_runlist()
443 mutex_unlock(&pm->lock); in pm_debugfs_runlist()
447 int pm_debugfs_hang_hws(struct packet_manager *pm) in pm_debugfs_hang_hws() argument
452 if (!pm->priv_queue) in pm_debugfs_hang_hws()
455 size = pm->pmf->query_status_size; in pm_debugfs_hang_hws()
456 mutex_lock(&pm->lock); in pm_debugfs_hang_hws()
457 kq_acquire_packet_buffer(pm->priv_queue, in pm_debugfs_hang_hws()
465 kq_submit_packet(pm->priv_queue); in pm_debugfs_hang_hws()
471 mutex_unlock(&pm->lock); in pm_debugfs_hang_hws()