Lines Matching refs:gfx in drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
45 bit += mec * adev->gfx.mec.num_pipe_per_mec in amdgpu_gfx_mec_queue_to_bit()
46 * adev->gfx.mec.num_queue_per_pipe; in amdgpu_gfx_mec_queue_to_bit()
47 bit += pipe * adev->gfx.mec.num_queue_per_pipe; in amdgpu_gfx_mec_queue_to_bit()
56 *queue = bit % adev->gfx.mec.num_queue_per_pipe; in amdgpu_gfx_bit_to_mec_queue()
57 *pipe = (bit / adev->gfx.mec.num_queue_per_pipe) in amdgpu_gfx_bit_to_mec_queue()
58 % adev->gfx.mec.num_pipe_per_mec; in amdgpu_gfx_bit_to_mec_queue()
59 *mec = (bit / adev->gfx.mec.num_queue_per_pipe) in amdgpu_gfx_bit_to_mec_queue()
60 / adev->gfx.mec.num_pipe_per_mec; in amdgpu_gfx_bit_to_mec_queue()
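The two helpers above pack and unpack a (mec, pipe, queue) triple into a flat bit index with mixed-radix arithmetic: bit = (mec * num_pipe_per_mec + pipe) * num_queue_per_pipe + queue, and the inverse recovers the fields with div/mod. A minimal user-space sketch of the round trip; the pipe/queue counts are illustrative stand-ins for the adev->gfx.mec values, not taken from any specific ASIC:

    #include <assert.h>
    #include <stdio.h>

    /* Illustrative topology; the real values live in adev->gfx.mec. */
    #define NUM_PIPE_PER_MEC   4
    #define NUM_QUEUE_PER_PIPE 8

    static int mec_queue_to_bit(int mec, int pipe, int queue)
    {
        int bit = 0;

        bit += mec * NUM_PIPE_PER_MEC * NUM_QUEUE_PER_PIPE;
        bit += pipe * NUM_QUEUE_PER_PIPE;
        bit += queue;
        return bit;
    }

    static void bit_to_mec_queue(int bit, int *mec, int *pipe, int *queue)
    {
        *queue = bit % NUM_QUEUE_PER_PIPE;
        *pipe = (bit / NUM_QUEUE_PER_PIPE) % NUM_PIPE_PER_MEC;
        *mec = (bit / NUM_QUEUE_PER_PIPE) / NUM_PIPE_PER_MEC;
    }

    int main(void)
    {
        int mec, pipe, queue;

        /* mec=1, pipe=2, queue=3 -> 1*32 + 2*8 + 3 = 51 */
        assert(mec_queue_to_bit(1, 2, 3) == 51);
        bit_to_mec_queue(51, &mec, &pipe, &queue);
        assert(mec == 1 && pipe == 2 && queue == 3);
        printf("round trip ok\n");
        return 0;
    }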
68 adev->gfx.mec.queue_bitmap); in amdgpu_gfx_is_mec_queue_enabled()
76 bit += me * adev->gfx.me.num_pipe_per_me in amdgpu_gfx_me_queue_to_bit()
77 * adev->gfx.me.num_queue_per_pipe; in amdgpu_gfx_me_queue_to_bit()
78 bit += pipe * adev->gfx.me.num_queue_per_pipe; in amdgpu_gfx_me_queue_to_bit()
87 *queue = bit % adev->gfx.me.num_queue_per_pipe; in amdgpu_gfx_bit_to_me_queue()
88 *pipe = (bit / adev->gfx.me.num_queue_per_pipe) in amdgpu_gfx_bit_to_me_queue()
89 % adev->gfx.me.num_pipe_per_me; in amdgpu_gfx_bit_to_me_queue()
90 *me = (bit / adev->gfx.me.num_queue_per_pipe) in amdgpu_gfx_bit_to_me_queue()
91 / adev->gfx.me.num_pipe_per_me; in amdgpu_gfx_bit_to_me_queue()
98 adev->gfx.me.queue_bitmap); in amdgpu_gfx_is_me_queue_enabled()
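amdgpu_gfx_is_mec_queue_enabled() (line 68) and amdgpu_gfx_is_me_queue_enabled() (line 98) are one-liners that compose the matching *_queue_to_bit() helper with test_bit() on the corresponding queue bitmap; the ME block at lines 76-98 is the MEC arithmetic with me/num_pipe_per_me substituted for mec/num_pipe_per_mec. A hedged user-space sketch of the MEC-side composition (my_test_bit stands in for the kernel's test_bit):

    #include <limits.h>
    #include <stdbool.h>

    #define NUM_PIPE_PER_MEC   4	/* illustrative */
    #define NUM_QUEUE_PER_PIPE 8	/* illustrative */
    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    /* same packing as the round-trip sketch above */
    static int mec_queue_to_bit(int mec, int pipe, int queue)
    {
        return (mec * NUM_PIPE_PER_MEC + pipe) * NUM_QUEUE_PER_PIPE + queue;
    }

    /* user-space stand-in for the kernel's test_bit() */
    static bool my_test_bit(int nr, const unsigned long *map)
    {
        return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1UL;
    }

    /* mirrors amdgpu_gfx_is_mec_queue_enabled(): encode, then test */
    static bool is_mec_queue_enabled(const unsigned long *queue_bitmap,
                                     int mec, int pipe, int queue)
    {
        return my_test_bit(mec_queue_to_bit(mec, pipe, queue), queue_bitmap);
    }

    int main(void)
    {
        unsigned long bitmap[2] = { 0xffUL, 0UL };	/* queues 0-7 enabled */

        return is_mec_queue_enabled(bitmap, 0, 0, 0) ? 0 : 1;
    }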
114 i = ffs(adev->gfx.scratch.free_mask); in amdgpu_gfx_scratch_get()
115 if (i != 0 && i <= adev->gfx.scratch.num_reg) { in amdgpu_gfx_scratch_get()
117 adev->gfx.scratch.free_mask &= ~(1u << i); in amdgpu_gfx_scratch_get()
118 *reg = adev->gfx.scratch.reg_base + i; in amdgpu_gfx_scratch_get()
134 adev->gfx.scratch.free_mask |= 1u << (reg - adev->gfx.scratch.reg_base); in amdgpu_gfx_scratch_free()
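Lines 114-118 and 134 form a tiny allocator over a bitmask: ffs() locates the lowest set (free) bit, amdgpu_gfx_scratch_get() clears it and hands out reg_base plus the bit number, and amdgpu_gfx_scratch_free() sets it again. ffs() returns a 1-based position, so the in-kernel code decrements i between lines 115 and 117; that decrement contains no "gfx" and hence does not appear in this listing. A user-space model of the allocator (reg_base and num_reg values are illustrative):

    #include <assert.h>
    #include <stdint.h>
    #include <strings.h>	/* ffs() */

    #define SCRATCH_NUM_REG  8		/* illustrative */
    #define SCRATCH_REG_BASE 0x2040u	/* illustrative */

    static uint32_t free_mask = (1u << SCRATCH_NUM_REG) - 1;

    static int scratch_get(uint32_t *reg)
    {
        int i = ffs(free_mask);		/* 1-based index of lowest free bit */

        if (i != 0 && i <= SCRATCH_NUM_REG) {
            i--;			/* back to a 0-based bit number */
            free_mask &= ~(1u << i);
            *reg = SCRATCH_REG_BASE + i;
            return 0;
        }
        return -1;			/* kernel returns -EINVAL */
    }

    static void scratch_free(uint32_t reg)
    {
        free_mask |= 1u << (reg - SCRATCH_REG_BASE);
    }

    int main(void)
    {
        uint32_t reg = 0;
        int r = scratch_get(&reg);

        assert(r == 0 && reg == SCRATCH_REG_BASE);
        scratch_free(reg);
        return 0;
    }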
194 return adev->gfx.mec.num_mec > 1; in amdgpu_gfx_is_multipipe_capable()
203 if (adev->gfx.num_compute_rings > 1 && in amdgpu_gfx_is_high_priority_compute_queue()
204 ring == &adev->gfx.compute_ring[0]) in amdgpu_gfx_is_high_priority_compute_queue()
214 int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec * in amdgpu_gfx_compute_queue_acquire()
215 adev->gfx.mec.num_queue_per_pipe, in amdgpu_gfx_compute_queue_acquire()
216 adev->gfx.num_compute_rings); in amdgpu_gfx_compute_queue_acquire()
221 pipe = i % adev->gfx.mec.num_pipe_per_mec; in amdgpu_gfx_compute_queue_acquire()
222 queue = (i / adev->gfx.mec.num_pipe_per_mec) % in amdgpu_gfx_compute_queue_acquire()
223 adev->gfx.mec.num_queue_per_pipe; in amdgpu_gfx_compute_queue_acquire()
225 set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue, in amdgpu_gfx_compute_queue_acquire()
226 adev->gfx.mec.queue_bitmap); in amdgpu_gfx_compute_queue_acquire()
231 set_bit(i, adev->gfx.mec.queue_bitmap); in amdgpu_gfx_compute_queue_acquire()
234 dev_dbg(adev->dev, "mec queue bitmap weight=%d\n", bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)); in amdgpu_gfx_compute_queue_acquire()
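amdgpu_gfx_compute_queue_acquire() sizes its loop by min(pipes * queues-per-pipe, num_compute_rings) (lines 214-216). On multipipe-capable parts (more than one MEC, line 194) it spreads amdgpu's queues round-robin across the pipes of the first MEC (lines 221-226); otherwise it claims the first max_queues_per_mec bits linearly (line 231). A user-space sketch of the round-robin policy; the counts are illustrative:

    #include <stdio.h>

    #define NUM_PIPE_PER_MEC   4	/* illustrative */
    #define NUM_QUEUE_PER_PIPE 8	/* illustrative */
    #define NUM_COMPUTE_RINGS  8	/* illustrative */

    int main(void)
    {
        int max_queues = NUM_PIPE_PER_MEC * NUM_QUEUE_PER_PIPE;
        int i, pipe, queue;

        if (NUM_COMPUTE_RINGS < max_queues)
            max_queues = NUM_COMPUTE_RINGS;

        /* multipipe policy: spread queues evenly across the pipes of
         * the first MEC, one queue per pipe before doubling up */
        for (i = 0; i < max_queues; i++) {
            pipe = i % NUM_PIPE_PER_MEC;
            queue = (i / NUM_PIPE_PER_MEC) % NUM_QUEUE_PER_PIPE;
            printf("ring %d -> pipe %d queue %d (bit %d)\n",
                   i, pipe, queue, pipe * NUM_QUEUE_PER_PIPE + queue);
        }
        return 0;
    }

With eight rings and four pipes, rings 0-3 claim queue 0 on pipes 0-3 and rings 4-7 claim queue 1 on the same pipes, which is the even spread across pipes that the policy aims for.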
242 queue = i % adev->gfx.me.num_queue_per_pipe; in amdgpu_gfx_graphics_queue_acquire()
243 me = (i / adev->gfx.me.num_queue_per_pipe) in amdgpu_gfx_graphics_queue_acquire()
244 / adev->gfx.me.num_pipe_per_me; in amdgpu_gfx_graphics_queue_acquire()
246 if (me >= adev->gfx.me.num_me) in amdgpu_gfx_graphics_queue_acquire()
251 set_bit(i, adev->gfx.me.queue_bitmap); in amdgpu_gfx_graphics_queue_acquire()
255 adev->gfx.num_gfx_rings = in amdgpu_gfx_graphics_queue_acquire()
256 bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES); in amdgpu_gfx_graphics_queue_acquire()
265 queue_bit = adev->gfx.mec.num_mec in amdgpu_gfx_kiq_acquire()
266 * adev->gfx.mec.num_pipe_per_mec in amdgpu_gfx_kiq_acquire()
267 * adev->gfx.mec.num_queue_per_pipe; in amdgpu_gfx_kiq_acquire()
270 if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap)) in amdgpu_gfx_kiq_acquire()
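amdgpu_gfx_kiq_acquire() computes the total number of queue bits (lines 265-267) and scans downward, skipping any bit already claimed in mec.queue_bitmap (line 270), so the KIQ lands on the highest-numbered queue the compute rings left free. A sketch of the scan, with a flat 64-bit value standing in for the kernel bitmap:

    #include <stdio.h>

    /* Shape of amdgpu_gfx_kiq_acquire(): start at the total queue count
     * (num_mec * num_pipe_per_mec * num_queue_per_pipe, lines 265-267)
     * and scan downward for a bit not owned by a compute ring. */
    static int kiq_acquire(unsigned long long compute_bitmap, int total_queue_bits)
    {
        int queue_bit = total_queue_bits;

        while (--queue_bit >= 0) {
            if (compute_bitmap & (1ULL << queue_bit))
                continue;	/* taken by a compute ring, skip */
            return queue_bit;	/* kernel decodes mec/pipe/queue here */
        }
        return -1;		/* kernel: -EINVAL, no queue left */
    }

    int main(void)
    {
        /* compute rings own bits 0-7 of 64; the KIQ lands on bit 63 */
        printf("kiq queue bit = %d\n", kiq_acquire(0xffULL, 64));
        return 0;
    }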
298 struct amdgpu_kiq *kiq = &adev->gfx.kiq; in amdgpu_gfx_kiq_init_ring()
330 struct amdgpu_kiq *kiq = &adev->gfx.kiq; in amdgpu_gfx_kiq_fini()
340 struct amdgpu_kiq *kiq = &adev->gfx.kiq; in amdgpu_gfx_kiq_init()
369 ring = &adev->gfx.kiq.ring; in amdgpu_gfx_mqd_sw_init()
385 adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL); in amdgpu_gfx_mqd_sw_init()
386 if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]) in amdgpu_gfx_mqd_sw_init()
392 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { in amdgpu_gfx_mqd_sw_init()
393 ring = &adev->gfx.gfx_ring[i]; in amdgpu_gfx_mqd_sw_init()
404 adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL); in amdgpu_gfx_mqd_sw_init()
405 if (!adev->gfx.me.mqd_backup[i]) in amdgpu_gfx_mqd_sw_init()
412 for (i = 0; i < adev->gfx.num_compute_rings; i++) { in amdgpu_gfx_mqd_sw_init()
413 ring = &adev->gfx.compute_ring[i]; in amdgpu_gfx_mqd_sw_init()
424 adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL); in amdgpu_gfx_mqd_sw_init()
425 if (!adev->gfx.mec.mqd_backup[i]) in amdgpu_gfx_mqd_sw_init()
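For every ring, amdgpu_gfx_mqd_sw_init() creates both the GPU-visible MQD buffer object and a CPU-side kmalloc'd backup (lines 385, 404, 424) used to restore queue state after reset or suspend; the KIQ's backup lives one slot past the compute rings, at index AMDGPU_MAX_COMPUTE_RINGS (line 385). A sketch of just the backup-slot layout; the array length and sizes are illustrative, and the real code also calls amdgpu_bo_create_kernel() for the BO half:

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_COMPUTE_RINGS 8	/* stands in for AMDGPU_MAX_COMPUTE_RINGS */

    /* One CPU-side backup slot per possible compute ring, plus one extra
     * slot past the end that is reserved for the KIQ's MQD. */
    static void *mqd_backup[MAX_COMPUTE_RINGS + 1];

    static int mqd_backup_alloc(int num_compute_rings, size_t mqd_size)
    {
        mqd_backup[MAX_COMPUTE_RINGS] = malloc(mqd_size);	/* KIQ slot */
        if (!mqd_backup[MAX_COMPUTE_RINGS])
            return -1;	/* kernel: -ENOMEM */

        for (int i = 0; i < num_compute_rings; i++) {
            mqd_backup[i] = malloc(mqd_size);
            if (!mqd_backup[i])	/* kernel warns and keeps going */
                fprintf(stderr, "no memory for MQD backup %d\n", i);
        }
        return 0;
    }

    int main(void)
    {
        return mqd_backup_alloc(4, 512);	/* illustrative sizes */
    }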
439 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { in amdgpu_gfx_mqd_sw_fini()
440 ring = &adev->gfx.gfx_ring[i]; in amdgpu_gfx_mqd_sw_fini()
441 kfree(adev->gfx.me.mqd_backup[i]); in amdgpu_gfx_mqd_sw_fini()
448 for (i = 0; i < adev->gfx.num_compute_rings; i++) { in amdgpu_gfx_mqd_sw_fini()
449 ring = &adev->gfx.compute_ring[i]; in amdgpu_gfx_mqd_sw_fini()
450 kfree(adev->gfx.mec.mqd_backup[i]); in amdgpu_gfx_mqd_sw_fini()
456 ring = &adev->gfx.kiq.ring; in amdgpu_gfx_mqd_sw_fini()
457 kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]); in amdgpu_gfx_mqd_sw_fini()
465 struct amdgpu_kiq *kiq = &adev->gfx.kiq; in amdgpu_gfx_disable_kcq()
472 spin_lock(&adev->gfx.kiq.ring_lock); in amdgpu_gfx_disable_kcq()
474 adev->gfx.num_compute_rings)) { in amdgpu_gfx_disable_kcq()
475 spin_unlock(&adev->gfx.kiq.ring_lock); in amdgpu_gfx_disable_kcq()
479 for (i = 0; i < adev->gfx.num_compute_rings; i++) in amdgpu_gfx_disable_kcq()
480 kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i], in amdgpu_gfx_disable_kcq()
483 spin_unlock(&adev->gfx.kiq.ring_lock); in amdgpu_gfx_disable_kcq()
503 struct amdgpu_kiq *kiq = &adev->gfx.kiq; in amdgpu_gfx_enable_kcq()
504 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; in amdgpu_gfx_enable_kcq()
512 if (!test_bit(i, adev->gfx.mec.queue_bitmap)) in amdgpu_gfx_enable_kcq()
528 spin_lock(&adev->gfx.kiq.ring_lock); in amdgpu_gfx_enable_kcq()
530 adev->gfx.num_compute_rings + in amdgpu_gfx_enable_kcq()
534 spin_unlock(&adev->gfx.kiq.ring_lock); in amdgpu_gfx_enable_kcq()
539 for (i = 0; i < adev->gfx.num_compute_rings; i++) in amdgpu_gfx_enable_kcq()
540 kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]); in amdgpu_gfx_enable_kcq()
543 spin_unlock(&adev->gfx.kiq.ring_lock); in amdgpu_gfx_enable_kcq()
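Both amdgpu_gfx_disable_kcq() and amdgpu_gfx_enable_kcq() funnel through the single KIQ ring: take kiq.ring_lock, reserve ring space sized from the per-packet sizes in kiq->pmf times the number of compute rings (enable additionally reserves room for a set_resources packet, lines 529-532), emit one unmap/map packet per compute ring, then test the ring and drop the lock. A stub-typed sketch of the disable path's shape; the types and helpers here are stand-ins, not the real amdgpu API:

    #include <stdio.h>

    /* Stub types; only what the control-flow sketch needs. */
    struct ring { const char *name; };
    struct kiq_pmf {
        unsigned int unmap_queues_size;	/* packet size, in ring dwords */
        void (*kiq_unmap_queues)(struct ring *kiq_ring, struct ring *q);
    };

    static void emit_unmap(struct ring *kiq_ring, struct ring *q)
    {
        printf("KIQ %s: unmap %s\n", kiq_ring->name, q->name);
    }

    /* Shape of amdgpu_gfx_disable_kcq(): under kiq->ring_lock, reserve
     * unmap_queues_size * num_rings dwords on the KIQ ring, then emit
     * one unmap packet per compute ring and test the ring afterwards. */
    static int disable_kcq(struct ring *kiq_ring, const struct kiq_pmf *pmf,
                           struct ring *compute, int num_rings)
    {
        if (!pmf || !pmf->kiq_unmap_queues)
            return -1;		/* kernel: -EINVAL */
        /* kernel: spin_lock(&kiq->ring_lock); amdgpu_ring_alloc(...); */
        for (int i = 0; i < num_rings; i++)
            pmf->kiq_unmap_queues(kiq_ring, &compute[i]);
        /* kernel: amdgpu_ring_test_helper(kiq_ring); spin_unlock(...); */
        return 0;
    }

    int main(void)
    {
        struct ring kiq = { "kiq" };
        struct ring cr[2] = { { "comp0" }, { "comp1" } };
        struct kiq_pmf pmf = { 4, emit_unmap };

        return disable_kcq(&kiq, &pmf, cr, 2);
    }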
568 mutex_lock(&adev->gfx.gfx_off_mutex); in amdgpu_gfx_off_ctrl()
575 if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0)) in amdgpu_gfx_off_ctrl()
578 adev->gfx.gfx_off_req_count--; in amdgpu_gfx_off_ctrl()
580 if (adev->gfx.gfx_off_req_count == 0 && in amdgpu_gfx_off_ctrl()
581 !adev->gfx.gfx_off_state) { in amdgpu_gfx_off_ctrl()
585 schedule_delayed_work(&adev->gfx.gfx_off_delay_work, in amdgpu_gfx_off_ctrl()
589 if (adev->gfx.gfx_off_req_count == 0) { in amdgpu_gfx_off_ctrl()
590 cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); in amdgpu_gfx_off_ctrl()
592 if (adev->gfx.gfx_off_state && in amdgpu_gfx_off_ctrl()
594 adev->gfx.gfx_off_state = false; in amdgpu_gfx_off_ctrl()
596 if (adev->gfx.funcs->init_spm_golden) { in amdgpu_gfx_off_ctrl()
604 adev->gfx.gfx_off_req_count++; in amdgpu_gfx_off_ctrl()
608 mutex_unlock(&adev->gfx.gfx_off_mutex); in amdgpu_gfx_off_ctrl()
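amdgpu_gfx_off_ctrl() keeps gfx_off_req_count under gfx_off_mutex as a count of outstanding requests to keep GFX powered on: enable requests decrement it and, at zero, arm delayed work that will later let the block power down (line 585); disable requests increment it and, on the transition from zero, synchronously cancel the pending work and force the block back on, re-initializing SPM golden settings where the ASIC provides init_spm_golden (line 596). A user-space model of the counting logic; the delayed work is reduced to a boolean, and the initial count of 1 is my reading of what device init sets up, not something shown in this listing:

    #include <assert.h>
    #include <stdbool.h>

    /* req_count counts outstanding "keep GFX on" requests; the kernel
     * appears to start it at 1 during device init (assumption). */
    static unsigned int gfx_off_req_count = 1;
    static bool gfx_off_state;		/* true = block powered down */
    static bool gfx_off_work_pending;	/* models the delayed work */

    static void gfx_off_ctrl(bool enable)
    {
        if (enable) {
            /* allow GFXOFF: drop one request and, once nobody needs
             * the block on, arm the delayed power-down */
            assert(gfx_off_req_count > 0);	/* kernel: WARN_ON_ONCE */
            if (--gfx_off_req_count == 0 && !gfx_off_state)
                gfx_off_work_pending = true;
        } else {
            /* forbid GFXOFF: on the first such request, cancel the
             * pending work and power the block back up synchronously */
            if (gfx_off_req_count == 0) {
                gfx_off_work_pending = false;
                gfx_off_state = false;	/* kernel also re-inits SPM */
            }
            gfx_off_req_count++;
        }
    }

    int main(void)
    {
        gfx_off_ctrl(true);	/* last user gone: arm the delayed work */
        assert(gfx_off_req_count == 0 && gfx_off_work_pending);

        gfx_off_ctrl(false);	/* new user: cancel, force GFX back on */
        assert(gfx_off_req_count == 1 && !gfx_off_work_pending);
        return 0;
    }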
616 mutex_lock(&adev->gfx.gfx_off_mutex); in amdgpu_get_gfx_off_status()
620 mutex_unlock(&adev->gfx.gfx_off_mutex); in amdgpu_get_gfx_off_status()
635 if (!adev->gfx.ras_if) { in amdgpu_gfx_ras_late_init()
636 adev->gfx.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); in amdgpu_gfx_ras_late_init()
637 if (!adev->gfx.ras_if) in amdgpu_gfx_ras_late_init()
639 adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX; in amdgpu_gfx_ras_late_init()
640 adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; in amdgpu_gfx_ras_late_init()
641 adev->gfx.ras_if->sub_block_index = 0; in amdgpu_gfx_ras_late_init()
643 fs_info.head = ih_info.head = *adev->gfx.ras_if; in amdgpu_gfx_ras_late_init()
644 r = amdgpu_ras_late_init(adev, adev->gfx.ras_if, in amdgpu_gfx_ras_late_init()
649 if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) { in amdgpu_gfx_ras_late_init()
653 r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); in amdgpu_gfx_ras_late_init()
664 amdgpu_ras_late_fini(adev, adev->gfx.ras_if, &ih_info); in amdgpu_gfx_ras_late_init()
666 kfree(adev->gfx.ras_if); in amdgpu_gfx_ras_late_init()
667 adev->gfx.ras_if = NULL; in amdgpu_gfx_ras_late_init()
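amdgpu_gfx_ras_late_init() allocates gfx.ras_if on first use and fills in block/type (lines 635-641), registers it with the RAS core (line 644), and, when GFX RAS is supported on the part, grabs the CP ECC error interrupt (line 653); on failure the labels unwind in reverse, falling through from late_fini into the kfree-and-NULL cleanup (lines 664-667). A condensed user-space sketch of that alloc/register/unwind shape; every *_core and irq helper here is a stub, not the real RAS API:

    #include <stdlib.h>

    struct ras_common_if { int block, type, sub_block_index; };

    static struct ras_common_if *gfx_ras_if;

    /* Stubs for the RAS core and IRQ plumbing; not the real API. */
    static int ras_late_init_core(struct ras_common_if *ri) { (void)ri; return 0; }
    static void ras_late_fini_core(struct ras_common_if *ri) { (void)ri; }
    static int cp_ecc_irq_get(void) { return 0; }

    static int gfx_ras_late_init(void)
    {
        int r;

        if (!gfx_ras_if) {
            gfx_ras_if = malloc(sizeof(*gfx_ras_if));
            if (!gfx_ras_if)
                return -1;	/* kernel: -ENOMEM */
            gfx_ras_if->block = 0;	/* kernel: AMDGPU_RAS_BLOCK__GFX etc. */
        }

        r = ras_late_init_core(gfx_ras_if);
        if (r)
            goto free;

        r = cp_ecc_irq_get();	/* only when RAS is supported on this part */
        if (r)
            goto late_fini;

        return 0;

    late_fini:
        ras_late_fini_core(gfx_ras_if);
    free:
        free(gfx_ras_if);
        gfx_ras_if = NULL;
        return r;
    }

    int main(void)
    {
        return gfx_ras_late_init();
    }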
674 adev->gfx.ras_if) { in amdgpu_gfx_ras_fini()
675 struct ras_common_if *ras_if = adev->gfx.ras_if; in amdgpu_gfx_ras_fini()
698 if (adev->gfx.ras_funcs && in amdgpu_gfx_process_ras_data_cb()
699 adev->gfx.ras_funcs->query_ras_error_count) in amdgpu_gfx_process_ras_data_cb()
700 adev->gfx.ras_funcs->query_ras_error_count(adev, err_data); in amdgpu_gfx_process_ras_data_cb()
710 struct ras_common_if *ras_if = adev->gfx.ras_if; in amdgpu_gfx_cp_ecc_error_irq()
730 struct amdgpu_kiq *kiq = &adev->gfx.kiq; in amdgpu_kiq_rreg()
795 struct amdgpu_kiq *kiq = &adev->gfx.kiq; in amdgpu_kiq_wreg()
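amdgpu_kiq_rreg() and amdgpu_kiq_wreg() route register access through the KIQ ring, which keeps MMIO working under SR-IOV where direct access from the guest may be disallowed: emit a read/write packet plus a polling fence while holding kiq->ring_lock, then busy-wait on the fence sequence after dropping the lock. A control-flow-only sketch of the read side; every helper below is a stub standing in for the ring/fence plumbing, not verified amdgpu API:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stubs standing in for ring/fence plumbing; not the real amdgpu API. */
    static void emit_read_packet(unsigned int reg) { (void)reg; }
    static unsigned int emit_polling_fence(void) { return 1; }
    static void commit_and_unlock(void) {}
    static bool wait_polling_fence(unsigned int seq) { (void)seq; return true; }
    static unsigned int read_writeback_slot(void) { return 0xdeadbeef; }

    /* Control-flow shape of amdgpu_kiq_rreg(): emit a read packet plus a
     * polling fence under kiq->ring_lock, wait for the fence outside it,
     * then pick the value out of the writeback slot. */
    static unsigned int kiq_rreg(unsigned int reg)
    {
        unsigned int seq;

        /* kernel: spin_lock_irqsave(&kiq->ring_lock, flags) and
         * amdgpu_ring_alloc() come first */
        emit_read_packet(reg);
        seq = emit_polling_fence();
        commit_and_unlock();

        if (!wait_polling_fence(seq))
            return ~0u;	/* kernel logs the failed read */

        return read_writeback_slot();
    }

    int main(void)
    {
        printf("0x%x\n", kiq_rreg(0x1234));
        return 0;
    }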