Lines Matching full:gfx
31 /* delay 0.1 second to enable gfx off feature */
35 * GPU GFX IP block helper functions.
43 bit += mec * adev->gfx.mec.num_pipe_per_mec in amdgpu_gfx_mec_queue_to_bit()
44 * adev->gfx.mec.num_queue_per_pipe; in amdgpu_gfx_mec_queue_to_bit()
45 bit += pipe * adev->gfx.mec.num_queue_per_pipe; in amdgpu_gfx_mec_queue_to_bit()
54 *queue = bit % adev->gfx.mec.num_queue_per_pipe; in amdgpu_queue_mask_bit_to_mec_queue()
55 *pipe = (bit / adev->gfx.mec.num_queue_per_pipe) in amdgpu_queue_mask_bit_to_mec_queue()
56 % adev->gfx.mec.num_pipe_per_mec; in amdgpu_queue_mask_bit_to_mec_queue()
57 *mec = (bit / adev->gfx.mec.num_queue_per_pipe) in amdgpu_queue_mask_bit_to_mec_queue()
58 / adev->gfx.mec.num_pipe_per_mec; in amdgpu_queue_mask_bit_to_mec_queue()
66 adev->gfx.mec.queue_bitmap); in amdgpu_gfx_is_mec_queue_enabled()
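Taken together, the helpers above are exact inverses: amdgpu_gfx_mec_queue_to_bit() linearizes a (mec, pipe, queue) triple into a flat index for the MEC queue bitmap, and amdgpu_queue_mask_bit_to_mec_queue() recovers the triple with div/mod arithmetic; the me variants at lines 74-89 follow the identical scheme with per-ME counts. A minimal standalone sketch of the round trip, with the pipe and queue counts hard-coded to illustrative values (the driver reads the real ones from adev->gfx.mec):

    #include <stdio.h>

    /* Illustrative counts only; not queried from hardware. */
    #define NUM_PIPE_PER_MEC   4
    #define NUM_QUEUE_PER_PIPE 8

    static int mec_queue_to_bit(int mec, int pipe, int queue)
    {
        return mec * NUM_PIPE_PER_MEC * NUM_QUEUE_PER_PIPE
             + pipe * NUM_QUEUE_PER_PIPE
             + queue;
    }

    static void bit_to_mec_queue(int bit, int *mec, int *pipe, int *queue)
    {
        *queue = bit % NUM_QUEUE_PER_PIPE;
        *pipe  = (bit / NUM_QUEUE_PER_PIPE) % NUM_PIPE_PER_MEC;
        *mec   = (bit / NUM_QUEUE_PER_PIPE) / NUM_PIPE_PER_MEC;
    }

    int main(void)
    {
        int mec, pipe, queue;
        int bit = mec_queue_to_bit(1, 2, 3); /* MEC 1, pipe 2, queue 3 */

        bit_to_mec_queue(bit, &mec, &pipe, &queue);
        printf("bit %d -> mec %d pipe %d queue %d\n", bit, mec, pipe, queue);
        /* prints: bit 51 -> mec 1 pipe 2 queue 3 */
        return 0;
    }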
74 bit += me * adev->gfx.me.num_pipe_per_me in amdgpu_gfx_me_queue_to_bit()
75 * adev->gfx.me.num_queue_per_pipe; in amdgpu_gfx_me_queue_to_bit()
76 bit += pipe * adev->gfx.me.num_queue_per_pipe; in amdgpu_gfx_me_queue_to_bit()
85 *queue = bit % adev->gfx.me.num_queue_per_pipe; in amdgpu_gfx_bit_to_me_queue()
86 *pipe = (bit / adev->gfx.me.num_queue_per_pipe) in amdgpu_gfx_bit_to_me_queue()
87 % adev->gfx.me.num_pipe_per_me; in amdgpu_gfx_bit_to_me_queue()
88 *me = (bit / adev->gfx.me.num_queue_per_pipe) in amdgpu_gfx_bit_to_me_queue()
89 / adev->gfx.me.num_pipe_per_me; in amdgpu_gfx_bit_to_me_queue()
96 adev->gfx.me.queue_bitmap); in amdgpu_gfx_is_me_queue_enabled()
112 i = ffs(adev->gfx.scratch.free_mask); in amdgpu_gfx_scratch_get()
113 if (i != 0 && i <= adev->gfx.scratch.num_reg) { in amdgpu_gfx_scratch_get()
115 adev->gfx.scratch.free_mask &= ~(1u << i); in amdgpu_gfx_scratch_get()
116 *reg = adev->gfx.scratch.reg_base + i; in amdgpu_gfx_scratch_get()
132 adev->gfx.scratch.free_mask |= 1u << (reg - adev->gfx.scratch.reg_base); in amdgpu_gfx_scratch_free()
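amdgpu_gfx_scratch_get() and amdgpu_gfx_scratch_free() form a tiny bitmask allocator: ffs() yields the 1-based position of the lowest set bit in free_mask (0 when no bit is set), that bit is cleared to claim a scratch register, and free() sets it again from the register offset. Note the listing omits the non-matching line between 113 and 115, where the 1-based ffs() result is decremented to a 0-based index before use. A userspace sketch of the same allocator; the pool size and register base are made-up values:

    #include <stdio.h>
    #include <stdint.h>
    #include <strings.h> /* ffs() */

    /* Toy stand-in for adev->gfx.scratch; 8 registers is an assumption. */
    static struct {
        uint32_t free_mask; /* bit i set => register i is free */
        uint32_t reg_base;
        int num_reg;
    } scratch = { .free_mask = 0xff, .reg_base = 0x1000, .num_reg = 8 };

    static int scratch_get(uint32_t *reg)
    {
        int i = ffs(scratch.free_mask); /* 1-based lowest free bit, 0 if none */

        if (i != 0 && i <= scratch.num_reg) {
            i--;                             /* back to 0-based */
            scratch.free_mask &= ~(1u << i); /* mark busy */
            *reg = scratch.reg_base + i;
            return 0;
        }
        return -1; /* -EINVAL in the driver */
    }

    static void scratch_free(uint32_t reg)
    {
        scratch.free_mask |= 1u << (reg - scratch.reg_base);
    }

    int main(void)
    {
        uint32_t reg;

        if (!scratch_get(&reg)) {
            printf("got scratch reg 0x%x\n", reg); /* 0x1000 */
            scratch_free(reg);
        }
        return 0;
    }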
192 return adev->gfx.mec.num_mec > 1; in amdgpu_gfx_is_multipipe_capable()
207 int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec * in amdgpu_gfx_compute_queue_acquire()
208 adev->gfx.mec.num_queue_per_pipe, in amdgpu_gfx_compute_queue_acquire()
209 adev->gfx.num_compute_rings); in amdgpu_gfx_compute_queue_acquire()
214 pipe = i % adev->gfx.mec.num_pipe_per_mec; in amdgpu_gfx_compute_queue_acquire()
215 queue = (i / adev->gfx.mec.num_pipe_per_mec) % in amdgpu_gfx_compute_queue_acquire()
216 adev->gfx.mec.num_queue_per_pipe; in amdgpu_gfx_compute_queue_acquire()
218 set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue, in amdgpu_gfx_compute_queue_acquire()
219 adev->gfx.mec.queue_bitmap); in amdgpu_gfx_compute_queue_acquire()
224 set_bit(i, adev->gfx.mec.queue_bitmap); in amdgpu_gfx_compute_queue_acquire()
227 dev_dbg(adev->dev, "mec queue bitmap weight=%d\n", bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)); in amdgpu_gfx_compute_queue_acquire()
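On multipipe-capable parts (the num_mec > 1 check at line 192), amdgpu_gfx_compute_queue_acquire() spreads the rings across pipes rather than packing one pipe full: ring i lands on pipe i % num_pipe_per_mec and queue (i / num_pipe_per_mec) % num_queue_per_pipe, and the matching bit is set in the MEC queue bitmap. A sketch of that placement policy under assumed counts:

    #include <stdio.h>

    #define NUM_PIPE_PER_MEC   4 /* assumed, not queried */
    #define NUM_QUEUE_PER_PIPE 8
    #define NUM_COMPUTE_RINGS  8

    int main(void)
    {
        /* Multipipe policy: round-robin the rings over pipes first. */
        for (int i = 0; i < NUM_COMPUTE_RINGS; i++) {
            int pipe  = i % NUM_PIPE_PER_MEC;
            int queue = (i / NUM_PIPE_PER_MEC) % NUM_QUEUE_PER_PIPE;

            printf("ring %d -> pipe %d queue %d (bit %d)\n",
                   i, pipe, queue, pipe * NUM_QUEUE_PER_PIPE + queue);
        }
        /* rings 0..3 land on pipes 0..3 at queue 0; rings 4..7 wrap to queue 1 */
        return 0;
    }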
235 queue = i % adev->gfx.me.num_queue_per_pipe; in amdgpu_gfx_graphics_queue_acquire()
236 me = (i / adev->gfx.me.num_queue_per_pipe) in amdgpu_gfx_graphics_queue_acquire()
237 / adev->gfx.me.num_pipe_per_me; in amdgpu_gfx_graphics_queue_acquire()
239 if (me >= adev->gfx.me.num_me) in amdgpu_gfx_graphics_queue_acquire()
244 set_bit(i, adev->gfx.me.queue_bitmap); in amdgpu_gfx_graphics_queue_acquire()
248 adev->gfx.num_gfx_rings = in amdgpu_gfx_graphics_queue_acquire()
249 bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES); in amdgpu_gfx_graphics_queue_acquire()
258 queue_bit = adev->gfx.mec.num_mec in amdgpu_gfx_kiq_acquire()
259 * adev->gfx.mec.num_pipe_per_mec in amdgpu_gfx_kiq_acquire()
260 * adev->gfx.mec.num_queue_per_pipe; in amdgpu_gfx_kiq_acquire()
263 if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap)) in amdgpu_gfx_kiq_acquire()
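amdgpu_gfx_kiq_acquire() computes the total number of queue bits and then scans downward, skipping (via the test_bit at line 263) any bit already claimed for a compute ring, so the KIQ lands on the highest-numbered queue still free. A toy version of that downward scan; the bit total and the pre-claimed bits are assumptions:

    #include <stdio.h>

    /* num_mec * num_pipe_per_mec * num_queue_per_pipe; assumed value */
    #define TOTAL_QUEUE_BITS 64

    int main(void)
    {
        unsigned long long compute_bitmap = 0xf; /* assume rings took bits 0..3 */
        int queue_bit = TOTAL_QUEUE_BITS;

        while (queue_bit-- > 0) {
            if (compute_bitmap & (1ULL << queue_bit))
                continue; /* already owned by a compute ring */
            printf("KIQ takes queue bit %d\n", queue_bit); /* prints 63 */
            break;
        }
        return 0;
    }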
291 struct amdgpu_kiq *kiq = &adev->gfx.kiq; in amdgpu_gfx_kiq_init_ring()
324 struct amdgpu_kiq *kiq = &adev->gfx.kiq; in amdgpu_gfx_kiq_fini()
334 struct amdgpu_kiq *kiq = &adev->gfx.kiq; in amdgpu_gfx_kiq_init()
355 /* create MQD for each compute/gfx queue */
363 ring = &adev->gfx.kiq.ring; in amdgpu_gfx_mqd_sw_init()
379 adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL); in amdgpu_gfx_mqd_sw_init()
380 if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]) in amdgpu_gfx_mqd_sw_init()
386 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { in amdgpu_gfx_mqd_sw_init()
387 ring = &adev->gfx.gfx_ring[i]; in amdgpu_gfx_mqd_sw_init()
398 adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL); in amdgpu_gfx_mqd_sw_init()
399 if (!adev->gfx.me.mqd_backup[i]) in amdgpu_gfx_mqd_sw_init()
406 for (i = 0; i < adev->gfx.num_compute_rings; i++) { in amdgpu_gfx_mqd_sw_init()
407 ring = &adev->gfx.compute_ring[i]; in amdgpu_gfx_mqd_sw_init()
418 adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL); in amdgpu_gfx_mqd_sw_init()
419 if (!adev->gfx.mec.mqd_backup[i]) in amdgpu_gfx_mqd_sw_init()
433 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { in amdgpu_gfx_mqd_sw_fini()
434 ring = &adev->gfx.gfx_ring[i]; in amdgpu_gfx_mqd_sw_fini()
435 kfree(adev->gfx.me.mqd_backup[i]); in amdgpu_gfx_mqd_sw_fini()
442 for (i = 0; i < adev->gfx.num_compute_rings; i++) { in amdgpu_gfx_mqd_sw_fini()
443 ring = &adev->gfx.compute_ring[i]; in amdgpu_gfx_mqd_sw_fini()
444 kfree(adev->gfx.mec.mqd_backup[i]); in amdgpu_gfx_mqd_sw_fini()
450 ring = &adev->gfx.kiq.ring; in amdgpu_gfx_mqd_sw_fini()
451 kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]); in amdgpu_gfx_mqd_sw_fini()
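Init and fini share a backup-slot convention: each gfx and compute ring owns one mqd_backup entry indexed by ring, while the KIQ ring's backup sits one slot past the compute rings, at index AMDGPU_MAX_COMPUTE_RINGS (allocated at line 379, freed at line 451). A sketch of that layout with placeholder sizes:

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_COMPUTE_RINGS 8   /* stand-in for AMDGPU_MAX_COMPUTE_RINGS */
    #define MQD_SIZE          512 /* placeholder; the real size is per-ASIC */

    /* Slots 0..MAX_COMPUTE_RINGS-1 back the compute rings; the extra
     * slot at MAX_COMPUTE_RINGS backs the KIQ ring's MQD. */
    static void *mqd_backup[MAX_COMPUTE_RINGS + 1];

    static int mqd_backup_init(int num_rings)
    {
        for (int i = 0; i < num_rings; i++) {
            mqd_backup[i] = calloc(1, MQD_SIZE);
            if (!mqd_backup[i])
                return -1;
        }
        mqd_backup[MAX_COMPUTE_RINGS] = calloc(1, MQD_SIZE); /* KIQ slot */
        return mqd_backup[MAX_COMPUTE_RINGS] ? 0 : -1;
    }

    static void mqd_backup_fini(int num_rings)
    {
        for (int i = 0; i < num_rings; i++)
            free(mqd_backup[i]);
        free(mqd_backup[MAX_COMPUTE_RINGS]); /* matches the kfree at line 451 */
    }

    int main(void)
    {
        if (mqd_backup_init(4) == 0)
            puts("mqd backups allocated");
        mqd_backup_fini(4);
        return 0;
    }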
459 struct amdgpu_kiq *kiq = &adev->gfx.kiq; in amdgpu_gfx_disable_kcq()
467 adev->gfx.num_compute_rings)) in amdgpu_gfx_disable_kcq()
470 for (i = 0; i < adev->gfx.num_compute_rings; i++) in amdgpu_gfx_disable_kcq()
471 kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i], in amdgpu_gfx_disable_kcq()
492 struct amdgpu_kiq *kiq = &adev->gfx.kiq; in amdgpu_gfx_enable_kcq()
493 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; in amdgpu_gfx_enable_kcq()
501 if (!test_bit(i, adev->gfx.mec.queue_bitmap)) in amdgpu_gfx_enable_kcq()
519 adev->gfx.num_compute_rings + in amdgpu_gfx_enable_kcq()
527 for (i = 0; i < adev->gfx.num_compute_rings; i++) in amdgpu_gfx_enable_kcq()
528 kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]); in amdgpu_gfx_enable_kcq()
537 /* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
540 * @enable: true = enable gfx off feature, false = disable gfx off feature
542 * 1. gfx off feature will be enabled by the gfx ip after gfx cg/pg is enabled.
543 * 2. other clients can send requests to disable the gfx off feature; such requests should be honored.
544 * 3. other clients can cancel their request to disable the gfx off feature.
545 * 4. other clients should not request enabling the gfx off feature without a prior disable request.
553 mutex_lock(&adev->gfx.gfx_off_mutex); in amdgpu_gfx_off_ctrl()
556 adev->gfx.gfx_off_req_count++; in amdgpu_gfx_off_ctrl()
557 else if (adev->gfx.gfx_off_req_count > 0) in amdgpu_gfx_off_ctrl()
558 adev->gfx.gfx_off_req_count--; in amdgpu_gfx_off_ctrl()
560 if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) { in amdgpu_gfx_off_ctrl()
561 schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE); in amdgpu_gfx_off_ctrl()
562 } else if (!enable && adev->gfx.gfx_off_state) { in amdgpu_gfx_off_ctrl()
564 adev->gfx.gfx_off_state = false; in amdgpu_gfx_off_ctrl()
566 if (adev->gfx.funcs->init_spm_golden) { in amdgpu_gfx_off_ctrl()
573 mutex_unlock(&adev->gfx.gfx_off_mutex); in amdgpu_gfx_off_ctrl()
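gfx_off_req_count is a disable-request counter guarded by gfx_off_mutex: each disable request increments it, each cancellation decrements it, and GFXOFF is only re-armed, through the 0.1 second delayed work, once the count falls back to zero. A userspace sketch of the counting discipline, with a pthread mutex standing in for the kernel mutex and an inline flag standing in for the delayed work:

    #include <stdio.h>
    #include <stdbool.h>
    #include <pthread.h>

    static pthread_mutex_t gfx_off_mutex = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int gfx_off_req_count; /* outstanding disable requests */
    static bool gfx_off_state;             /* true while GFXOFF is armed */

    static void gfx_off_ctrl(bool enable)
    {
        pthread_mutex_lock(&gfx_off_mutex);

        if (!enable)
            gfx_off_req_count++;
        else if (gfx_off_req_count > 0)
            gfx_off_req_count--;

        if (enable && !gfx_off_state && !gfx_off_req_count) {
            /* the driver schedules delayed work here (0.1 s debounce) */
            gfx_off_state = true;
            puts("arming GFXOFF");
        } else if (!enable && gfx_off_state) {
            gfx_off_state = false;
            puts("forcing gfx on");
        }

        pthread_mutex_unlock(&gfx_off_mutex);
    }

    int main(void)
    {
        gfx_off_ctrl(false); /* client A: keep gfx on */
        gfx_off_ctrl(false); /* client B: keep gfx on */
        gfx_off_ctrl(true);  /* A done: count drops to 1, nothing happens */
        gfx_off_ctrl(true);  /* B done: count hits 0, GFXOFF armed */
        return 0;
    }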
581 mutex_lock(&adev->gfx.gfx_off_mutex); in amdgpu_get_gfx_off_status()
585 mutex_unlock(&adev->gfx.gfx_off_mutex); in amdgpu_get_gfx_off_status()
600 if (!adev->gfx.ras_if) { in amdgpu_gfx_ras_late_init()
601 adev->gfx.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); in amdgpu_gfx_ras_late_init()
602 if (!adev->gfx.ras_if) in amdgpu_gfx_ras_late_init()
604 adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX; in amdgpu_gfx_ras_late_init()
605 adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; in amdgpu_gfx_ras_late_init()
606 adev->gfx.ras_if->sub_block_index = 0; in amdgpu_gfx_ras_late_init()
607 strcpy(adev->gfx.ras_if->name, "gfx"); in amdgpu_gfx_ras_late_init()
609 fs_info.head = ih_info.head = *adev->gfx.ras_if; in amdgpu_gfx_ras_late_init()
611 r = amdgpu_ras_late_init(adev, adev->gfx.ras_if, in amdgpu_gfx_ras_late_init()
616 if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) { in amdgpu_gfx_ras_late_init()
617 r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); in amdgpu_gfx_ras_late_init()
621 /* free gfx ras_if if ras is not supported */ in amdgpu_gfx_ras_late_init()
628 amdgpu_ras_late_fini(adev, adev->gfx.ras_if, &ih_info); in amdgpu_gfx_ras_late_init()
630 kfree(adev->gfx.ras_if); in amdgpu_gfx_ras_late_init()
631 adev->gfx.ras_if = NULL; in amdgpu_gfx_ras_late_init()
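The RAS path allocates the gfx ras_if descriptor lazily on first call, fills in the block identity, and frees it again when RAS turns out to be unsupported, so the descriptor persists only on RAS-capable parts. A stripped-down sketch of that init-or-free pattern; the struct fields and constants are simplified stand-ins:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Simplified stand-in for struct ras_common_if. */
    struct ras_common_if {
        int block;
        int type;
        unsigned int sub_block_index;
        char name[32];
    };

    static struct ras_common_if *gfx_ras_if;

    static int gfx_ras_late_init(int ras_supported)
    {
        /* Lazy allocation: keep the descriptor across calls once created. */
        if (!gfx_ras_if) {
            gfx_ras_if = malloc(sizeof(*gfx_ras_if));
            if (!gfx_ras_if)
                return -1; /* -ENOMEM in the driver */
            gfx_ras_if->block = 0;  /* AMDGPU_RAS_BLOCK__GFX */
            gfx_ras_if->type = 0;   /* MULTI_UNCORRECTABLE */
            gfx_ras_if->sub_block_index = 0;
            strcpy(gfx_ras_if->name, "gfx");
        }

        if (!ras_supported) {
            /* free gfx ras_if if ras is not supported */
            free(gfx_ras_if);
            gfx_ras_if = NULL;
        }
        return 0;
    }

    int main(void)
    {
        gfx_ras_late_init(0);
        printf("ras_if %s\n", gfx_ras_if ? "kept" : "freed");
        return 0;
    }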
638 adev->gfx.ras_if) { in amdgpu_gfx_ras_fini()
639 struct ras_common_if *ras_if = adev->gfx.ras_if; in amdgpu_gfx_ras_fini()
662 if (adev->gfx.funcs->query_ras_error_count) in amdgpu_gfx_process_ras_data_cb()
663 adev->gfx.funcs->query_ras_error_count(adev, err_data); in amdgpu_gfx_process_ras_data_cb()
673 struct ras_common_if *ras_if = adev->gfx.ras_if; in amdgpu_gfx_cp_ecc_error_irq()
693 struct amdgpu_kiq *kiq = &adev->gfx.kiq; in amdgpu_kiq_rreg()
758 struct amdgpu_kiq *kiq = &adev->gfx.kiq; in amdgpu_kiq_wreg()