Lines matching refs: adev (drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c)
80 void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) in amdgpu_amdkfd_device_probe() argument
87 switch (adev->asic_type) { in amdgpu_amdkfd_device_probe()
106 dev_info(adev->dev, "kfd not supported on this ASIC\n"); in amdgpu_amdkfd_device_probe()
110 adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev, in amdgpu_amdkfd_device_probe()
111 adev->pdev, kfd2kgd); in amdgpu_amdkfd_device_probe()
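The probe path above switches on adev->asic_type to select a per-generation kfd2kgd interface before handing the device to KFD through kgd2kfd->probe(). A minimal standalone sketch of that dispatch pattern follows; the enum values, struct layout and pick_kfd2kgd() helper are simplified stand-ins, not the kernel definitions.

/* Standalone sketch of the per-ASIC dispatch done in amdgpu_amdkfd_device_probe().
 * All types and values here are simplified stand-ins, not the kernel definitions. */
#include <stdio.h>

enum asic_type { CHIP_KAVERI, CHIP_CARRIZO, CHIP_VEGA10, CHIP_UNKNOWN };

struct kfd2kgd_calls { const char *name; };  /* callbacks KFD uses to reach the GPU driver */

static const struct kfd2kgd_calls gfx_v7_kfd2kgd = { "gfx_v7" };
static const struct kfd2kgd_calls gfx_v8_kfd2kgd = { "gfx_v8" };
static const struct kfd2kgd_calls gfx_v9_kfd2kgd = { "gfx_v9" };

/* Pick the interface that matches the ASIC generation; NULL means "KFD not supported". */
static const struct kfd2kgd_calls *pick_kfd2kgd(enum asic_type asic)
{
	switch (asic) {
	case CHIP_KAVERI:
		return &gfx_v7_kfd2kgd;
	case CHIP_CARRIZO:
		return &gfx_v8_kfd2kgd;
	case CHIP_VEGA10:
		return &gfx_v9_kfd2kgd;
	default:
		return NULL;
	}
}

int main(void)
{
	const struct kfd2kgd_calls *kfd2kgd = pick_kfd2kgd(CHIP_VEGA10);

	if (!kfd2kgd)
		printf("kfd not supported on this ASIC\n");
	else
		printf("probing KFD with %s callbacks\n", kfd2kgd->name);
	return 0;
}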
127 static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev, in amdgpu_doorbell_get_kfd_info() argument
136 if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) { in amdgpu_doorbell_get_kfd_info()
137 *aperture_base = adev->doorbell.base; in amdgpu_doorbell_get_kfd_info()
138 *aperture_size = adev->doorbell.size; in amdgpu_doorbell_get_kfd_info()
139 *start_offset = adev->doorbell.num_doorbells * sizeof(u32); in amdgpu_doorbell_get_kfd_info()
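amdgpu_doorbell_get_kfd_info() splits the doorbell BAR: the first num_doorbells * sizeof(u32) bytes stay with the graphics driver, and the remainder is reported to KFD as its doorbell aperture (base, size, and the offset where KFD's share starts). A self-contained sketch of that arithmetic, using invented BAR numbers:

/* Sketch of the doorbell split in amdgpu_doorbell_get_kfd_info(); numbers are invented. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t doorbell_base = 0xd0000000;      /* start of the doorbell BAR */
	uint64_t doorbell_size = 8 * 1024 * 1024; /* whole BAR size in bytes */
	uint32_t num_doorbells = 0x400;           /* 32-bit doorbells used by the gfx driver */

	uint64_t gfx_bytes = (uint64_t)num_doorbells * sizeof(uint32_t);

	if (doorbell_size > gfx_bytes) {
		/* KFD gets everything past the graphics driver's doorbells. */
		uint64_t kfd_base = doorbell_base;
		uint64_t kfd_size = doorbell_size;
		uint64_t kfd_start_offset = gfx_bytes;

		printf("KFD doorbell aperture: base=0x%llx size=0x%llx start_offset=0x%llx\n",
		       (unsigned long long)kfd_base,
		       (unsigned long long)kfd_size,
		       (unsigned long long)kfd_start_offset);
	} else {
		/* BAR too small: no doorbell space left over for KFD. */
		printf("no doorbell space left for KFD\n");
	}
	return 0;
}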
147 void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) in amdgpu_amdkfd_device_init() argument
151 if (adev->kfd) { in amdgpu_amdkfd_device_init()
154 .num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec, in amdgpu_amdkfd_device_init()
155 .num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe, in amdgpu_amdkfd_device_init()
156 .gpuvm_size = min(adev->vm_manager.max_pfn in amdgpu_amdkfd_device_init()
159 .drm_render_minor = adev->ddev->render->index in amdgpu_amdkfd_device_init()
165 adev->gfx.mec.queue_bitmap, in amdgpu_amdkfd_device_init()
169 if (adev->gfx.kiq.ring.ready) in amdgpu_amdkfd_device_init()
170 clear_bit(amdgpu_gfx_queue_to_bit(adev, in amdgpu_amdkfd_device_init()
171 adev->gfx.kiq.ring.me - 1, in amdgpu_amdkfd_device_init()
172 adev->gfx.kiq.ring.pipe, in amdgpu_amdkfd_device_init()
173 adev->gfx.kiq.ring.queue), in amdgpu_amdkfd_device_init()
179 * adev->gfx.mec.num_pipe_per_mec in amdgpu_amdkfd_device_init()
180 * adev->gfx.mec.num_queue_per_pipe; in amdgpu_amdkfd_device_init()
184 amdgpu_doorbell_get_kfd_info(adev, in amdgpu_amdkfd_device_init()
188 if (adev->asic_type >= CHIP_VEGA10) { in amdgpu_amdkfd_device_init()
211 kgd2kfd->device_init(adev->kfd, &gpu_resources); in amdgpu_amdkfd_device_init()
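The init path builds the shared resources handed to kgd2kfd->device_init(): it copies the MEC compute queue bitmap, clears the bit occupied by the KIQ ring (note the me - 1, since me is 1-based), sizes the bitmap as num_mec * num_pipe_per_mec * num_queue_per_pipe, and fills in the doorbell ranges. A standalone sketch of the bitmap part; the counts and the queue_to_bit() linearization are simplified assumptions:

/* Sketch of how the compute queue bitmap handed to KFD is derived:
 * copy the MEC queue bitmap, then knock out the queue the KIQ ring occupies.
 * The counts and the queue_to_bit() formula are stand-ins for illustration. */
#include <stdio.h>
#include <stdint.h>

#define NUM_MEC            2
#define NUM_PIPE_PER_MEC   4
#define NUM_QUEUE_PER_PIPE 8
#define NUM_QUEUES         (NUM_MEC * NUM_PIPE_PER_MEC * NUM_QUEUE_PER_PIPE)

/* Linearize (mec, pipe, queue) into a bit index, mec counted from 0. */
static unsigned int queue_to_bit(unsigned int mec, unsigned int pipe, unsigned int queue)
{
	return (mec * NUM_PIPE_PER_MEC + pipe) * NUM_QUEUE_PER_PIPE + queue;
}

int main(void)
{
	uint64_t mec_queue_bitmap = 0x00000000ffffffffULL; /* queues set aside for compute */
	uint64_t kfd_queue_bitmap = mec_queue_bitmap;       /* start from the full compute set */

	/* The KIQ ring reports me=1, pipe=0, queue=0; me is 1-based, so subtract 1. */
	unsigned int kiq_me = 1, kiq_pipe = 0, kiq_queue = 0;
	unsigned int kiq_bit = queue_to_bit(kiq_me - 1, kiq_pipe, kiq_queue);

	kfd_queue_bitmap &= ~(1ULL << kiq_bit);  /* KFD must not touch the KIQ's queue */

	printf("queue_bitmap entries: %d\n", NUM_QUEUES);
	printf("KFD compute queue bitmap: 0x%016llx (bit %u cleared for KIQ)\n",
	       (unsigned long long)kfd_queue_bitmap, kiq_bit);
	return 0;
}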
215 void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev) in amdgpu_amdkfd_device_fini() argument
217 if (adev->kfd) { in amdgpu_amdkfd_device_fini()
218 kgd2kfd->device_exit(adev->kfd); in amdgpu_amdkfd_device_fini()
219 adev->kfd = NULL; in amdgpu_amdkfd_device_fini()
223 void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev, in amdgpu_amdkfd_interrupt() argument
226 if (adev->kfd) in amdgpu_amdkfd_interrupt()
227 kgd2kfd->interrupt(adev->kfd, ih_ring_entry); in amdgpu_amdkfd_interrupt()
230 void amdgpu_amdkfd_suspend(struct amdgpu_device *adev) in amdgpu_amdkfd_suspend() argument
232 if (adev->kfd) in amdgpu_amdkfd_suspend()
233 kgd2kfd->suspend(adev->kfd); in amdgpu_amdkfd_suspend()
236 int amdgpu_amdkfd_resume(struct amdgpu_device *adev) in amdgpu_amdkfd_resume() argument
240 if (adev->kfd) in amdgpu_amdkfd_resume()
241 r = kgd2kfd->resume(adev->kfd); in amdgpu_amdkfd_resume()
246 int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev) in amdgpu_amdkfd_pre_reset() argument
250 if (adev->kfd) in amdgpu_amdkfd_pre_reset()
251 r = kgd2kfd->pre_reset(adev->kfd); in amdgpu_amdkfd_pre_reset()
256 int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev) in amdgpu_amdkfd_post_reset() argument
260 if (adev->kfd) in amdgpu_amdkfd_post_reset()
261 r = kgd2kfd->post_reset(adev->kfd); in amdgpu_amdkfd_post_reset()
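The interrupt, suspend, resume, pre_reset and post_reset wrappers all share one shape: forward to the matching kgd2kfd callback only when KFD actually probed this device (adev->kfd non-NULL), otherwise do nothing and report success. A minimal sketch of that guard, with a hypothetical ops table standing in for the kgd2kfd interface:

/* Sketch of the "only if KFD attached" delegation used by the suspend/resume/reset wrappers.
 * struct kfd_ops and the field names are hypothetical stand-ins for the kgd2kfd interface. */
#include <stdio.h>

struct kfd_dev { int id; };

struct kfd_ops {
	int (*resume)(struct kfd_dev *kfd);
};

struct device {
	struct kfd_dev *kfd;        /* NULL when KFD did not probe this GPU */
	const struct kfd_ops *ops;
};

static int device_resume(struct device *dev)
{
	int r = 0;

	if (dev->kfd)               /* skip silently when KFD is not attached */
		r = dev->ops->resume(dev->kfd);
	return r;
}

static int my_resume(struct kfd_dev *kfd)
{
	printf("resuming kfd %d\n", kfd->id);
	return 0;
}

int main(void)
{
	static const struct kfd_ops ops = { .resume = my_resume };
	struct kfd_dev kfd = { .id = 0 };
	struct device with_kfd = { .kfd = &kfd, .ops = &ops };
	struct device without_kfd = { .kfd = NULL, .ops = &ops };

	printf("with kfd: %d\n", device_resume(&with_kfd));
	printf("without kfd: %d\n", device_resume(&without_kfd));
	return 0;
}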
268 struct amdgpu_device *adev = (struct amdgpu_device *)kgd; in amdgpu_amdkfd_gpu_reset() local
270 amdgpu_device_gpu_recover(adev, NULL, false); in amdgpu_amdkfd_gpu_reset()
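amdgpu_amdkfd_gpu_reset(), like the other kgd-facing entry points further down (alloc_gtt_mem, get_local_mem_info, and so on), starts by casting the opaque kgd handle back to the driver's own amdgpu_device. A sketch of that opaque-handle convention with simplified types:

/* Sketch of the opaque-handle convention: KFD holds a `struct kgd_dev *` that is
 * really the driver's own device struct, and each wrapper casts it back on entry.
 * Types here are simplified stand-ins, not the kernel structs. */
#include <stdio.h>

struct kgd_dev;  /* opaque from KFD's point of view */

struct gpu_device_sketch { const char *name; };

static void gpu_reset(struct kgd_dev *kgd)
{
	/* The driver handed out this pointer itself, so the cast back is safe. */
	struct gpu_device_sketch *adev = (struct gpu_device_sketch *)kgd;

	printf("recovering %s\n", adev->name);
}

int main(void)
{
	struct gpu_device_sketch adev = { "gpu0" };

	gpu_reset((struct kgd_dev *)&adev);
	return 0;
}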
277 struct amdgpu_device *adev = (struct amdgpu_device *)kgd; in alloc_gtt_mem() local
294 r = amdgpu_bo_create(adev, &bp, &bo); in alloc_gtt_mem()
296 dev_err(adev->dev, in alloc_gtt_mem()
304 dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r); in alloc_gtt_mem()
310 dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r); in alloc_gtt_mem()
316 dev_err(adev->dev, "%p bind failed\n", bo); in alloc_gtt_mem()
322 dev_err(adev->dev, in alloc_gtt_mem()
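alloc_gtt_mem() walks a create/reserve/pin/map ladder and unwinds whatever already succeeded when a later step fails, logging through dev_err() at each failure point. A standalone sketch of that goto-based unwind shape; the bo_* helpers are hypothetical:

/* Sketch of the create/reserve/pin error-unwind ladder used by alloc_gtt_mem().
 * The bo_* helpers are hypothetical; only the goto-based cleanup shape matters. */
#include <stdio.h>

struct bo { int created, reserved; };

static int bo_create(struct bo *bo)  { bo->created = 1;  return 0; }
static int bo_reserve(struct bo *bo) { bo->reserved = 1; return 0; }
static int bo_pin(struct bo *bo)     { (void)bo; return -1; /* pretend pinning failed */ }
static void bo_unreserve(struct bo *bo) { bo->reserved = 0; }
static void bo_unref(struct bo *bo)     { bo->created = 0; }

static int alloc_gtt_mem_sketch(struct bo *bo)
{
	int r;

	r = bo_create(bo);
	if (r) {
		fprintf(stderr, "(%d) failed to create bo\n", r);
		return r;
	}

	r = bo_reserve(bo);
	if (r) {
		fprintf(stderr, "(%d) failed to reserve bo\n", r);
		goto err_unref;
	}

	r = bo_pin(bo);
	if (r) {
		fprintf(stderr, "(%d) failed to pin bo\n", r);
		goto err_unreserve;
	}

	bo_unreserve(bo);
	return 0;

err_unreserve:
	bo_unreserve(bo);
err_unref:
	bo_unref(bo);
	return r;
}

int main(void)
{
	struct bo bo = { 0 };

	return alloc_gtt_mem_sketch(&bo) ? 1 : 0;
}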
359 struct amdgpu_device *adev = (struct amdgpu_device *)kgd; in get_local_mem_info() local
360 uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask : in get_local_mem_info()
362 resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size; in get_local_mem_info()
365 if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) { in get_local_mem_info()
366 mem_info->local_mem_size_public = adev->gmc.visible_vram_size; in get_local_mem_info()
367 mem_info->local_mem_size_private = adev->gmc.real_vram_size - in get_local_mem_info()
368 adev->gmc.visible_vram_size; in get_local_mem_info()
371 mem_info->local_mem_size_private = adev->gmc.real_vram_size; in get_local_mem_info()
373 mem_info->vram_width = adev->gmc.vram_width; in get_local_mem_info()
376 &adev->gmc.aper_base, &aper_limit, in get_local_mem_info()
380 if (amdgpu_sriov_vf(adev)) in get_local_mem_info()
381 mem_info->mem_clk_max = adev->clock.default_mclk / 100; in get_local_mem_info()
382 else if (adev->powerplay.pp_funcs) in get_local_mem_info()
383 mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100; in get_local_mem_info()
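get_local_mem_info() splits VRAM into a "public" (CPU-visible) and a "private" part: if both ends of the CPU-visible aperture fit inside the device's DMA mask, the visible portion is reported as public and the rest as private; otherwise all of real_vram_size is reported as private. A self-contained sketch of that split with invented sizes:

/* Sketch of the public/private VRAM split reported by get_local_mem_info().
 * The sizes and the DMA mask are invented example values. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t dma_mask     = (1ULL << 44) - 1;   /* device can address 44 bits */
	uint64_t address_mask = ~dma_mask;          /* bits the device cannot reach */
	uint64_t aper_base    = 0x20000000000ULL;   /* CPU-visible VRAM aperture start */
	uint64_t visible_vram = 256ULL << 20;       /* 256 MiB visible through the BAR */
	uint64_t real_vram    = 8ULL << 30;         /* 8 GiB installed */
	uint64_t aper_limit   = aper_base + visible_vram;

	uint64_t pub, priv;

	if (!((aper_base & address_mask) || (aper_limit & address_mask))) {
		/* Whole aperture is reachable: visible VRAM is "public", the rest "private". */
		pub  = visible_vram;
		priv = real_vram - visible_vram;
	} else {
		/* Aperture not addressable by the device: everything counts as private. */
		pub  = 0;
		priv = real_vram;
	}

	printf("public: %llu MiB, private: %llu MiB\n",
	       (unsigned long long)(pub >> 20), (unsigned long long)(priv >> 20));
	return 0;
}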
390 struct amdgpu_device *adev = (struct amdgpu_device *)kgd; in get_gpu_clock_counter() local
392 if (adev->gfx.funcs->get_gpu_clock_counter) in get_gpu_clock_counter()
393 return adev->gfx.funcs->get_gpu_clock_counter(adev); in get_gpu_clock_counter()
399 struct amdgpu_device *adev = (struct amdgpu_device *)kgd; in get_max_engine_clock_in_mhz() local
402 if (amdgpu_sriov_vf(adev)) in get_max_engine_clock_in_mhz()
403 return adev->clock.default_sclk / 100; in get_max_engine_clock_in_mhz()
404 else if (adev->powerplay.pp_funcs) in get_max_engine_clock_in_mhz()
405 return amdgpu_dpm_get_sclk(adev, false) / 100; in get_max_engine_clock_in_mhz()
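Both the memory-clock and engine-clock queries above divide the raw value by 100 before returning it: the driver keeps clocks in 10 kHz units, so /100 yields MHz. A one-line illustration with an invented value:

/* The clock queries return values in 10 kHz units; /100 converts to MHz.
 * e.g. a default_sclk of 130000 (10 kHz units) is a 1300 MHz clock. */
#include <stdio.h>

int main(void)
{
	unsigned int default_sclk_10khz = 130000;  /* invented example value */

	printf("%u MHz\n", default_sclk_10khz / 100);  /* prints 1300 */
	return 0;
}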
412 struct amdgpu_device *adev = (struct amdgpu_device *)kgd; in get_cu_info() local
413 struct amdgpu_cu_info acu_info = adev->gfx.cu_info; in get_cu_info()
423 cu_info->num_shader_engines = adev->gfx.config.max_shader_engines; in get_cu_info()
424 cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se; in get_cu_info()
425 cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh; in get_cu_info()
435 struct amdgpu_device *adev = (struct amdgpu_device *)kgd; in amdgpu_amdkfd_get_vram_usage() local
437 return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); in amdgpu_amdkfd_get_vram_usage()
444 struct amdgpu_device *adev = (struct amdgpu_device *)kgd; in amdgpu_amdkfd_submit_ib() local
453 ring = &adev->gfx.compute_ring[0]; in amdgpu_amdkfd_submit_ib()
456 ring = &adev->sdma.instance[0].ring; in amdgpu_amdkfd_submit_ib()
459 ring = &adev->sdma.instance[1].ring; in amdgpu_amdkfd_submit_ib()
467 ret = amdgpu_job_alloc(adev, 1, &job, NULL); in amdgpu_amdkfd_submit_ib()
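amdgpu_amdkfd_submit_ib() first maps the KFD-visible engine id onto one of the driver's rings (the first compute ring for MEC1, SDMA instance 0 or 1 for the SDMA engines), then allocates a job, copies the IB in, submits it and waits on the fence. A sketch of the ring-selection step with simplified types:

/* Sketch of the engine-to-ring selection in amdgpu_amdkfd_submit_ib().
 * The engine enum and ring layout are simplified stand-ins. */
#include <stdio.h>

enum kgd_engine { ENGINE_MEC1, ENGINE_SDMA1, ENGINE_SDMA2 };

struct ring { const char *name; };

struct gpu {
	struct ring compute_ring[8];
	struct ring sdma_ring[2];
};

/* Map the KFD-visible engine id onto one of the driver's hardware rings. */
static struct ring *pick_ring(struct gpu *gpu, enum kgd_engine engine)
{
	switch (engine) {
	case ENGINE_MEC1:
		return &gpu->compute_ring[0];   /* first compute ring */
	case ENGINE_SDMA1:
		return &gpu->sdma_ring[0];
	case ENGINE_SDMA2:
		return &gpu->sdma_ring[1];
	default:
		return NULL;                    /* unknown engine: reject the submission */
	}
}

int main(void)
{
	struct gpu gpu = {
		.compute_ring = { { "comp0" } },
		.sdma_ring = { { "sdma0" }, { "sdma1" } },
	};
	struct ring *ring = pick_ring(&gpu, ENGINE_SDMA2);

	printf("submitting IB on %s\n", ring ? ring->name : "(none)");
	return 0;
}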
497 struct amdgpu_device *adev = (struct amdgpu_device *)kgd; in amdgpu_amdkfd_set_compute_idle() local
499 amdgpu_dpm_switch_power_profile(adev, in amdgpu_amdkfd_set_compute_idle()
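amdgpu_amdkfd_set_compute_idle() toggles the compute power profile as KFD work starts and drains; note the inversion: the profile is enabled when idle is false. A minimal sketch with a hypothetical profile enum and helper:

/* Sketch of toggling the compute power profile on busy/idle transitions.
 * The profile ids and switch_power_profile() helper are hypothetical. */
#include <stdbool.h>
#include <stdio.h>

enum power_profile { PROFILE_DEFAULT, PROFILE_COMPUTE };

static void switch_power_profile(enum power_profile profile, bool enable)
{
	(void)profile;
	printf("%s compute profile\n", enable ? "enable" : "disable");
}

/* idle == true means compute work just drained, so drop back out of the compute profile. */
static void set_compute_idle(bool idle)
{
	switch_power_profile(PROFILE_COMPUTE, !idle);
}

int main(void)
{
	set_compute_idle(false);  /* compute work queued: enable the profile */
	set_compute_idle(true);   /* queues drained: disable it again */
	return 0;
}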
503 bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid) in amdgpu_amdkfd_is_kfd_vmid() argument
505 if (adev->kfd) { in amdgpu_amdkfd_is_kfd_vmid()
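amdgpu_amdkfd_is_kfd_vmid() answers whether a GPUVM id belongs to the compute/KFD partition, and it only ever says yes when KFD is attached. A sketch of that bitmap test; the 0xff00 bitmap (VMIDs 8-15 reserved for KFD) is an assumption for illustration:

/* Sketch of the VMID ownership test: GPUVM ids are partitioned between graphics
 * and KFD, and a bitmap records which ids belong to compute.
 * The 0xff00 value is an assumed example, not taken from the kernel source here. */
#include <stdbool.h>
#include <stdio.h>

#define COMPUTE_VMID_BITMAP 0xff00u

static bool is_kfd_vmid(bool kfd_attached, unsigned int vmid)
{
	if (kfd_attached)
		return (1u << vmid) & COMPUTE_VMID_BITMAP;
	return false;
}

int main(void)
{
	printf("vmid 3 -> %d, vmid 9 -> %d\n",
	       is_kfd_vmid(true, 3), is_kfd_vmid(true, 9));
	return 0;
}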
523 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, in amdgpu_amdkfd_gpuvm_destroy_cb() argument