| /Linux-v5.10/drivers/gpu/drm/amd/amdkfd/ |
| D | kfd_device.c |
      527  static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
      529  static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
      531  static int kfd_resume(struct kfd_dev *kfd);
      536  struct kfd_dev *kfd;  in kgd2kfd_probe() local
      555  kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);  in kgd2kfd_probe()
      556  if (!kfd)  in kgd2kfd_probe()
      563  kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kgd);  in kgd2kfd_probe()
      565  !kfd->pci_atomic_requested) {  in kgd2kfd_probe()
      569  kfree(kfd);  in kgd2kfd_probe()
      573  kfd->kgd = kgd;  in kgd2kfd_probe()
      [all …]
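The kgd2kfd_probe() hits above show an allocate-then-bail pattern: the device struct is zero-allocated, the PCIe-atomics capability is checked, and the allocation is dropped if a required capability is missing. A minimal sketch of that pattern, assuming in-tree context for amdgpu_amdkfd_have_atomics_support(); struct kfd_dev_sketch and the device_needs_atomics parameter are simplifications for illustration, not the real definitions:

```c
#include <linux/slab.h>

struct kfd_dev_sketch {
	struct kgd_dev *kgd;
	bool pci_atomic_requested;
};

static struct kfd_dev_sketch *probe_sketch(struct kgd_dev *kgd,
					   bool device_needs_atomics)
{
	struct kfd_dev_sketch *kfd;

	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
	if (!kfd)
		return NULL;

	/* If the ASIC needs PCIe atomics the host can't provide, give up. */
	kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kgd);
	if (device_needs_atomics && !kfd->pci_atomic_requested) {
		kfree(kfd);
		return NULL;
	}

	kfd->kgd = kgd;
	return kfd;
}
```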
|
| D | kfd_doorbell.c |
      49  size_t kfd_doorbell_process_slice(struct kfd_dev *kfd)  in kfd_doorbell_process_slice() argument
      51  return roundup(kfd->device_info->doorbell_size *  in kfd_doorbell_process_slice()
      57  int kfd_doorbell_init(struct kfd_dev *kfd)  in kfd_doorbell_init() argument
      70  roundup(kfd->shared_resources.doorbell_start_offset,  in kfd_doorbell_init()
      71  kfd_doorbell_process_slice(kfd));  in kfd_doorbell_init()
      74  rounddown(kfd->shared_resources.doorbell_aperture_size,  in kfd_doorbell_init()
      75  kfd_doorbell_process_slice(kfd));  in kfd_doorbell_init()
      80  kfd_doorbell_process_slice(kfd);  in kfd_doorbell_init()
      84  if (!kfd->max_doorbell_slices ||  in kfd_doorbell_init()
      85  doorbell_process_limit < kfd->max_doorbell_slices)  in kfd_doorbell_init()
      [all …]
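Hit 51 is cut off mid-expression, but together the hits imply the doorbell arithmetic: a per-process "slice" padded to a page, and the aperture divided into as many slices as fit. A sketch of that arithmetic, assuming QUEUES_PER_PROCESS as a stand-in for the truncated multiplier and using the kernel's roundup()/rounddown() helpers:

```c
#include <linux/kernel.h>	/* roundup(), rounddown() */

/* Stand-in for the multiplier truncated at hit 51; an assumption for
 * illustration, not the verified kernel constant. */
#define QUEUES_PER_PROCESS	1024

/* One doorbell slice = every doorbell one process can own, padded to a
 * whole page so each process can get its own mapping. */
static size_t doorbell_process_slice(size_t doorbell_size, size_t page_size)
{
	return roundup(doorbell_size * QUEUES_PER_PROCESS, page_size);
}

/* How many processes fit in the doorbell aperture: align the start up and
 * the aperture size down to a slice, then divide. */
static size_t doorbell_process_limit(size_t aperture_size, size_t start_offset,
				     size_t slice)
{
	size_t start = roundup(start_offset, slice);
	size_t size = rounddown(aperture_size, slice);

	if (size <= start)
		return 0;	/* aperture too small for even one slice */

	return (size - start) / slice;
}
```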
|
| D | kfd_interrupt.c |
      52  int kfd_interrupt_init(struct kfd_dev *kfd)  in kfd_interrupt_init() argument
      56  r = kfifo_alloc(&kfd->ih_fifo,  in kfd_interrupt_init()
      57  KFD_IH_NUM_ENTRIES * kfd->device_info->ih_ring_entry_size,  in kfd_interrupt_init()
      64  kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);  in kfd_interrupt_init()
      65  if (unlikely(!kfd->ih_wq)) {  in kfd_interrupt_init()
      66  kfifo_free(&kfd->ih_fifo);  in kfd_interrupt_init()
      70  spin_lock_init(&kfd->interrupt_lock);  in kfd_interrupt_init()
      72  INIT_WORK(&kfd->interrupt_work, interrupt_wq);  in kfd_interrupt_init()
      74  kfd->interrupts_active = true;  in kfd_interrupt_init()
      86  void kfd_interrupt_exit(struct kfd_dev *kfd)  in kfd_interrupt_exit() argument
      [all …]
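Taken together, the kfd_interrupt_init() hits outline a common bottom-half setup: a kfifo buffers raw IH ring entries and a single-threaded, high-priority workqueue drains them outside interrupt context. A sketch of that sequence with the error unwinding made explicit, following the field and symbol names in the snippet (in-tree context assumed):

```c
static int interrupt_init_sketch(struct kfd_dev *kfd)
{
	int r;

	/* Ring buffer sized for KFD_IH_NUM_ENTRIES raw IH entries. */
	r = kfifo_alloc(&kfd->ih_fifo,
			KFD_IH_NUM_ENTRIES * kfd->device_info->ih_ring_entry_size,
			GFP_KERNEL);
	if (r)
		return r;

	/* One high-priority worker thread that drains the fifo. */
	kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
	if (unlikely(!kfd->ih_wq)) {
		kfifo_free(&kfd->ih_fifo);	/* undo the allocation above */
		return -ENOMEM;
	}

	spin_lock_init(&kfd->interrupt_lock);
	INIT_WORK(&kfd->interrupt_work, interrupt_wq);

	/* Only now may the top half start queueing entries. */
	kfd->interrupts_active = true;
	return 0;
}
```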
|
| D | kfd_iommu.c |
      39  int kfd_iommu_check_device(struct kfd_dev *kfd)  in kfd_iommu_check_device() argument
      44  if (!kfd->use_iommu_v2)  in kfd_iommu_check_device()
      48  err = amd_iommu_device_info(kfd->pdev, &iommu_info);  in kfd_iommu_check_device()
      60  int kfd_iommu_device_init(struct kfd_dev *kfd)  in kfd_iommu_device_init() argument
      66  if (!kfd->use_iommu_v2)  in kfd_iommu_device_init()
      70  err = amd_iommu_device_info(kfd->pdev, &iommu_info);  in kfd_iommu_device_init()
      88  (unsigned int)(1 << kfd->device_info->max_pasid_bits),  in kfd_iommu_device_init()
      214  static int kfd_bind_processes_to_device(struct kfd_dev *kfd)  in kfd_bind_processes_to_device() argument
      225  pdd = kfd_get_process_device_data(kfd, p);  in kfd_bind_processes_to_device()
      232  err = amd_iommu_bind_pasid(kfd->pdev, p->pasid,  in kfd_bind_processes_to_device()
      [all …]
|
| D | kfd_iommu.h |
      30  int kfd_iommu_check_device(struct kfd_dev *kfd);
      31  int kfd_iommu_device_init(struct kfd_dev *kfd);
      36  void kfd_iommu_suspend(struct kfd_dev *kfd);
      37  int kfd_iommu_resume(struct kfd_dev *kfd);
      43  static inline int kfd_iommu_check_device(struct kfd_dev *kfd)  in kfd_iommu_check_device() argument
      47  static inline int kfd_iommu_device_init(struct kfd_dev *kfd)  in kfd_iommu_device_init() argument
      62  static inline void kfd_iommu_suspend(struct kfd_dev *kfd)  in kfd_iommu_suspend() argument
      66  static inline int kfd_iommu_resume(struct kfd_dev *kfd)  in kfd_iommu_resume() argument
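The mix of real prototypes (hits 30-37) and static inline stubs (hits 43 onward) is the usual Kconfig-gated header layout. A sketch of how kfd_iommu.h is presumably organized; the exact gate, written here as CONFIG_AMD_IOMMU_V2, is an assumption:

```c
#if IS_ENABLED(CONFIG_AMD_IOMMU_V2)	/* assumed gate */

int kfd_iommu_check_device(struct kfd_dev *kfd);
int kfd_iommu_device_init(struct kfd_dev *kfd);
void kfd_iommu_suspend(struct kfd_dev *kfd);
int kfd_iommu_resume(struct kfd_dev *kfd);

#else

/* Without IOMMUv2 the callers still compile; the helpers just report
 * "no device" or succeed as no-ops. */
static inline int kfd_iommu_check_device(struct kfd_dev *kfd)
{
	return -ENODEV;
}

static inline int kfd_iommu_device_init(struct kfd_dev *kfd)
{
	return 0;
}

static inline void kfd_iommu_suspend(struct kfd_dev *kfd)
{
}

static inline int kfd_iommu_resume(struct kfd_dev *kfd)
{
	return 0;
}

#endif
```

This lets every caller invoke the IOMMU helpers unconditionally instead of scattering #ifdefs through the driver.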
|
| D | kfd_priv.h |
      890  size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
      891  int kfd_doorbell_init(struct kfd_dev *kfd);
      892  void kfd_doorbell_fini(struct kfd_dev *kfd);
      895  void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
      897  void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
      901  unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
      905  int kfd_alloc_process_doorbells(struct kfd_dev *kfd,
      907  void kfd_free_process_doorbells(struct kfd_dev *kfd,
      911  int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
      914  int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj);
      [all …]
|
| D | kfd_mqd_manager_v9.c |
      84  static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,  in allocate_mqd() argument
      106  if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {  in allocate_mqd()
      110  retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,  in allocate_mqd()
      117  retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v9_mqd),  in allocate_mqd()
      292  struct kfd_dev *kfd = mm->dev;  in free_mqd() local
      295  amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);  in free_mqd()
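Hits 106-117 show allocate_mqd() choosing between two backing stores: a dedicated GTT buffer object when CWSR is enabled for a compute queue, or the shared GTT sub-allocator otherwise. A sketch of that branch; mqd_size_sketch stands in for the real size computation, which the listing truncates, and the helper names come straight from the hits (in-tree context assumed):

```c
static struct kfd_mem_obj *allocate_mqd_sketch(struct kfd_dev *kfd,
					       struct queue_properties *q,
					       size_t mqd_size_sketch)
{
	struct kfd_mem_obj *mqd_mem_obj = NULL;
	int retval;

	if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
		/* CWSR compute queues get a dedicated, contiguous GTT BO
		 * (the full code also packs the control stack behind the MQD). */
		mqd_mem_obj = kzalloc(sizeof(*mqd_mem_obj), GFP_KERNEL);
		if (!mqd_mem_obj)
			return NULL;
		retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd, mqd_size_sketch,
				&mqd_mem_obj->gtt_mem,
				&mqd_mem_obj->gpu_addr,
				(void *)&mqd_mem_obj->cpu_ptr, true);
	} else {
		/* Otherwise a small carve-out from the shared GTT
		 * sub-allocator is enough. */
		retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v9_mqd),
				&mqd_mem_obj);
	}

	if (retval) {
		kfree(mqd_mem_obj);
		return NULL;
	}

	return mqd_mem_obj;
}
```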
|
| D | kfd_mqd_manager.h | 69 struct kfd_mem_obj* (*allocate_mqd)(struct kfd_dev *kfd,
|
| D | kfd_packet_manager_v9.c |
      82  struct kfd_dev *kfd = pm->dqm->dev;  in pm_runlist_v9() local
      94  kfd->max_proc_per_quantum);  in pm_runlist_v9()
|
| D | kfd_packet_manager_vi.c |
      79  struct kfd_dev *kfd = pm->dqm->dev;  in pm_runlist_vi() local
      94  kfd->max_proc_per_quantum);  in pm_runlist_vi()
|
| D | kfd_mqd_manager_cik.c |
      75  static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,  in allocate_mqd() argument
      80  if (kfd_gtt_sa_allocate(kfd, sizeof(struct cik_mqd),  in allocate_mqd()
|
| D | kfd_mqd_manager_v10.c |
      75  static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,  in allocate_mqd() argument
      80  if (kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd),  in allocate_mqd()
|
| D | kfd_mqd_manager_vi.c |
      78  static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,  in allocate_mqd() argument
      83  if (kfd_gtt_sa_allocate(kfd, sizeof(struct vi_mqd),  in allocate_mqd()
|
| D | kfd_chardev.c |
      1030  struct kfd_dev *kfd;  in kfd_ioctl_create_event() local
      1040  kfd = kfd_device_by_id(GET_GPU_ID(args->event_page_offset));  in kfd_ioctl_create_event()
      1041  if (!kfd) {  in kfd_ioctl_create_event()
      1047  pdd = kfd_bind_process_to_device(kfd, p);  in kfd_ioctl_create_event()
      1063  err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->kgd,  in kfd_ioctl_create_event()
|
| /Linux-v5.10/drivers/gpu/drm/amd/amdgpu/ |
| D | amdgpu_amdkfd.c |
      76  adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,  in amdgpu_amdkfd_device_probe()
      79  if (adev->kfd.dev)  in amdgpu_amdkfd_device_probe()
      121  if (adev->kfd.dev) {  in amdgpu_amdkfd_device_init()
      172  kgd2kfd_device_init(adev->kfd.dev, adev_to_drm(adev), &gpu_resources);  in amdgpu_amdkfd_device_init()
      178  if (adev->kfd.dev) {  in amdgpu_amdkfd_device_fini()
      179  kgd2kfd_device_exit(adev->kfd.dev);  in amdgpu_amdkfd_device_fini()
      180  adev->kfd.dev = NULL;  in amdgpu_amdkfd_device_fini()
      187  if (adev->kfd.dev)  in amdgpu_amdkfd_interrupt()
      188  kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);  in amdgpu_amdkfd_interrupt()
      193  if (adev->kfd.dev)  in amdgpu_amdkfd_suspend()
      [all …]
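Nearly every hit in amdgpu_amdkfd.c is the same guard: amdgpu only calls through the kgd2kfd_* interface when a KFD device was actually probed. A sketch of one such wrapper (in-tree context assumed):

```c
/* Forward an IH ring entry to KFD only if KFD attached to this GPU;
 * otherwise the call is silently a no-op, so amdgpu never depends on KFD
 * having probed successfully or even being built in. */
void amdgpu_amdkfd_interrupt_sketch(struct amdgpu_device *adev,
				    const void *ih_ring_entry)
{
	if (adev->kfd.dev)
		kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}
```

The same NULL check appears in the init, fini, and suspend paths listed above, which is what keeps KFD strictly optional from amdgpu's point of view.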
|
| D | amdgpu_amdkfd.h |
      260  bool kgd2kfd_device_init(struct kfd_dev *kfd,
      263  void kgd2kfd_device_exit(struct kfd_dev *kfd);
      264  void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
      265  int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
      266  int kgd2kfd_pre_reset(struct kfd_dev *kfd);
      267  int kgd2kfd_post_reset(struct kfd_dev *kfd);
      268  void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
      273  void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
      274  void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask);
|
| D | amdgpu_umc.c | 97 kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); in amdgpu_umc_process_ras_data_cb()
|
| D | amdgpu_sdma.c | 162 kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); in amdgpu_sdma_process_ras_data_cb()
|
| D | amdgpu_amdkfd_gpuvm.c |
      159  (adev->kfd.vram_used + vram_needed >  in amdgpu_amdkfd_reserve_mem_limit()
      165  adev->kfd.vram_used += vram_needed;  in amdgpu_amdkfd_reserve_mem_limit()
      191  adev->kfd.vram_used -= size;  in unreserve_mem_limit()
      192  WARN_ONCE(adev->kfd.vram_used < 0,  in unreserve_mem_limit()
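These four hits sketch KFD's VRAM budget accounting: a reservation bumps a running counter after checking the limit, and a release decrements it and warns if the counter ever goes negative. A minimal sketch with the counter and limit passed in explicitly; in the driver the counter lives in adev->kfd.vram_used, the limit comes from the driver's memory-limit computation, and updates happen under a lock, all of which is omitted here:

```c
static int reserve_vram_sketch(int64_t *vram_used, int64_t vram_needed,
			       int64_t vram_limit)
{
	/* Refuse reservations that would blow the per-device budget. */
	if (*vram_used + vram_needed > vram_limit)
		return -ENOMEM;

	*vram_used += vram_needed;
	return 0;
}

static void unreserve_vram_sketch(int64_t *vram_used, int64_t size)
{
	*vram_used -= size;
	/* A negative counter means reserve/unreserve calls got unbalanced. */
	WARN_ONCE(*vram_used < 0, "KFD VRAM accounting went negative");
}
```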
|
| D | amdgpu_gfx.c | 661 kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); in amdgpu_gfx_process_ras_data_cb()
|
| /Linux-v5.10/samples/bpf/ |
| D | task_fd_query_user.c |
      223  int err, res, kfd, efd;  in test_debug_fs_uprobe() local
      228  kfd = open(buf, O_WRONLY | O_APPEND, 0);  in test_debug_fs_uprobe()
      229  CHECK_PERROR_RET(kfd < 0);  in test_debug_fs_uprobe()
      238  CHECK_PERROR_RET(write(kfd, buf, strlen(buf)) < 0);  in test_debug_fs_uprobe()
      240  close(kfd);  in test_debug_fs_uprobe()
      241  kfd = -1;  in test_debug_fs_uprobe()
      257  kfd = sys_perf_event_open(&attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);  in test_debug_fs_uprobe()
      258  CHECK_PERROR_RET(kfd < 0);  in test_debug_fs_uprobe()
      259  CHECK_PERROR_RET(ioctl(kfd, PERF_EVENT_IOC_SET_BPF, prog_fd[0]) < 0);  in test_debug_fs_uprobe()
      260  CHECK_PERROR_RET(ioctl(kfd, PERF_EVENT_IOC_ENABLE, 0) < 0);  in test_debug_fs_uprobe()
      [all …]
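In this sample, kfd is an ordinary file descriptor rather than a KFD device: it first points at a tracefs *probe_events control file and later at a perf event fd. A userspace sketch of that flow; the helper name, the probe_def string, and the caller-supplied ev_id are illustrative, and sys_perf_event_open in the sample is just a syscall(2) wrapper like the one used below:

```c
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int attach_bpf_to_probe(const char *events_file, const char *probe_def,
			       int ev_id, int bpf_prog_fd)
{
	struct perf_event_attr attr = {};
	int kfd, pfd;

	/* 1. Register the probe, e.g. "p:uprobes/my_probe /bin/bash:0x1234". */
	kfd = open(events_file, O_WRONLY | O_APPEND, 0);
	if (kfd < 0)
		return -1;
	if (write(kfd, probe_def, strlen(probe_def)) < 0) {
		close(kfd);
		return -1;
	}
	close(kfd);

	/* 2. Open the registered probe as a tracepoint event; ev_id comes
	 *    from the probe's .../id file, read by the caller. */
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.size = sizeof(attr);
	attr.config = ev_id;
	pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
		      -1 /* group */, PERF_FLAG_FD_CLOEXEC);
	if (pfd < 0)
		return -1;

	/* 3. Attach the BPF program and start the event. */
	if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, bpf_prog_fd) < 0 ||
	    ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
		close(pfd);
		return -1;
	}

	return pfd;
}
```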
|
| /Linux-v5.10/tools/perf/ |
| D | builtin-probe.c |
      422  int ret, ret2, ufd = -1, kfd = -1;  in perf_del_probe_events() local
      436  ret = probe_file__open_both(&kfd, &ufd, PF_FL_RW);  in perf_del_probe_events()
      447  ret = probe_file__get_events(kfd, filter, klist);  in perf_del_probe_events()
      452  ret = probe_file__del_strlist(kfd, klist);  in perf_del_probe_events()
      475  if (kfd >= 0)  in perf_del_probe_events()
      476  close(kfd);  in perf_del_probe_events()
|
| /Linux-v5.10/tools/perf/util/ |
| D | probe-file.c |
      117  int probe_file__open_both(int *kfd, int *ufd, int flag)  in probe_file__open_both() argument
      119  if (!kfd || !ufd)  in probe_file__open_both()
      122  *kfd = open_kprobe_events(flag & PF_FL_RW);  in probe_file__open_both()
      124  if (*kfd < 0 && *ufd < 0) {  in probe_file__open_both()
      125  print_both_open_warning(*kfd, *ufd);  in probe_file__open_both()
      126  return *kfd;  in probe_file__open_both()
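Here kfd means "kprobe events fd": the helper hands back descriptors for both the kprobe and uprobe definition files and fails only if neither can be opened. A hedged reconstruction from the hits above, filling only the line not shown (123), which is assumed to open uprobe_events the same way line 122 opens kprobe_events; the called helpers are perf-internal functions from the same file:

```c
int probe_file__open_both(int *kfd, int *ufd, int flag)
{
	if (!kfd || !ufd)
		return -EINVAL;

	*kfd = open_kprobe_events(flag & PF_FL_RW);
	*ufd = open_uprobe_events(flag & PF_FL_RW);	/* assumed, line 123 */
	if (*kfd < 0 && *ufd < 0) {
		/* Neither kernel nor user probe file is usable: warn and
		 * propagate the kprobe-side error code. */
		print_both_open_warning(*kfd, *ufd);
		return *kfd;
	}

	return 0;
}
```

The callers in builtin-probe.c and probe-event.c therefore always check each fd for being >= 0 before using or closing it, as the listings above and below show.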
|
| D | probe-file.h | 42 int probe_file__open_both(int *kfd, int *ufd, int flag);
|
| D | probe-event.c |
      3568  int ret, ret2, ufd = -1, kfd = -1;  in del_perf_probe_events() local
      3575  ret = probe_file__open_both(&kfd, &ufd, PF_FL_RW);  in del_perf_probe_events()
      3579  ret = probe_file__del_events(kfd, filter);  in del_perf_probe_events()
      3591  if (kfd >= 0)  in del_perf_probe_events()
      3592  close(kfd);  in del_perf_probe_events()
|