Lines matching refs: args (amdkfd ioctl handlers, drivers/gpu/drm/amd/amdkfd/kfd_chardev.c)
139 struct kfd_ioctl_get_version_args *args = data; in kfd_ioctl_get_version() local
141 args->major_version = KFD_IOCTL_MAJOR_VERSION; in kfd_ioctl_get_version()
142 args->minor_version = KFD_IOCTL_MINOR_VERSION; in kfd_ioctl_get_version()
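
For orientation, a minimal userspace sketch of exercising this handler. It assumes the standard /dev/kfd device node and the uapi header linux/kfd_ioctl.h; the handler itself only fills in the two version fields. The later sketches reuse these includes plus stdint.h and sys/mman.h, and assume kfd_fd is a descriptor opened the same way.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/kfd_ioctl.h>

    int main(void)
    {
        struct kfd_ioctl_get_version_args args = {0};
        int fd = open("/dev/kfd", O_RDWR | O_CLOEXEC);

        if (fd < 0)
            return 1;
        /* The kernel writes major/minor back into the args struct. */
        if (ioctl(fd, AMDKFD_IOC_GET_VERSION, &args) == 0)
            printf("KFD ioctl interface %u.%u\n",
                   args.major_version, args.minor_version);
        close(fd);
        return 0;
    }
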
148 struct kfd_ioctl_create_queue_args *args) in set_queue_properties_from_user() argument
150 if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) { in set_queue_properties_from_user()
155 if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) { in set_queue_properties_from_user()
160 if ((args->ring_base_address) && in set_queue_properties_from_user()
161 (!access_ok((const void __user *) args->ring_base_address, in set_queue_properties_from_user()
167 if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) { in set_queue_properties_from_user()
172 if (!access_ok((const void __user *) args->read_pointer_address, in set_queue_properties_from_user()
178 if (!access_ok((const void __user *) args->write_pointer_address, in set_queue_properties_from_user()
184 if (args->eop_buffer_address && in set_queue_properties_from_user()
185 !access_ok((const void __user *) args->eop_buffer_address, in set_queue_properties_from_user()
191 if (args->ctx_save_restore_address && in set_queue_properties_from_user()
192 !access_ok((const void __user *) args->ctx_save_restore_address, in set_queue_properties_from_user()
199 q_properties->queue_percent = args->queue_percentage; in set_queue_properties_from_user()
200 q_properties->priority = args->queue_priority; in set_queue_properties_from_user()
201 q_properties->queue_address = args->ring_base_address; in set_queue_properties_from_user()
202 q_properties->queue_size = args->ring_size; in set_queue_properties_from_user()
203 q_properties->read_ptr = (uint32_t *) args->read_pointer_address; in set_queue_properties_from_user()
204 q_properties->write_ptr = (uint32_t *) args->write_pointer_address; in set_queue_properties_from_user()
205 q_properties->eop_ring_buffer_address = args->eop_buffer_address; in set_queue_properties_from_user()
206 q_properties->eop_ring_buffer_size = args->eop_buffer_size; in set_queue_properties_from_user()
207 q_properties->ctx_save_restore_area_address = in set_queue_properties_from_user()
208 args->ctx_save_restore_address; in set_queue_properties_from_user()
209 q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size; in set_queue_properties_from_user()
210 q_properties->ctl_stack_size = args->ctl_stack_size; in set_queue_properties_from_user()
211 if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE || in set_queue_properties_from_user()
212 args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL) in set_queue_properties_from_user()
214 else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA) in set_queue_properties_from_user()
216 else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_XGMI) in set_queue_properties_from_user()
221 if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL) in set_queue_properties_from_user()
227 q_properties->queue_percent, args->queue_percentage); in set_queue_properties_from_user()
230 q_properties->priority, args->queue_priority); in set_queue_properties_from_user()
233 q_properties->queue_address, args->ring_base_address); in set_queue_properties_from_user()
236 q_properties->queue_size, args->ring_size); in set_queue_properties_from_user()
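
The checks above reduce to four rules: queue_percentage and queue_priority must not exceed KFD_MAX_QUEUE_PERCENTAGE and KFD_MAX_QUEUE_PRIORITY, every user pointer must pass access_ok(), and ring_size must be zero or a power of two. A sketch of that last rule as userspace might pre-validate it; this mirrors the kernel's is_power_of_2() test at line 167 and is not kernel code:

    /* ring_size rule from line 167: absent (0) or a power of two. */
    static int ring_size_valid(uint64_t ring_size)
    {
        return ring_size == 0 || (ring_size & (ring_size - 1)) == 0;
    }
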
255 struct kfd_ioctl_create_queue_args *args = data; in kfd_ioctl_create_queue() local
266 err = set_queue_properties_from_user(&q_properties, args); in kfd_ioctl_create_queue()
270 pr_debug("Looking for gpu id 0x%x\n", args->gpu_id); in kfd_ioctl_create_queue()
271 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_create_queue()
273 pr_debug("Could not find gpu id 0x%x\n", args->gpu_id); in kfd_ioctl_create_queue()
293 args->queue_id = queue_id; in kfd_ioctl_create_queue()
297 args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL; in kfd_ioctl_create_queue()
298 args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id); in kfd_ioctl_create_queue()
299 args->doorbell_offset <<= PAGE_SHIFT; in kfd_ioctl_create_queue()
306 args->doorbell_offset |= q_properties.doorbell_off; in kfd_ioctl_create_queue()
310 pr_debug("Queue id %d was created successfully\n", args->queue_id); in kfd_ioctl_create_queue()
313 args->ring_base_address); in kfd_ioctl_create_queue()
316 args->read_pointer_address); in kfd_ioctl_create_queue()
319 args->write_pointer_address); in kfd_ioctl_create_queue()
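
A hedged end-to-end sketch of this handler from userspace. kfd_fd, gpu_id, ring_buf, rptr, wptr, and db_page_size are placeholders for state the caller already owns; the doorbell_offset encoding built at lines 297-306 is designed to be handed straight back to mmap() on /dev/kfd.

    struct kfd_ioctl_create_queue_args args = {
        .gpu_id                = gpu_id,
        .queue_type            = KFD_IOC_QUEUE_TYPE_COMPUTE_AQL,
        .queue_percentage      = 100,
        .queue_priority        = 7,
        .ring_base_address     = (uintptr_t)ring_buf,
        .ring_size             = 4096,  /* power of two, per the checks above */
        .read_pointer_address  = (uintptr_t)&rptr,
        .write_pointer_address = (uintptr_t)&wptr,
    };
    void *doorbell = NULL;

    if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_QUEUE, &args) == 0)
        doorbell = mmap(NULL, db_page_size, PROT_READ | PROT_WRITE,
                        MAP_SHARED, kfd_fd, args.doorbell_offset);
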
333 struct kfd_ioctl_destroy_queue_args *args = data; in kfd_ioctl_destroy_queue() local
336 args->queue_id, in kfd_ioctl_destroy_queue()
341 retval = pqm_destroy_queue(&p->pqm, args->queue_id); in kfd_ioctl_destroy_queue()
351 struct kfd_ioctl_update_queue_args *args = data; in kfd_ioctl_update_queue() local
354 if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) { in kfd_ioctl_update_queue()
359 if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) { in kfd_ioctl_update_queue()
364 if ((args->ring_base_address) && in kfd_ioctl_update_queue()
365 (!access_ok((const void __user *) args->ring_base_address, in kfd_ioctl_update_queue()
371 if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) { in kfd_ioctl_update_queue()
376 properties.queue_address = args->ring_base_address; in kfd_ioctl_update_queue()
377 properties.queue_size = args->ring_size; in kfd_ioctl_update_queue()
378 properties.queue_percent = args->queue_percentage; in kfd_ioctl_update_queue()
379 properties.priority = args->queue_priority; in kfd_ioctl_update_queue()
382 args->queue_id, p->pasid); in kfd_ioctl_update_queue()
386 retval = pqm_update_queue(&p->pqm, args->queue_id, &properties); in kfd_ioctl_update_queue()
398 struct kfd_ioctl_set_cu_mask_args *args = data; in kfd_ioctl_set_cu_mask() local
400 uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr; in kfd_ioctl_set_cu_mask()
401 size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32); in kfd_ioctl_set_cu_mask()
403 if ((args->num_cu_mask % 32) != 0) { in kfd_ioctl_set_cu_mask()
405 args->num_cu_mask); in kfd_ioctl_set_cu_mask()
409 properties.cu_mask_count = args->num_cu_mask; in kfd_ioctl_set_cu_mask()
438 retval = pqm_set_cu_mask(&p->pqm, args->queue_id, &properties); in kfd_ioctl_set_cu_mask()
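
The cu_mask_size computation at line 401 implies the userspace contract: num_cu_mask counts bits, must be a multiple of 32 (the check at line 403), and the kernel copies num_cu_mask / 32 32-bit words from cu_mask_ptr. A sketch, with queue_id assumed from a prior create:

    uint32_t cu_mask[2] = { 0xffffffff, 0x0000ffff }; /* enable CUs 0..47 */
    struct kfd_ioctl_set_cu_mask_args args = {
        .queue_id    = queue_id,
        .num_cu_mask = 64,                 /* bits: 2 words x 32 */
        .cu_mask_ptr = (uintptr_t)cu_mask,
    };

    ioctl(kfd_fd, AMDKFD_IOC_SET_CU_MASK, &args);
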
451 struct kfd_ioctl_get_queue_wave_state_args *args = data; in kfd_ioctl_get_queue_wave_state() local
456 r = pqm_get_wave_state(&p->pqm, args->queue_id, in kfd_ioctl_get_queue_wave_state()
457 (void __user *)args->ctl_stack_address, in kfd_ioctl_get_queue_wave_state()
458 &args->ctl_stack_used_size, in kfd_ioctl_get_queue_wave_state()
459 &args->save_area_used_size); in kfd_ioctl_get_queue_wave_state()
469 struct kfd_ioctl_set_memory_policy_args *args = data; in kfd_ioctl_set_memory_policy() local
475 if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT in kfd_ioctl_set_memory_policy()
476 && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { in kfd_ioctl_set_memory_policy()
480 if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT in kfd_ioctl_set_memory_policy()
481 && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { in kfd_ioctl_set_memory_policy()
485 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_set_memory_policy()
497 default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT) in kfd_ioctl_set_memory_policy()
501 (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT) in kfd_ioctl_set_memory_policy()
508 (void __user *)args->alternate_aperture_base, in kfd_ioctl_set_memory_policy()
509 args->alternate_aperture_size)) in kfd_ioctl_set_memory_policy()
521 struct kfd_ioctl_set_trap_handler_args *args = data; in kfd_ioctl_set_trap_handler() local
526 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_set_trap_handler()
540 args->tba_addr, in kfd_ioctl_set_trap_handler()
541 args->tma_addr)) in kfd_ioctl_set_trap_handler()
553 struct kfd_ioctl_dbg_register_args *args = data; in kfd_ioctl_dbg_register() local
560 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_dbg_register()
607 struct kfd_ioctl_dbg_unregister_args *args = data; in kfd_ioctl_dbg_unregister() local
611 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_dbg_unregister()
645 struct kfd_ioctl_dbg_address_watch_args *args = data; in kfd_ioctl_dbg_address_watch() local
656 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_dbg_address_watch()
665 cmd_from_user = (void __user *) args->content_ptr; in kfd_ioctl_dbg_address_watch()
669 if ((args->buf_size_in_bytes > MAX_ALLOWED_AW_BUFF_SIZE) || in kfd_ioctl_dbg_address_watch()
670 (args->buf_size_in_bytes <= sizeof(*args) + sizeof(int) * 2) || in kfd_ioctl_dbg_address_watch()
676 args->buf_size_in_bytes - sizeof(*args)); in kfd_ioctl_dbg_address_watch()
697 if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) { in kfd_ioctl_dbg_address_watch()
721 if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) { in kfd_ioctl_dbg_address_watch()
745 struct kfd_ioctl_dbg_wave_control_args *args = data; in kfd_ioctl_dbg_wave_control() local
757 computed_buff_size = sizeof(*args) + in kfd_ioctl_dbg_wave_control()
764 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_dbg_wave_control()
774 if (args->buf_size_in_bytes != computed_buff_size) { in kfd_ioctl_dbg_wave_control()
776 args->buf_size_in_bytes, computed_buff_size); in kfd_ioctl_dbg_wave_control()
780 cmd_from_user = (void __user *) args->content_ptr; in kfd_ioctl_dbg_wave_control()
788 args->buf_size_in_bytes - sizeof(*args)); in kfd_ioctl_dbg_wave_control()
829 struct kfd_ioctl_get_clock_counters_args *args = data; in kfd_ioctl_get_clock_counters() local
832 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_get_clock_counters()
835 args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(dev->kgd); in kfd_ioctl_get_clock_counters()
838 args->gpu_clock_counter = 0; in kfd_ioctl_get_clock_counters()
841 args->cpu_clock_counter = ktime_get_raw_ns(); in kfd_ioctl_get_clock_counters()
842 args->system_clock_counter = ktime_get_boottime_ns(); in kfd_ioctl_get_clock_counters()
845 args->system_clock_freq = 1000000000; in kfd_ioctl_get_clock_counters()
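
The handler samples three clock domains in one call (the GPU counter, raw monotonic time, and boottime) and reports system_clock_freq as a fixed 1 GHz, so both system counters read as nanoseconds. A sketch, with gpu_id assumed valid:

    struct kfd_ioctl_get_clock_counters_args args = { .gpu_id = gpu_id };

    if (ioctl(kfd_fd, AMDKFD_IOC_GET_CLOCK_COUNTERS, &args) == 0)
        printf("gpu %llu raw %llu boot %llu @ %llu Hz\n",
               (unsigned long long)args.gpu_clock_counter,
               (unsigned long long)args.cpu_clock_counter,
               (unsigned long long)args.system_clock_counter,
               (unsigned long long)args.system_clock_freq);
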
854 struct kfd_ioctl_get_process_apertures_args *args = data; in kfd_ioctl_get_process_apertures() local
860 args->num_of_nodes = 0; in kfd_ioctl_get_process_apertures()
870 &args->process_apertures[args->num_of_nodes]; in kfd_ioctl_get_process_apertures()
880 "node id %u\n", args->num_of_nodes); in kfd_ioctl_get_process_apertures()
896 args->num_of_nodes++; in kfd_ioctl_get_process_apertures()
899 } while (pdd && (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS)); in kfd_ioctl_get_process_apertures()
910 struct kfd_ioctl_get_process_apertures_new_args *args = data; in kfd_ioctl_get_process_apertures_new() local
918 if (args->num_of_nodes == 0) { in kfd_ioctl_get_process_apertures_new()
930 args->num_of_nodes++; in kfd_ioctl_get_process_apertures_new()
942 args->num_of_nodes), GFP_KERNEL); in kfd_ioctl_get_process_apertures_new()
949 args->num_of_nodes = 0; in kfd_ioctl_get_process_apertures_new()
982 } while (pdd && (nodes < args->num_of_nodes)); in kfd_ioctl_get_process_apertures_new()
985 args->num_of_nodes = nodes; in kfd_ioctl_get_process_apertures_new()
987 (void __user *)args->kfd_process_device_apertures_ptr, in kfd_ioctl_get_process_apertures_new()
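
Lines 918-930 give the calling convention away: a first call with num_of_nodes == 0 only counts the available GPUs, and a second call with an appropriately sized array fetches the apertures. A sketch of that two-call pattern (calloc needs stdlib.h):

    struct kfd_ioctl_get_process_apertures_new_args args = {0};
    struct kfd_process_device_apertures *ap;

    /* First pass: num_of_nodes == 0 just returns the GPU count. */
    if (ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args))
        return -1;
    ap = calloc(args.num_of_nodes, sizeof(*ap));
    args.kfd_process_device_apertures_ptr = (uintptr_t)ap;
    /* Second pass: the kernel copies up to num_of_nodes entries back. */
    if (ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args))
        return -1;
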
1001 struct kfd_ioctl_create_event_args *args = data; in kfd_ioctl_create_event() local
1008 if (args->event_page_offset) { in kfd_ioctl_create_event()
1019 kfd = kfd_device_by_id(GET_GPU_ID(args->event_page_offset)); in kfd_ioctl_create_event()
1033 GET_IDR_HANDLE(args->event_page_offset)); in kfd_ioctl_create_event()
1036 args->event_page_offset); in kfd_ioctl_create_event()
1056 err = kfd_event_create(filp, p, args->event_type, in kfd_ioctl_create_event()
1057 args->auto_reset != 0, args->node_id, in kfd_ioctl_create_event()
1058 &args->event_id, &args->event_trigger_data, in kfd_ioctl_create_event()
1059 &args->event_page_offset, in kfd_ioctl_create_event()
1060 &args->event_slot_index); in kfd_ioctl_create_event()
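
A sketch of the common case, an auto-reset signal event. event_page_offset is left zero here (the kernel then manages the signal slot itself; a nonzero offset names a user-allocated event page, the branch at line 1008), and the returned event_id feeds the set/reset/wait ioctls below.

    struct kfd_ioctl_create_event_args args = {
        .event_type = KFD_IOC_EVENT_SIGNAL,
        .auto_reset = 1,
    };

    if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_EVENT, &args) == 0)
        /* args.event_id, args.event_slot_index and args.event_page_offset
         * are now valid. use_event() is a placeholder. */
        use_event(args.event_id);
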
1072 struct kfd_ioctl_destroy_event_args *args = data; in kfd_ioctl_destroy_event() local
1074 return kfd_event_destroy(p, args->event_id); in kfd_ioctl_destroy_event()
1080 struct kfd_ioctl_set_event_args *args = data; in kfd_ioctl_set_event() local
1082 return kfd_set_event(p, args->event_id); in kfd_ioctl_set_event()
1088 struct kfd_ioctl_reset_event_args *args = data; in kfd_ioctl_reset_event() local
1090 return kfd_reset_event(p, args->event_id); in kfd_ioctl_reset_event()
1096 struct kfd_ioctl_wait_events_args *args = data; in kfd_ioctl_wait_events() local
1099 err = kfd_wait_on_events(p, args->num_events, in kfd_ioctl_wait_events()
1100 (void __user *)args->events_ptr, in kfd_ioctl_wait_events()
1101 (args->wait_for_all != 0), in kfd_ioctl_wait_events()
1102 args->timeout, &args->wait_result); in kfd_ioctl_wait_events()
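
A sketch of blocking on one event: events_ptr points at an array of struct kfd_event_data, timeout is in milliseconds, and the outcome comes back in wait_result rather than in the return value.

    struct kfd_event_data ev = { .event_id = event_id };
    struct kfd_ioctl_wait_events_args args = {
        .events_ptr   = (uintptr_t)&ev,
        .num_events   = 1,
        .wait_for_all = 1,
        .timeout      = 1000,  /* ms */
    };

    if (ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &args) == 0 &&
        args.wait_result == KFD_IOC_WAIT_RESULT_COMPLETE)
        handle_event(&ev);  /* placeholder */
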
1109 struct kfd_ioctl_set_scratch_backing_va_args *args = data; in kfd_ioctl_set_scratch_backing_va() local
1114 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_set_scratch_backing_va()
1126 pdd->qpd.sh_hidden_private_base = args->va_addr; in kfd_ioctl_set_scratch_backing_va()
1133 dev->kgd, args->va_addr, pdd->qpd.vmid); in kfd_ioctl_set_scratch_backing_va()
1145 struct kfd_ioctl_get_tile_config_args *args = data; in kfd_ioctl_get_tile_config() local
1150 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_get_tile_config()
1156 args->gb_addr_config = config.gb_addr_config; in kfd_ioctl_get_tile_config()
1157 args->num_banks = config.num_banks; in kfd_ioctl_get_tile_config()
1158 args->num_ranks = config.num_ranks; in kfd_ioctl_get_tile_config()
1160 if (args->num_tile_configs > config.num_tile_configs) in kfd_ioctl_get_tile_config()
1161 args->num_tile_configs = config.num_tile_configs; in kfd_ioctl_get_tile_config()
1162 err = copy_to_user((void __user *)args->tile_config_ptr, in kfd_ioctl_get_tile_config()
1164 args->num_tile_configs * sizeof(uint32_t)); in kfd_ioctl_get_tile_config()
1166 args->num_tile_configs = 0; in kfd_ioctl_get_tile_config()
1170 if (args->num_macro_tile_configs > config.num_macro_tile_configs) in kfd_ioctl_get_tile_config()
1171 args->num_macro_tile_configs = in kfd_ioctl_get_tile_config()
1173 err = copy_to_user((void __user *)args->macro_tile_config_ptr, in kfd_ioctl_get_tile_config()
1175 args->num_macro_tile_configs * sizeof(uint32_t)); in kfd_ioctl_get_tile_config()
1177 args->num_macro_tile_configs = 0; in kfd_ioctl_get_tile_config()
1187 struct kfd_ioctl_acquire_vm_args *args = data; in kfd_ioctl_acquire_vm() local
1193 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_acquire_vm()
1197 drm_file = fget(args->drm_fd); in kfd_ioctl_acquire_vm()
1250 struct kfd_ioctl_alloc_memory_of_gpu_args *args = data; in kfd_ioctl_alloc_memory_of_gpu() local
1256 uint64_t offset = args->mmap_offset; in kfd_ioctl_alloc_memory_of_gpu()
1257 uint32_t flags = args->flags; in kfd_ioctl_alloc_memory_of_gpu()
1259 if (args->size == 0) in kfd_ioctl_alloc_memory_of_gpu()
1262 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_alloc_memory_of_gpu()
1274 if (args->size != kfd_doorbell_process_slice(dev)) in kfd_ioctl_alloc_memory_of_gpu()
1278 if (args->size != PAGE_SIZE) in kfd_ioctl_alloc_memory_of_gpu()
1294 dev->kgd, args->va_addr, args->size, in kfd_ioctl_alloc_memory_of_gpu()
1309 args->handle = MAKE_HANDLE(args->gpu_id, idr_handle); in kfd_ioctl_alloc_memory_of_gpu()
1310 args->mmap_offset = offset; in kfd_ioctl_alloc_memory_of_gpu()
1316 args->mmap_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(args->gpu_id); in kfd_ioctl_alloc_memory_of_gpu()
1317 args->mmap_offset <<= PAGE_SHIFT; in kfd_ioctl_alloc_memory_of_gpu()
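
A sketch of a plain VRAM allocation; va_addr is a GPU virtual address the caller reserved, and the returned handle packs gpu_id and an IDR handle (MAKE_HANDLE at line 1309). mmap_offset matters only for allocations the CPU will map, such as GTT or the MMIO page encoded at lines 1316-1317.

    struct kfd_ioctl_alloc_memory_of_gpu_args args = {
        .va_addr = gpu_va,                       /* assumed reserved */
        .size    = 2 * 1024 * 1024,
        .gpu_id  = gpu_id,
        .flags   = KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
                   KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE,
    };

    if (ioctl(kfd_fd, AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, &args) != 0)
        return -1;
    /* args.handle now feeds AMDKFD_IOC_MAP_MEMORY_TO_GPU. */
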
1332 struct kfd_ioctl_free_memory_of_gpu_args *args = data; in kfd_ioctl_free_memory_of_gpu() local
1338 dev = kfd_device_by_id(GET_GPU_ID(args->handle)); in kfd_ioctl_free_memory_of_gpu()
1352 pdd, GET_IDR_HANDLE(args->handle)); in kfd_ioctl_free_memory_of_gpu()
1366 pdd, GET_IDR_HANDLE(args->handle)); in kfd_ioctl_free_memory_of_gpu()
1376 struct kfd_ioctl_map_memory_to_gpu_args *args = data; in kfd_ioctl_map_memory_to_gpu() local
1384 dev = kfd_device_by_id(GET_GPU_ID(args->handle)); in kfd_ioctl_map_memory_to_gpu()
1388 if (!args->n_devices) { in kfd_ioctl_map_memory_to_gpu()
1392 if (args->n_success > args->n_devices) { in kfd_ioctl_map_memory_to_gpu()
1397 devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr), in kfd_ioctl_map_memory_to_gpu()
1403 (void __user *)args->device_ids_array_ptr, in kfd_ioctl_map_memory_to_gpu()
1404 args->n_devices * sizeof(*devices_arr)); in kfd_ioctl_map_memory_to_gpu()
1419 GET_IDR_HANDLE(args->handle)); in kfd_ioctl_map_memory_to_gpu()
1425 for (i = args->n_success; i < args->n_devices; i++) { in kfd_ioctl_map_memory_to_gpu()
1443 i, args->n_devices); in kfd_ioctl_map_memory_to_gpu()
1446 args->n_success = i+1; in kfd_ioctl_map_memory_to_gpu()
1458 for (i = 0; i < args->n_devices; i++) { in kfd_ioctl_map_memory_to_gpu()
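
A sketch of mapping one allocation on two GPUs. The n_success field is the retry cursor visible at lines 1425 and 1446: on a partial failure the caller can reissue the ioctl and the kernel skips devices that already mapped. The unmap path below follows the same convention.

    uint32_t gpus[2] = { gpu_id_a, gpu_id_b };   /* placeholder ids */
    struct kfd_ioctl_map_memory_to_gpu_args args = {
        .handle               = handle,          /* from the alloc ioctl */
        .device_ids_array_ptr = (uintptr_t)gpus,
        .n_devices            = 2,
        .n_success            = 0,
    };

    if (ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &args) != 0)
        /* args.n_success says how many devices mapped before the error. */
        return -1;
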
1486 struct kfd_ioctl_unmap_memory_from_gpu_args *args = data; in kfd_ioctl_unmap_memory_from_gpu() local
1493 dev = kfd_device_by_id(GET_GPU_ID(args->handle)); in kfd_ioctl_unmap_memory_from_gpu()
1497 if (!args->n_devices) { in kfd_ioctl_unmap_memory_from_gpu()
1501 if (args->n_success > args->n_devices) { in kfd_ioctl_unmap_memory_from_gpu()
1506 devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr), in kfd_ioctl_unmap_memory_from_gpu()
1512 (void __user *)args->device_ids_array_ptr, in kfd_ioctl_unmap_memory_from_gpu()
1513 args->n_devices * sizeof(*devices_arr)); in kfd_ioctl_unmap_memory_from_gpu()
1528 GET_IDR_HANDLE(args->handle)); in kfd_ioctl_unmap_memory_from_gpu()
1534 for (i = args->n_success; i < args->n_devices; i++) { in kfd_ioctl_unmap_memory_from_gpu()
1550 i, args->n_devices); in kfd_ioctl_unmap_memory_from_gpu()
1553 args->n_success = i+1; in kfd_ioctl_unmap_memory_from_gpu()
1573 struct kfd_ioctl_get_dmabuf_info_args *args = data; in kfd_ioctl_get_dmabuf_info() local
1588 if (args->metadata_ptr) { in kfd_ioctl_get_dmabuf_info()
1589 metadata_buffer = kzalloc(args->metadata_size, GFP_KERNEL); in kfd_ioctl_get_dmabuf_info()
1595 r = amdgpu_amdkfd_get_dmabuf_info(dev->kgd, args->dmabuf_fd, in kfd_ioctl_get_dmabuf_info()
1596 &dma_buf_kgd, &args->size, in kfd_ioctl_get_dmabuf_info()
1597 metadata_buffer, args->metadata_size, in kfd_ioctl_get_dmabuf_info()
1598 &args->metadata_size, &flags); in kfd_ioctl_get_dmabuf_info()
1608 args->gpu_id = dev->id; in kfd_ioctl_get_dmabuf_info()
1609 args->flags = flags; in kfd_ioctl_get_dmabuf_info()
1613 r = copy_to_user((void __user *)args->metadata_ptr, in kfd_ioctl_get_dmabuf_info()
1614 metadata_buffer, args->metadata_size); in kfd_ioctl_get_dmabuf_info()
1628 struct kfd_ioctl_import_dmabuf_args *args = data; in kfd_ioctl_import_dmabuf() local
1637 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_import_dmabuf()
1641 dmabuf = dma_buf_get(args->dmabuf_fd); in kfd_ioctl_import_dmabuf()
1654 args->va_addr, pdd->vm, in kfd_ioctl_import_dmabuf()
1668 args->handle = MAKE_HANDLE(args->gpu_id, idr_handle); in kfd_ioctl_import_dmabuf()
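
A closing sketch for the dma-buf import path: a buffer exported elsewhere (for example from amdgpu's GEM interface) is attached into the KFD VM at va_addr and comes back as an ordinary KFD handle, ready for the map ioctl above.

    struct kfd_ioctl_import_dmabuf_args args = {
        .va_addr   = gpu_va,     /* assumed reserved GPU VA */
        .gpu_id    = gpu_id,
        .dmabuf_fd = dmabuf_fd,  /* fd from the exporting driver */
    };

    if (ioctl(kfd_fd, AMDKFD_IOC_IMPORT_DMABUF, &args) == 0)
        /* args.handle behaves like an ALLOC_MEMORY_OF_GPU handle;
         * map_on_gpus() is a placeholder. */
        map_on_gpus(args.handle);
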