Lines matching refs:args in drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
137 struct kfd_ioctl_get_version_args *args = data; in kfd_ioctl_get_version() local
139 args->major_version = KFD_IOCTL_MAJOR_VERSION; in kfd_ioctl_get_version()
140 args->minor_version = KFD_IOCTL_MINOR_VERSION; in kfd_ioctl_get_version()
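
The two matches above are the whole of the version handshake: the handler only writes the interface version back into the args struct. A minimal userspace sketch of the call, assuming the uapi header linux/kfd_ioctl.h and the /dev/kfd device node (standard for amdkfd, but not shown in this listing):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int main(void)
{
	struct kfd_ioctl_get_version_args args = {0};
	int fd = open("/dev/kfd", O_RDWR);

	if (fd < 0 || ioctl(fd, AMDKFD_IOC_GET_VERSION, &args) < 0)
		return 1;
	printf("KFD ioctl interface %u.%u\n",
	       args.major_version, args.minor_version);
	return 0;
}
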
146 struct kfd_ioctl_create_queue_args *args) in set_queue_properties_from_user() argument
148 if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) { in set_queue_properties_from_user()
153 if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) { in set_queue_properties_from_user()
158 if ((args->ring_base_address) && in set_queue_properties_from_user()
160 (const void __user *) args->ring_base_address, in set_queue_properties_from_user()
166 if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) { in set_queue_properties_from_user()
172 (const void __user *) args->read_pointer_address, in set_queue_properties_from_user()
179 (const void __user *) args->write_pointer_address, in set_queue_properties_from_user()
185 if (args->eop_buffer_address && in set_queue_properties_from_user()
187 (const void __user *) args->eop_buffer_address, in set_queue_properties_from_user()
193 if (args->ctx_save_restore_address && in set_queue_properties_from_user()
195 (const void __user *) args->ctx_save_restore_address, in set_queue_properties_from_user()
202 q_properties->queue_percent = args->queue_percentage; in set_queue_properties_from_user()
203 q_properties->priority = args->queue_priority; in set_queue_properties_from_user()
204 q_properties->queue_address = args->ring_base_address; in set_queue_properties_from_user()
205 q_properties->queue_size = args->ring_size; in set_queue_properties_from_user()
206 q_properties->read_ptr = (uint32_t *) args->read_pointer_address; in set_queue_properties_from_user()
207 q_properties->write_ptr = (uint32_t *) args->write_pointer_address; in set_queue_properties_from_user()
208 q_properties->eop_ring_buffer_address = args->eop_buffer_address; in set_queue_properties_from_user()
209 q_properties->eop_ring_buffer_size = args->eop_buffer_size; in set_queue_properties_from_user()
211 args->ctx_save_restore_address; in set_queue_properties_from_user()
212 q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size; in set_queue_properties_from_user()
213 q_properties->ctl_stack_size = args->ctl_stack_size; in set_queue_properties_from_user()
214 if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE || in set_queue_properties_from_user()
215 args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL) in set_queue_properties_from_user()
217 else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA) in set_queue_properties_from_user()
222 if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL) in set_queue_properties_from_user()
228 q_properties->queue_percent, args->queue_percentage); in set_queue_properties_from_user()
231 q_properties->priority, args->queue_priority); in set_queue_properties_from_user()
234 q_properties->queue_address, args->ring_base_address); in set_queue_properties_from_user()
237 q_properties->queue_size, args->ring_size); in set_queue_properties_from_user()
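
set_queue_properties_from_user() bounds-checks queue_percentage and queue_priority, verifies each user pointer with access_ok, and rejects ring sizes that are neither zero nor a power of two (line 166). A userspace pre-flight sketch of the same checks; the numeric limits 100 and 15 mirror KFD_MAX_QUEUE_PERCENTAGE and KFD_MAX_QUEUE_PRIORITY and are assumptions here:

#include <linux/kfd_ioctl.h>

/* Pre-flight the kernel's sanity checks so a bad request can be
 * caught before the syscall.  Limit values are assumptions. */
static int queue_args_look_sane(const struct kfd_ioctl_create_queue_args *a)
{
	/* same test as the kernel's is_power_of_2(); zero is allowed */
	if (a->ring_size && (a->ring_size & (a->ring_size - 1)))
		return 0;
	return a->queue_percentage <= 100 && a->queue_priority <= 15;
}
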
256 struct kfd_ioctl_create_queue_args *args = data; in kfd_ioctl_create_queue() local
267 err = set_queue_properties_from_user(&q_properties, args); in kfd_ioctl_create_queue()
271 pr_debug("Looking for gpu id 0x%x\n", args->gpu_id); in kfd_ioctl_create_queue()
272 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_create_queue()
274 pr_debug("Could not find gpu id 0x%x\n", args->gpu_id); in kfd_ioctl_create_queue()
294 args->queue_id = queue_id; in kfd_ioctl_create_queue()
298 args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL; in kfd_ioctl_create_queue()
299 args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id); in kfd_ioctl_create_queue()
300 args->doorbell_offset <<= PAGE_SHIFT; in kfd_ioctl_create_queue()
307 args->doorbell_offset |= q_properties.doorbell_off; in kfd_ioctl_create_queue()
311 pr_debug("Queue id %d was created successfully\n", args->queue_id); in kfd_ioctl_create_queue()
314 args->ring_base_address); in kfd_ioctl_create_queue()
317 args->read_pointer_address); in kfd_ioctl_create_queue()
320 args->write_pointer_address); in kfd_ioctl_create_queue()
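
kfd_ioctl_create_queue() validates via the helper above, looks up the GPU, and hands back both a queue_id and a doorbell_offset that already encodes the mmap type, gpu_id and page shift (lines 298-300), so userspace can pass it verbatim as an mmap() offset on /dev/kfd. A creation sketch; kfd_fd, the ring and pointer buffers, and the priority value are caller-supplied assumptions:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

/* Sketch: create an AQL compute queue.  The ring and the read/write
 * pointer words must be valid user memory (the kernel access_ok()s
 * them); allocating them is the caller's job. */
int create_aql_queue(int kfd_fd, unsigned int gpu_id,
		     void *ring, unsigned int ring_bytes,
		     unsigned int *rptr, unsigned int *wptr,
		     struct kfd_ioctl_create_queue_args *out)
{
	memset(out, 0, sizeof(*out));
	out->gpu_id = gpu_id;
	out->queue_type = KFD_IOC_QUEUE_TYPE_COMPUTE_AQL;
	out->queue_percentage = 100;
	out->queue_priority = 7;	/* arbitrary mid priority */
	out->ring_base_address = (unsigned long)ring;
	out->ring_size = ring_bytes;	/* must be a power of two */
	out->read_pointer_address = (unsigned long)rptr;
	out->write_pointer_address = (unsigned long)wptr;

	if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_QUEUE, out))
		return -1;
	/* out->queue_id and out->doorbell_offset are now valid */
	return 0;
}
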
334 struct kfd_ioctl_destroy_queue_args *args = data; in kfd_ioctl_destroy_queue() local
337 args->queue_id, in kfd_ioctl_destroy_queue()
342 retval = pqm_destroy_queue(&p->pqm, args->queue_id); in kfd_ioctl_destroy_queue()
352 struct kfd_ioctl_update_queue_args *args = data; in kfd_ioctl_update_queue() local
355 if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) { in kfd_ioctl_update_queue()
360 if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) { in kfd_ioctl_update_queue()
365 if ((args->ring_base_address) && in kfd_ioctl_update_queue()
367 (const void __user *) args->ring_base_address, in kfd_ioctl_update_queue()
373 if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) { in kfd_ioctl_update_queue()
378 properties.queue_address = args->ring_base_address; in kfd_ioctl_update_queue()
379 properties.queue_size = args->ring_size; in kfd_ioctl_update_queue()
380 properties.queue_percent = args->queue_percentage; in kfd_ioctl_update_queue()
381 properties.priority = args->queue_priority; in kfd_ioctl_update_queue()
384 args->queue_id, p->pasid); in kfd_ioctl_update_queue()
388 retval = pqm_update_queue(&p->pqm, args->queue_id, &properties); in kfd_ioctl_update_queue()
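
The update path repeats the create-time checks, then forwards only address, size, percentage and priority to pqm_update_queue(). A sketch that throttles an existing queue to half its time slice; queue_id, ring and ring_bytes carry over from the creation sketch above:

#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int throttle_queue(int kfd_fd, unsigned int queue_id,
		   unsigned long ring, unsigned int ring_bytes)
{
	struct kfd_ioctl_update_queue_args args = {0};

	args.queue_id = queue_id;
	args.ring_base_address = ring;	/* ring itself is unchanged */
	args.ring_size = ring_bytes;
	args.queue_percentage = 50;	/* halve the time slice */
	args.queue_priority = 7;
	return ioctl(kfd_fd, AMDKFD_IOC_UPDATE_QUEUE, &args);
}
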
400 struct kfd_ioctl_set_cu_mask_args *args = data; in kfd_ioctl_set_cu_mask() local
402 uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr; in kfd_ioctl_set_cu_mask()
403 size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32); in kfd_ioctl_set_cu_mask()
405 if ((args->num_cu_mask % 32) != 0) { in kfd_ioctl_set_cu_mask()
407 args->num_cu_mask); in kfd_ioctl_set_cu_mask()
411 properties.cu_mask_count = args->num_cu_mask; in kfd_ioctl_set_cu_mask()
440 retval = pqm_set_cu_mask(&p->pqm, args->queue_id, &properties); in kfd_ioctl_set_cu_mask()
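
kfd_ioctl_set_cu_mask() insists num_cu_mask is a multiple of 32, since the mask is copied in as whole 32-bit words (line 403). A single-word sketch, assuming the AMDKFD_IOC_SET_CU_MASK uapi macro:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

/* Restrict a queue to the compute units set in one 32-bit mask word. */
int set_cu_mask_word(int kfd_fd, unsigned int queue_id, uint32_t mask)
{
	struct kfd_ioctl_set_cu_mask_args args = {0};

	args.queue_id = queue_id;
	args.num_cu_mask = 32;			/* bits; multiple of 32 */
	args.cu_mask_ptr = (uintptr_t)&mask;
	return ioctl(kfd_fd, AMDKFD_IOC_SET_CU_MASK, &args);
}
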
453 struct kfd_ioctl_set_memory_policy_args *args = data; in kfd_ioctl_set_memory_policy() local
459 if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT in kfd_ioctl_set_memory_policy()
460 && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { in kfd_ioctl_set_memory_policy()
464 if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT in kfd_ioctl_set_memory_policy()
465 && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { in kfd_ioctl_set_memory_policy()
469 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_set_memory_policy()
481 default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT) in kfd_ioctl_set_memory_policy()
485 (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT) in kfd_ioctl_set_memory_policy()
492 (void __user *)args->alternate_aperture_base, in kfd_ioctl_set_memory_policy()
493 args->alternate_aperture_size)) in kfd_ioctl_set_memory_policy()
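
Both policies must be one of the two KFD_IOC_CACHE_POLICY_* values before the handler translates them and programs the alternate aperture. A sketch; the aperture base and size are placeholders the caller must supply:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int set_memory_policy(int kfd_fd, unsigned int gpu_id,
		      uint64_t alt_base, uint64_t alt_size)
{
	struct kfd_ioctl_set_memory_policy_args args = {0};

	args.gpu_id = gpu_id;
	args.default_policy = KFD_IOC_CACHE_POLICY_COHERENT;
	args.alternate_policy = KFD_IOC_CACHE_POLICY_NONCOHERENT;
	args.alternate_aperture_base = alt_base;	/* placeholder */
	args.alternate_aperture_size = alt_size;	/* placeholder */
	return ioctl(kfd_fd, AMDKFD_IOC_SET_MEMORY_POLICY, &args);
}
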
505 struct kfd_ioctl_set_trap_handler_args *args = data; in kfd_ioctl_set_trap_handler() local
510 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_set_trap_handler()
524 args->tba_addr, in kfd_ioctl_set_trap_handler()
525 args->tma_addr)) in kfd_ioctl_set_trap_handler()
537 struct kfd_ioctl_dbg_register_args *args = data; in kfd_ioctl_dbg_register() local
544 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_dbg_register()
591 struct kfd_ioctl_dbg_unregister_args *args = data; in kfd_ioctl_dbg_unregister() local
595 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_dbg_unregister()
629 struct kfd_ioctl_dbg_address_watch_args *args = data; in kfd_ioctl_dbg_address_watch() local
640 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_dbg_address_watch()
649 cmd_from_user = (void __user *) args->content_ptr; in kfd_ioctl_dbg_address_watch()
653 if ((args->buf_size_in_bytes > MAX_ALLOWED_AW_BUFF_SIZE) || in kfd_ioctl_dbg_address_watch()
654 (args->buf_size_in_bytes <= sizeof(*args) + sizeof(int) * 2) || in kfd_ioctl_dbg_address_watch()
660 args->buf_size_in_bytes - sizeof(*args)); in kfd_ioctl_dbg_address_watch()
681 if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) { in kfd_ioctl_dbg_address_watch()
705 if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) { in kfd_ioctl_dbg_address_watch()
729 struct kfd_ioctl_dbg_wave_control_args *args = data; in kfd_ioctl_dbg_wave_control() local
741 computed_buff_size = sizeof(*args) + in kfd_ioctl_dbg_wave_control()
748 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_dbg_wave_control()
758 if (args->buf_size_in_bytes != computed_buff_size) { in kfd_ioctl_dbg_wave_control()
760 args->buf_size_in_bytes, computed_buff_size); in kfd_ioctl_dbg_wave_control()
764 cmd_from_user = (void __user *) args->content_ptr; in kfd_ioctl_dbg_wave_control()
772 args->buf_size_in_bytes - sizeof(*args)); in kfd_ioctl_dbg_wave_control()
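
Both debug ioctls carry a variable-length payload behind content_ptr: address-watch requires buf_size_in_bytes to stay within (sizeof(*args), MAX_ALLOWED_AW_BUFF_SIZE], while wave-control requires it to equal sizeof(*args) plus the exact payload size (line 741). A sketch of that size contract only; the payload layout itself is device-specific and left opaque here:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int send_wave_control(int kfd_fd, unsigned int gpu_id,
		      const void *payload, uint32_t payload_bytes)
{
	struct kfd_ioctl_dbg_wave_control_args args = {0};

	args.gpu_id = gpu_id;
	args.content_ptr = (uintptr_t)payload;	/* opaque payload */
	/* must match the kernel's computed_buff_size exactly */
	args.buf_size_in_bytes = sizeof(args) + payload_bytes;
	return ioctl(kfd_fd, AMDKFD_IOC_DBG_WAVE_CONTROL, &args);
}
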
813 struct kfd_ioctl_get_clock_counters_args *args = data; in kfd_ioctl_get_clock_counters() local
816 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_get_clock_counters()
819 args->gpu_clock_counter = in kfd_ioctl_get_clock_counters()
823 args->gpu_clock_counter = 0; in kfd_ioctl_get_clock_counters()
826 args->cpu_clock_counter = ktime_get_raw_ns(); in kfd_ioctl_get_clock_counters()
827 args->system_clock_counter = ktime_get_boot_ns(); in kfd_ioctl_get_clock_counters()
830 args->system_clock_freq = 1000000000; in kfd_ioctl_get_clock_counters()
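
The counters come back in one shot: the GPU counter from the device (zeroed if the GPU is gone), raw and boot-time CPU counters, and a fixed 1 GHz system_clock_freq because ktime reports nanoseconds. A sampling sketch:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int sample_clocks(int kfd_fd, unsigned int gpu_id)
{
	struct kfd_ioctl_get_clock_counters_args args = {0};

	args.gpu_id = gpu_id;
	if (ioctl(kfd_fd, AMDKFD_IOC_GET_CLOCK_COUNTERS, &args))
		return -1;
	printf("gpu %llu cpu %llu system %llu @ %llu Hz\n",
	       (unsigned long long)args.gpu_clock_counter,
	       (unsigned long long)args.cpu_clock_counter,
	       (unsigned long long)args.system_clock_counter,
	       (unsigned long long)args.system_clock_freq);
	return 0;
}
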
839 struct kfd_ioctl_get_process_apertures_args *args = data; in kfd_ioctl_get_process_apertures() local
845 args->num_of_nodes = 0; in kfd_ioctl_get_process_apertures()
855 &args->process_apertures[args->num_of_nodes]; in kfd_ioctl_get_process_apertures()
865 "node id %u\n", args->num_of_nodes); in kfd_ioctl_get_process_apertures()
881 args->num_of_nodes++; in kfd_ioctl_get_process_apertures()
884 } while (pdd && (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS)); in kfd_ioctl_get_process_apertures()
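
The legacy aperture query fills a fixed-size array embedded in the args struct, capped at NUM_OF_SUPPORTED_GPUS, and reports the used count in num_of_nodes. A read-out sketch; the gpu_id member of each aperture entry is taken from the uapi struct and is an assumption here:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int dump_apertures_legacy(int kfd_fd)
{
	struct kfd_ioctl_get_process_apertures_args args = {0};
	unsigned int i;

	if (ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES, &args))
		return -1;
	for (i = 0; i < args.num_of_nodes; i++)
		printf("node %u: gpu 0x%x\n", i,
		       args.process_apertures[i].gpu_id);
	return 0;
}
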
895 struct kfd_ioctl_get_process_apertures_new_args *args = data; in kfd_ioctl_get_process_apertures_new() local
903 if (args->num_of_nodes == 0) { in kfd_ioctl_get_process_apertures_new()
915 args->num_of_nodes++; in kfd_ioctl_get_process_apertures_new()
927 args->num_of_nodes), GFP_KERNEL); in kfd_ioctl_get_process_apertures_new()
934 args->num_of_nodes = 0; in kfd_ioctl_get_process_apertures_new()
967 } while (pdd && (nodes < args->num_of_nodes)); in kfd_ioctl_get_process_apertures_new()
970 args->num_of_nodes = nodes; in kfd_ioctl_get_process_apertures_new()
972 (void __user *)args->kfd_process_device_apertures_ptr, in kfd_ioctl_get_process_apertures_new()
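
The newer query drops the fixed array in favor of a two-call dance, which the lines above implement: with num_of_nodes == 0 the kernel only counts nodes (line 915); a second call copies up to num_of_nodes entries out to kfd_process_device_apertures_ptr. A sketch of the pattern; struct kfd_process_device_apertures is the uapi element type and an assumption here:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

struct kfd_process_device_apertures *get_apertures(int kfd_fd,
						   uint32_t *count)
{
	struct kfd_ioctl_get_process_apertures_new_args args = {0};
	struct kfd_process_device_apertures *buf;

	/* first call: num_of_nodes == 0, kernel returns the count */
	if (ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args))
		return NULL;
	buf = calloc(args.num_of_nodes, sizeof(*buf));
	if (!buf)
		return NULL;
	args.kfd_process_device_apertures_ptr = (uintptr_t)buf;
	/* second call: kernel fills the buffer */
	if (ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args)) {
		free(buf);
		return NULL;
	}
	*count = args.num_of_nodes;	/* may be lower than the probe */
	return buf;
}
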
986 struct kfd_ioctl_create_event_args *args = data; in kfd_ioctl_create_event() local
993 if (args->event_page_offset) { in kfd_ioctl_create_event()
1004 kfd = kfd_device_by_id(GET_GPU_ID(args->event_page_offset)); in kfd_ioctl_create_event()
1018 GET_IDR_HANDLE(args->event_page_offset)); in kfd_ioctl_create_event()
1021 args->event_page_offset); in kfd_ioctl_create_event()
1041 err = kfd_event_create(filp, p, args->event_type, in kfd_ioctl_create_event()
1042 args->auto_reset != 0, args->node_id, in kfd_ioctl_create_event()
1043 &args->event_id, &args->event_trigger_data, in kfd_ioctl_create_event()
1044 &args->event_page_offset, in kfd_ioctl_create_event()
1045 &args->event_slot_index); in kfd_ioctl_create_event()
1057 struct kfd_ioctl_destroy_event_args *args = data; in kfd_ioctl_destroy_event() local
1059 return kfd_event_destroy(p, args->event_id); in kfd_ioctl_destroy_event()
1065 struct kfd_ioctl_set_event_args *args = data; in kfd_ioctl_set_event() local
1067 return kfd_set_event(p, args->event_id); in kfd_ioctl_set_event()
1073 struct kfd_ioctl_reset_event_args *args = data; in kfd_ioctl_reset_event() local
1075 return kfd_reset_event(p, args->event_id); in kfd_ioctl_reset_event()
1081 struct kfd_ioctl_wait_events_args *args = data; in kfd_ioctl_wait_events() local
1084 err = kfd_wait_on_events(p, args->num_events, in kfd_ioctl_wait_events()
1085 (void __user *)args->events_ptr, in kfd_ioctl_wait_events()
1086 (args->wait_for_all != 0), in kfd_ioctl_wait_events()
1087 args->timeout, &args->wait_result); in kfd_ioctl_wait_events()
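
kfd_event_create() returns the event id and trigger data and, for events backed by a user-mapped signal page, the page offset and slot; kfd_wait_on_events() then takes an array of per-event descriptors behind events_ptr. A create-and-wait sketch; KFD_IOC_EVENT_SIGNAL, struct kfd_event_data and the millisecond timeout unit are uapi details assumed here:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int wait_for_signal_event(int kfd_fd)
{
	struct kfd_ioctl_create_event_args cargs = {0};
	struct kfd_ioctl_wait_events_args wargs = {0};
	struct kfd_event_data ev = {0};

	cargs.event_type = KFD_IOC_EVENT_SIGNAL;
	cargs.auto_reset = 1;
	if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_EVENT, &cargs))
		return -1;

	ev.event_id = cargs.event_id;
	wargs.events_ptr = (uintptr_t)&ev;
	wargs.num_events = 1;
	wargs.wait_for_all = 1;
	wargs.timeout = 1000;		/* assumed to be milliseconds */
	return ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &wargs);
}
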
1094 struct kfd_ioctl_set_scratch_backing_va_args *args = data; in kfd_ioctl_set_scratch_backing_va() local
1099 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_set_scratch_backing_va()
1111 pdd->qpd.sh_hidden_private_base = args->va_addr; in kfd_ioctl_set_scratch_backing_va()
1118 dev->kgd, args->va_addr, pdd->qpd.vmid); in kfd_ioctl_set_scratch_backing_va()
1130 struct kfd_ioctl_get_tile_config_args *args = data; in kfd_ioctl_get_tile_config() local
1135 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_get_tile_config()
1141 args->gb_addr_config = config.gb_addr_config; in kfd_ioctl_get_tile_config()
1142 args->num_banks = config.num_banks; in kfd_ioctl_get_tile_config()
1143 args->num_ranks = config.num_ranks; in kfd_ioctl_get_tile_config()
1145 if (args->num_tile_configs > config.num_tile_configs) in kfd_ioctl_get_tile_config()
1146 args->num_tile_configs = config.num_tile_configs; in kfd_ioctl_get_tile_config()
1147 err = copy_to_user((void __user *)args->tile_config_ptr, in kfd_ioctl_get_tile_config()
1149 args->num_tile_configs * sizeof(uint32_t)); in kfd_ioctl_get_tile_config()
1151 args->num_tile_configs = 0; in kfd_ioctl_get_tile_config()
1155 if (args->num_macro_tile_configs > config.num_macro_tile_configs) in kfd_ioctl_get_tile_config()
1156 args->num_macro_tile_configs = in kfd_ioctl_get_tile_config()
1158 err = copy_to_user((void __user *)args->macro_tile_config_ptr, in kfd_ioctl_get_tile_config()
1160 args->num_macro_tile_configs * sizeof(uint32_t)); in kfd_ioctl_get_tile_config()
1162 args->num_macro_tile_configs = 0; in kfd_ioctl_get_tile_config()
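
The handler clamps both requested counts to what the device actually has before copying out, so userspace can over-provision its buffers. A sketch; the array capacities are guesses, not hardware limits:

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int dump_tile_config(int kfd_fd, unsigned int gpu_id)
{
	uint32_t tc[64], mtc[16];	/* capacities are guesses */
	struct kfd_ioctl_get_tile_config_args args = {0};

	args.gpu_id = gpu_id;
	args.tile_config_ptr = (uintptr_t)tc;
	args.num_tile_configs = 64;
	args.macro_tile_config_ptr = (uintptr_t)mtc;
	args.num_macro_tile_configs = 16;
	if (ioctl(kfd_fd, AMDKFD_IOC_GET_TILE_CONFIG, &args))
		return -1;
	/* both counts were clamped by the kernel to the real sizes */
	printf("gb_addr_config 0x%x, %u tile, %u macro-tile entries\n",
	       args.gb_addr_config, args.num_tile_configs,
	       args.num_macro_tile_configs);
	return 0;
}
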
1172 struct kfd_ioctl_acquire_vm_args *args = data; in kfd_ioctl_acquire_vm() local
1178 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_acquire_vm()
1182 drm_file = fget(args->drm_fd); in kfd_ioctl_acquire_vm()
1235 struct kfd_ioctl_alloc_memory_of_gpu_args *args = data; in kfd_ioctl_alloc_memory_of_gpu() local
1241 uint64_t offset = args->mmap_offset; in kfd_ioctl_alloc_memory_of_gpu()
1242 uint32_t flags = args->flags; in kfd_ioctl_alloc_memory_of_gpu()
1244 if (args->size == 0) in kfd_ioctl_alloc_memory_of_gpu()
1247 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_alloc_memory_of_gpu()
1267 dev->kgd, args->va_addr, args->size, in kfd_ioctl_alloc_memory_of_gpu()
1282 args->handle = MAKE_HANDLE(args->gpu_id, idr_handle); in kfd_ioctl_alloc_memory_of_gpu()
1283 args->mmap_offset = offset; in kfd_ioctl_alloc_memory_of_gpu()
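
On success the handler packs gpu_id and the per-device IDR slot into one opaque handle (MAKE_HANDLE, line 1282) and returns an mmap offset for CPU access. An allocation sketch; KFD_IOC_ALLOC_MEM_FLAGS_VRAM is the uapi flag name and an assumption here, as is the precondition that the process already bound a VM via AMDKFD_IOC_ACQUIRE_VM:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int alloc_vram(int kfd_fd, unsigned int gpu_id, uint64_t va,
	       uint64_t bytes,
	       struct kfd_ioctl_alloc_memory_of_gpu_args *out)
{
	memset(out, 0, sizeof(*out));
	out->gpu_id = gpu_id;
	out->va_addr = va;	/* caller-chosen GPU virtual address */
	out->size = bytes;
	out->flags = KFD_IOC_ALLOC_MEM_FLAGS_VRAM;
	if (ioctl(kfd_fd, AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, out))
		return -1;
	/* out->handle packs gpu_id + IDR slot; keep it for map/free */
	return 0;
}
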
1297 struct kfd_ioctl_free_memory_of_gpu_args *args = data; in kfd_ioctl_free_memory_of_gpu() local
1303 dev = kfd_device_by_id(GET_GPU_ID(args->handle)); in kfd_ioctl_free_memory_of_gpu()
1317 pdd, GET_IDR_HANDLE(args->handle)); in kfd_ioctl_free_memory_of_gpu()
1330 pdd, GET_IDR_HANDLE(args->handle)); in kfd_ioctl_free_memory_of_gpu()
1340 struct kfd_ioctl_map_memory_to_gpu_args *args = data; in kfd_ioctl_map_memory_to_gpu() local
1348 dev = kfd_device_by_id(GET_GPU_ID(args->handle)); in kfd_ioctl_map_memory_to_gpu()
1352 if (!args->n_devices) { in kfd_ioctl_map_memory_to_gpu()
1356 if (args->n_success > args->n_devices) { in kfd_ioctl_map_memory_to_gpu()
1361 devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr), in kfd_ioctl_map_memory_to_gpu()
1367 (void __user *)args->device_ids_array_ptr, in kfd_ioctl_map_memory_to_gpu()
1368 args->n_devices * sizeof(*devices_arr)); in kfd_ioctl_map_memory_to_gpu()
1383 GET_IDR_HANDLE(args->handle)); in kfd_ioctl_map_memory_to_gpu()
1389 for (i = args->n_success; i < args->n_devices; i++) { in kfd_ioctl_map_memory_to_gpu()
1407 i, args->n_devices); in kfd_ioctl_map_memory_to_gpu()
1410 args->n_success = i+1; in kfd_ioctl_map_memory_to_gpu()
1422 for (i = 0; i < args->n_devices; i++) { in kfd_ioctl_map_memory_to_gpu()
1450 struct kfd_ioctl_unmap_memory_from_gpu_args *args = data; in kfd_ioctl_unmap_memory_from_gpu() local
1457 dev = kfd_device_by_id(GET_GPU_ID(args->handle)); in kfd_ioctl_unmap_memory_from_gpu()
1461 if (!args->n_devices) { in kfd_ioctl_unmap_memory_from_gpu()
1465 if (args->n_success > args->n_devices) { in kfd_ioctl_unmap_memory_from_gpu()
1470 devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr), in kfd_ioctl_unmap_memory_from_gpu()
1476 (void __user *)args->device_ids_array_ptr, in kfd_ioctl_unmap_memory_from_gpu()
1477 args->n_devices * sizeof(*devices_arr)); in kfd_ioctl_unmap_memory_from_gpu()
1492 GET_IDR_HANDLE(args->handle)); in kfd_ioctl_unmap_memory_from_gpu()
1498 for (i = args->n_success; i < args->n_devices; i++) { in kfd_ioctl_unmap_memory_from_gpu()
1514 i, args->n_devices); in kfd_ioctl_unmap_memory_from_gpu()
1517 args->n_success = i+1; in kfd_ioctl_unmap_memory_from_gpu()
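
Mapping and unmapping are per-device: both handlers walk device_ids_array_ptr starting at index n_success and bump it after each device that succeeds, so after a partial failure n_success tells userspace exactly how far the kernel got and where a retry should resume. A mapping sketch using that contract; gpu_ids is a caller-built array of the target device ids:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int map_to_gpus(int kfd_fd, uint64_t handle,
		const uint32_t *gpu_ids, uint32_t n_gpus)
{
	struct kfd_ioctl_map_memory_to_gpu_args args = {0};

	args.handle = handle;		/* from the allocation sketch */
	args.device_ids_array_ptr = (uintptr_t)gpu_ids;
	args.n_devices = n_gpus;
	args.n_success = 0;
	if (ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &args))
		/* args.n_success now counts the devices that did map;
		 * a retry would resume from that index */
		return -1;
	return 0;
}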