/Linux-v6.1/drivers/xen/ |
D | privcmd.c |
    621  struct privcmd_dm_op kdata;  in privcmd_ioctl_dm_op() local
    630  if (copy_from_user(&kdata, udata, sizeof(kdata)))  in privcmd_ioctl_dm_op()
    634  if (data->domid != DOMID_INVALID && data->domid != kdata.dom)  in privcmd_ioctl_dm_op()
    637  if (kdata.num == 0)  in privcmd_ioctl_dm_op()
    640  if (kdata.num > privcmd_dm_op_max_num)  in privcmd_ioctl_dm_op()
    643  kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);  in privcmd_ioctl_dm_op()
    647  if (copy_from_user(kbufs, kdata.ubufs,  in privcmd_ioctl_dm_op()
    648      sizeof(*kbufs) * kdata.num)) {  in privcmd_ioctl_dm_op()
    653  for (i = 0; i < kdata.num; i++) {  in privcmd_ioctl_dm_op()
    676  xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);  in privcmd_ioctl_dm_op()
    [all …]
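The pattern visible above — copy a fixed-size request header, bound-check the element count, then kcalloc and copy the variable-length array — is a common ioctl copy-in shape. A minimal sketch of that shape under hypothetical names (demo_req, demo_buf and DEMO_MAX_BUFS are invented stand-ins, not kernel identifiers):

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    #define DEMO_MAX_BUFS 16	/* hypothetical bound, playing the role of privcmd_dm_op_max_num */

    struct demo_buf { void __user *uptr; u32 size; };
    struct demo_req { u32 num; struct demo_buf __user *ubufs; };

    /* Sketch: copy the fixed header, bound-check num, then copy the buffer array. */
    static int demo_copy_in(void __user *udata, struct demo_buf **out_bufs, u32 *out_num)
    {
    	struct demo_req kreq;
    	struct demo_buf *kbufs;

    	if (copy_from_user(&kreq, udata, sizeof(kreq)))
    		return -EFAULT;

    	if (kreq.num == 0 || kreq.num > DEMO_MAX_BUFS)	/* reject empty and oversized requests */
    		return -EINVAL;

    	kbufs = kcalloc(kreq.num, sizeof(*kbufs), GFP_KERNEL);
    	if (!kbufs)
    		return -ENOMEM;

    	if (copy_from_user(kbufs, kreq.ubufs, sizeof(*kbufs) * kreq.num)) {
    		kfree(kbufs);
    		return -EFAULT;
    	}

    	*out_bufs = kbufs;
    	*out_num = kreq.num;
    	return 0;
    }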
|
/Linux-v6.1/kernel/trace/ |
D | trace_hwlat.c |
    165  struct hwlat_kthread_data *kdata = get_cpu_data();  in trace_hwlat_callback() local
    167  if (!kdata->kthread)  in trace_hwlat_callback()
    176  kdata->nmi_ts_start = time_get();  in trace_hwlat_callback()
    178  kdata->nmi_total_ts += time_get() - kdata->nmi_ts_start;  in trace_hwlat_callback()
    182  kdata->nmi_count++;  in trace_hwlat_callback()
    203  struct hwlat_kthread_data *kdata = get_cpu_data();  in get_sample() local
    216  kdata->nmi_total_ts = 0;  in get_sample()
    217  kdata->nmi_count = 0;  in get_sample()
    287  if (kdata->nmi_total_ts)  in get_sample()
    288  do_div(kdata->nmi_total_ts, NSEC_PER_USEC);  in get_sample()
    [all …]
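Here kdata is the per-CPU hwlat bookkeeping: NMI time is accumulated in nanoseconds and converted to microseconds in place with do_div(), which divides a 64-bit value by a 32-bit divisor and returns the remainder. A small sketch of that accounting step with a hypothetical struct (demo_nmi_stats is not the hwlat structure):

    #include <linux/math64.h>
    #include <linux/time64.h>	/* NSEC_PER_USEC */
    #include <linux/types.h>

    struct demo_nmi_stats {
    	u64 nmi_total_ts;	/* accumulated NMI time, in nanoseconds */
    	int nmi_count;
    };

    /* Sketch: fold one NMI interval into the stats, then report the total in microseconds. */
    static u64 demo_nmi_report_usecs(struct demo_nmi_stats *s, u64 nmi_ns)
    {
    	s->nmi_total_ts += nmi_ns;
    	s->nmi_count++;

    	if (s->nmi_total_ts)
    		do_div(s->nmi_total_ts, NSEC_PER_USEC);	/* in-place 64-by-32 divide */

    	return s->nmi_total_ts;
    }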
|
/Linux-v6.1/kernel/ |
D | capability.c |
    167  struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];  in SYSCALL_DEFINE2() local
    171  kdata[i].effective = pE.cap[i];  in SYSCALL_DEFINE2()
    172  kdata[i].permitted = pP.cap[i];  in SYSCALL_DEFINE2()
    173  kdata[i].inheritable = pI.cap[i];  in SYSCALL_DEFINE2()
    195  if (copy_to_user(dataptr, kdata, tocopy  in SYSCALL_DEFINE2()
    224  struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];  in SYSCALL_DEFINE2() local
    243  if (copybytes > sizeof(kdata))  in SYSCALL_DEFINE2()
    246  if (copy_from_user(&kdata, data, copybytes))  in SYSCALL_DEFINE2()
    250  effective.cap[i] = kdata[i].effective;  in SYSCALL_DEFINE2()
    251  permitted.cap[i] = kdata[i].permitted;  in SYSCALL_DEFINE2()
    [all …]
|
/Linux-v6.1/drivers/dma-buf/ |
D | dma-heap.c |
    130  char *kdata = stack_kdata;  in dma_heap_ioctl() local
    155  kdata = kmalloc(ksize, GFP_KERNEL);  in dma_heap_ioctl()
    156  if (!kdata)  in dma_heap_ioctl()
    160  if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {  in dma_heap_ioctl()
    167  memset(kdata + in_size, 0, ksize - in_size);  in dma_heap_ioctl()
    171  ret = dma_heap_ioctl_allocate(file, kdata);  in dma_heap_ioctl()
    178  if (copy_to_user((void __user *)arg, kdata, out_size) != 0)  in dma_heap_ioctl()
    181  if (kdata != stack_kdata)  in dma_heap_ioctl()
    182  kfree(kdata);  in dma_heap_ioctl()
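Several entries in this listing (dma_heap_ioctl() here, plus drm_ioctl(), _hl_ioctl() and kfd_ioctl() further down) share the same argument-marshalling idiom: a small on-stack buffer for common commands, kmalloc for larger ones, copy-in of the caller-supplied bytes, zeroing of the tail, dispatch, copy-out, and a conditional kfree. A condensed sketch of that idiom with hypothetical names (demo_ioctl and its parameters are invented; only the kmalloc/copy_*_user calls are real kernel APIs), assuming usize <= asize has already been verified as the real drivers do:

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/uaccess.h>

    /* usize: bytes userspace passed; asize: size the kernel-side handler expects. */
    static long demo_ioctl(void __user *uarg, unsigned int usize, unsigned int asize,
    		       long (*func)(void *kdata))
    {
    	char stack_kdata[128];
    	char *kdata = stack_kdata;
    	long ret;

    	if (asize > sizeof(stack_kdata)) {	/* large command: fall back to the heap */
    		kdata = kmalloc(asize, GFP_KERNEL);
    		if (!kdata)
    			return -ENOMEM;
    	}

    	if (copy_from_user(kdata, uarg, usize)) {
    		ret = -EFAULT;
    		goto out;
    	}
    	memset(kdata + usize, 0, asize - usize);	/* zero the tail userspace did not supply */

    	ret = func(kdata);			/* dispatch to the per-command handler */

    	if (!ret && copy_to_user(uarg, kdata, usize))
    		ret = -EFAULT;
    out:
    	if (kdata != stack_kdata)		/* only free what was actually allocated */
    		kfree(kdata);
    	return ret;
    }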
|
/Linux-v6.1/net/ipv4/ |
D | bpf_tcp_ca.c |
    221  void *kdata, const void *udata)  in bpf_tcp_ca_init_member() argument
    228  tcp_ca = (struct tcp_congestion_ops *)kdata;  in bpf_tcp_ca_init_member()
    257  static int bpf_tcp_ca_reg(void *kdata)  in bpf_tcp_ca_reg() argument
    259  return tcp_register_congestion_control(kdata);  in bpf_tcp_ca_reg()
    262  static void bpf_tcp_ca_unreg(void *kdata)  in bpf_tcp_ca_unreg() argument
    264  tcp_unregister_congestion_control(kdata);  in bpf_tcp_ca_unreg()
|
/Linux-v6.1/drivers/gpu/drm/ |
D | drm_ioctl.c |
    771  long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata,  in drm_ioctl_kernel() argument
    788  retcode = func(dev, kdata, file_priv);  in drm_ioctl_kernel()
    791  retcode = func(dev, kdata, file_priv);  in drm_ioctl_kernel()
    821  char *kdata = NULL;  in drm_ioctl() local
    874  kdata = stack_kdata;  in drm_ioctl()
    876  kdata = kmalloc(ksize, GFP_KERNEL);  in drm_ioctl()
    877  if (!kdata) {  in drm_ioctl()
    883  if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {  in drm_ioctl()
    889  memset(kdata + in_size, 0, ksize - in_size);  in drm_ioctl()
    891  retcode = drm_ioctl_kernel(filp, func, kdata, ioctl->flags);  in drm_ioctl()
    [all …]
|
/Linux-v6.1/kernel/bpf/ |
D | bpf_struct_ops.c |
    361  void *udata, *kdata;  in bpf_struct_ops_map_update_elem() local
    401  kdata = &kvalue->data;  in bpf_struct_ops_map_update_elem()
    416  *(void **)(kdata + moff) = BPF_MODULE_OWNER;  in bpf_struct_ops_map_update_elem()
    420  err = st_ops->init_member(t, member, kdata, udata);  in bpf_struct_ops_map_update_elem()
    487  *(void **)(kdata + moff) = image;  in bpf_struct_ops_map_update_elem()
    499  err = st_ops->reg(kdata);  in bpf_struct_ops_map_update_elem()
    661  bool bpf_struct_ops_get(const void *kdata)  in bpf_struct_ops_get() argument
    665  kvalue = container_of(kdata, struct bpf_struct_ops_value, data);  in bpf_struct_ops_get()
    678  void bpf_struct_ops_put(const void *kdata)  in bpf_struct_ops_put() argument
    682  kvalue = container_of(kdata, struct bpf_struct_ops_value, data);  in bpf_struct_ops_put()
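In the struct_ops core, kdata is the kernel-side copy of the map value: bpf_struct_ops_map_update_elem() fills it member by member (via st_ops->init_member() and trampoline images), hands it to st_ops->reg(), and the get/put helpers later recover the wrapping bpf_struct_ops_value with container_of(). A toy set of the three kdata-taking callbacks whose prototypes appear in the bpf.h entry below, with hypothetical names (demo_*); this sketches the callback contract only and is not a registrable struct_ops like the bpf_tcp_ca or bpf_dummy_struct_ops entries in this listing:

    #include <linux/bpf.h>
    #include <linux/btf.h>

    /* Hypothetical ops structure that a struct_ops map value would describe. */
    struct demo_ops {
    	int (*handle)(int arg);
    	char name[16];
    };

    static struct demo_ops *registered;

    /* kdata is the kernel copy being built, udata the user-supplied map value. */
    static int demo_init_member(const struct btf_type *t,
    			    const struct btf_member *member,
    			    void *kdata, const void *udata)
    {
    	/* Returning 0 lets the core handle func members itself; a real
    	 * implementation would validate and copy non-func members here. */
    	return 0;
    }

    static int demo_reg(void *kdata)
    {
    	registered = kdata;	/* kdata stays valid until unreg() is called */
    	return 0;
    }

    static void demo_unreg(void *kdata)
    {
    	if (registered == kdata)
    		registered = NULL;
    }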
|
/Linux-v6.1/drivers/gpu/drm/radeon/ |
D | radeon_cs.c |
    109  r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];  in radeon_cs_parser_relocs()
    350  p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);  in radeon_cs_parser_init()
    352  if (p->chunks[i].kdata == NULL) {  in radeon_cs_parser_init()
    355  if (copy_from_user(p->chunks[i].kdata, cdata, size)) {  in radeon_cs_parser_init()
    359  p->cs_flags = p->chunks[i].kdata[0];  in radeon_cs_parser_init()
    361  ring = p->chunks[i].kdata[1];  in radeon_cs_parser_init()
    363  priority = (s32)p->chunks[i].kdata[2];  in radeon_cs_parser_init()
    454  kvfree(parser->chunks[i].kdata);  in radeon_cs_parser_fini()
    658  if (ib_chunk->kdata)  in radeon_cs_ib_fill()
    659  memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);  in radeon_cs_ib_fill()
    [all …]
|
D | radeon.h |
    1051  uint32_t *kdata;  member
    1091  if (ibc->kdata)  in radeon_get_ib_value()
    1092  return ibc->kdata[idx];  in radeon_get_ib_value()
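In the radeon (and the amdgpu entry below) command-stream code, kdata is the kernel copy of a CS chunk, allocated with kvmalloc_array() and filled with copy_from_user(); the accessor prefers that copy and otherwise falls back to the mapped indirect buffer. A sketch of that accessor pattern with hypothetical types (demo_chunk/demo_parser are invented, not the radeon structures):

    #include <linux/types.h>

    struct demo_chunk {
    	u32 length_dw;
    	u32 *kdata;	/* kernel copy of the chunk, NULL if only user-mapped */
    };

    struct demo_parser {
    	struct demo_chunk *ib_chunk;
    	u32 *ib_ptr;	/* mapped indirect buffer */
    };

    /* Sketch: read one dword, preferring the kernel copy over the mapped IB. */
    static u32 demo_get_ib_value(struct demo_parser *p, int idx)
    {
    	struct demo_chunk *ibc = p->ib_chunk;

    	if (ibc->kdata)
    		return ibc->kdata[idx];
    	return p->ib_ptr[idx];
    }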
|
/Linux-v6.1/net/bpf/ |
D | bpf_dummy_struct_ops.c |
    195  void *kdata, const void *udata)  in bpf_dummy_init_member() argument
    200  static int bpf_dummy_reg(void *kdata)  in bpf_dummy_reg() argument
    205  static void bpf_dummy_unreg(void *kdata)  in bpf_dummy_unreg() argument
|
/Linux-v6.1/arch/arm64/kernel/ |
D | ptrace.c |
    1859  static int compat_ptrace_hbp_get_resource_info(u32 *kdata)  in compat_ptrace_hbp_get_resource_info() argument
    1877  *kdata = reg;  in compat_ptrace_hbp_get_resource_info()
    1884  u32 *kdata)  in compat_ptrace_hbp_get() argument
    1893  *kdata = (u32)addr;  in compat_ptrace_hbp_get()
    1896  *kdata = ctrl;  in compat_ptrace_hbp_get()
    1905  u32 *kdata)  in compat_ptrace_hbp_set() argument
    1913  addr = *kdata;  in compat_ptrace_hbp_set()
    1916  ctrl = *kdata;  in compat_ptrace_hbp_set()
    1927  u32 kdata;  in compat_ptrace_gethbpregs() local
    1931  ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);  in compat_ptrace_gethbpregs()
    [all …]
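In the compat ptrace path, kdata is a single u32 scratch word: the hbp helpers fill it from a breakpoint's address or control register (or parse it in the set direction), and the caller then moves it to or from the 32-bit tracee with put_user()/get_user(). A condensed sketch of the get direction with hypothetical helpers (demo_* names are invented; only put_user() is the real API):

    #include <linux/types.h>
    #include <linux/uaccess.h>

    /* Hypothetical accessor standing in for a compat hbp "get" helper. */
    static int demo_hbp_get(unsigned long idx, u32 *kdata)
    {
    	if (idx & 1)
    		*kdata = 0xdead0000 | (u32)idx;	/* pretend control word */
    	else
    		*kdata = 0x1000 + (u32)idx;	/* pretend address word */
    	return 0;
    }

    /* Sketch: read one hardware-breakpoint word and hand it to a 32-bit tracee. */
    static int demo_gethbpreg(unsigned long idx, u32 __user *uptr)
    {
    	u32 kdata;
    	int ret;

    	ret = demo_hbp_get(idx, &kdata);
    	if (!ret)
    		ret = put_user(kdata, uptr);	/* copy the single word out */
    	return ret;
    }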
|
/Linux-v6.1/drivers/gpu/drm/amd/amdgpu/ |
D | amdgpu_cs.c |
    234  p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),  in amdgpu_cs_pass1()
    236  if (p->chunks[i].kdata == NULL) {  in amdgpu_cs_pass1()
    242  if (copy_from_user(p->chunks[i].kdata, cdata, size)) {  in amdgpu_cs_pass1()
    254  ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);  in amdgpu_cs_pass1()
    263  ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,  in amdgpu_cs_pass1()
    273  ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);  in amdgpu_cs_pass1()
    326  kvfree(p->chunks[i].kdata);  in amdgpu_cs_pass1()
    341  struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;  in amdgpu_cs_p2_ib()
    394  struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;  in amdgpu_cs_p2_dependencies()
    467  struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;  in amdgpu_cs_p2_syncobj_in()
    [all …]
|
D | amdgpu_cs.h |
    37  void *kdata;  member
|
/Linux-v6.1/drivers/misc/habanalabs/common/ |
D | habanalabs_ioctl.c |
    994  char *kdata = NULL;  in _hl_ioctl() local
    1018  kdata = stack_kdata;  in _hl_ioctl()
    1020  kdata = kzalloc(asize, GFP_KERNEL);  in _hl_ioctl()
    1021  if (!kdata) {  in _hl_ioctl()
    1029  if (copy_from_user(kdata, (void __user *)arg, usize)) {  in _hl_ioctl()
    1034  memset(kdata, 0, usize);  in _hl_ioctl()
    1037  retcode = func(hpriv, kdata);  in _hl_ioctl()
    1039  if ((cmd & IOC_OUT) && copy_to_user((void __user *)arg, kdata, usize))  in _hl_ioctl()
    1047  if (kdata != stack_kdata)  in _hl_ioctl()
    1048  kfree(kdata);  in _hl_ioctl()
|
/Linux-v6.1/drivers/net/ethernet/netronome/nfp/flower/ |
D | conntrack.c |
    793  u8 *key, *msk, *kdata, *mdata;  in nfp_fl_ct_add_offload() local
    832  kdata = flow_pay->unmasked_data;  in nfp_fl_ct_add_offload()
    836  key = kdata + offset;  in nfp_fl_ct_add_offload()
    844  key = kdata + offset;  in nfp_fl_ct_add_offload()
    857  key = kdata + offset;  in nfp_fl_ct_add_offload()
    880  key = kdata + offset;  in nfp_fl_ct_add_offload()
    890  key = kdata + offset;  in nfp_fl_ct_add_offload()
    906  key = kdata + offset;  in nfp_fl_ct_add_offload()
    917  key = kdata + offset;  in nfp_fl_ct_add_offload()
    928  key = kdata + offset;  in nfp_fl_ct_add_offload()
    [all …]
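Here kdata and mdata point at the start of the flow's unmasked and masked key blobs, and each match section is written at a running offset, which is why key = kdata + offset repeats for every layer the firmware expects. A schematic sketch of that layout step with hypothetical names (demo_append_section is invented):

    #include <linux/string.h>
    #include <linux/types.h>

    /* Sketch: append one match section to the unmasked (key) and masked (msk) blobs. */
    static u32 demo_append_section(u8 *kdata, u8 *mdata, u32 offset,
    			       const void *key_src, const void *msk_src, u32 len)
    {
    	u8 *key = kdata + offset;	/* section's slot in the unmasked blob */
    	u8 *msk = mdata + offset;	/* same offset in the masked blob */

    	memcpy(key, key_src, len);
    	memcpy(msk, msk_src, len);

    	return offset + len;		/* the next section starts right after this one */
    }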
|
/Linux-v6.1/drivers/gpu/drm/amd/amdkfd/ |
D | kfd_chardev.c |
    2763  char *kdata = NULL;  in kfd_ioctl() local
    2830  kdata = stack_kdata;  in kfd_ioctl()
    2832  kdata = kmalloc(asize, GFP_KERNEL);  in kfd_ioctl()
    2833  if (!kdata) {  in kfd_ioctl()
    2839  memset(kdata + usize, 0, asize - usize);  in kfd_ioctl()
    2843  if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {  in kfd_ioctl()
    2848  memset(kdata, 0, usize);  in kfd_ioctl()
    2851  retcode = func(filep, process, kdata);  in kfd_ioctl()
    2854  if (copy_to_user((void __user *)arg, kdata, usize) != 0)  in kfd_ioctl()
    2862  if (kdata != stack_kdata)  in kfd_ioctl()
    [all …]
|
/Linux-v6.1/include/linux/ |
D | bpf.h |
    1313  void *kdata, const void *udata);
    1314  int (*reg)(void *kdata);
    1315  void (*unreg)(void *kdata);
    1328  bool bpf_struct_ops_get(const void *kdata);
    1329  void bpf_struct_ops_put(const void *kdata);
|
/Linux-v6.1/tools/power/pm-graph/ |
D | sleepgraph.py |
    605  def defaultKprobe(self, name, kdata):  argument
    606  k = kdata
|