/Linux-v5.4/arch/mips/ar7/
  prom.c
    149  struct psp_env_chunk *chunks = (struct psp_env_chunk *)psp_env_data;  in parse_psp_env() local
    151  memcpy_fromio(chunks, psp_env_base, PSP_ENV_SIZE);  in parse_psp_env()
    156  if ((chunks[i].num == 0xff) || ((i + chunks[i].len) > n))  in parse_psp_env()
    158  value = chunks[i].data;  in parse_psp_env()
    159  if (chunks[i].num) {  in parse_psp_env()
    160  name = lookup_psp_var_map(chunks[i].num);  in parse_psp_env()
    167  i += chunks[i].len;  in parse_psp_env()

/Linux-v5.4/drivers/gpu/drm/radeon/
  radeon_cs.c
    296  chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);  in radeon_cs_parser_init()
    303  p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);  in radeon_cs_parser_init()
    304  if (p->chunks == NULL) {  in radeon_cs_parser_init()
    317  p->chunks[i].length_dw = user_chunk.length_dw;  in radeon_cs_parser_init()
    319  p->chunk_relocs = &p->chunks[i];  in radeon_cs_parser_init()
    322  p->chunk_ib = &p->chunks[i];  in radeon_cs_parser_init()
    324  if (p->chunks[i].length_dw == 0)  in radeon_cs_parser_init()
    328  p->chunk_const_ib = &p->chunks[i];  in radeon_cs_parser_init()
    330  if (p->chunks[i].length_dw == 0)  in radeon_cs_parser_init()
    334  p->chunk_flags = &p->chunks[i];  in radeon_cs_parser_init()
    [all …]

/Linux-v5.4/drivers/net/ethernet/netronome/nfp/nfpcore/
  nfp_nsp.c
    504  } *chunks;  in nfp_nsp_command_buf_dma_sg() local
    516  chunks = kzalloc(array_size(sizeof(*chunks), nseg), GFP_KERNEL);  in nfp_nsp_command_buf_dma_sg()
    517  if (!chunks)  in nfp_nsp_command_buf_dma_sg()
    525  chunks[i].chunk = kmalloc(chunk_size,  in nfp_nsp_command_buf_dma_sg()
    527  if (!chunks[i].chunk)  in nfp_nsp_command_buf_dma_sg()
    530  chunks[i].len = min_t(u64, chunk_size, max_size - off);  in nfp_nsp_command_buf_dma_sg()
    535  memcpy(chunks[i].chunk, arg->in_buf + off, coff);  in nfp_nsp_command_buf_dma_sg()
    537  memset(chunks[i].chunk + coff, 0, chunk_size - coff);  in nfp_nsp_command_buf_dma_sg()
    539  off += chunks[i].len;  in nfp_nsp_command_buf_dma_sg()
    547  addr = dma_map_single(dev, chunks[i].chunk, chunks[i].len,  in nfp_nsp_command_buf_dma_sg()
    [all …]

/Linux-v5.4/arch/x86/kernel/cpu/resctrl/
  monitor.c
    219  u64 shift = 64 - MBM_CNTR_WIDTH, chunks;  in mbm_overflow_count() local
    221  chunks = (cur_msr << shift) - (prev_msr << shift);  in mbm_overflow_count()
    222  return chunks >>= shift;  in mbm_overflow_count()
    228  u64 chunks, tval;  in __mon_event_count() local
    259  chunks = mbm_overflow_count(m->prev_msr, tval);  in __mon_event_count()
    260  m->chunks += chunks;  in __mon_event_count()
    263  rr->val += m->chunks;  in __mon_event_count()
    275  u64 tval, cur_bw, chunks;  in mbm_bw_count() local
    281  chunks = mbm_overflow_count(m->prev_bw_msr, tval);  in mbm_bw_count()
    282  m->chunks_bw += chunks;  in mbm_bw_count()
    [all …]

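Note: the mbm_overflow_count() hits above shift both MSR samples left by (64 - MBM_CNTR_WIDTH) before subtracting, so the unsigned subtraction wraps exactly at the hardware counter's width and the delta stays correct across a counter overflow. A minimal, self-contained sketch of the same trick; the 24-bit CNTR_WIDTH is an illustrative assumption, not the kernel's MBM_CNTR_WIDTH constant:

    /* Sketch: delta of a free-running counter narrower than 64 bits. */
    #include <stdint.h>
    #include <stdio.h>

    #define CNTR_WIDTH 24   /* illustrative width, not the kernel's value */

    static uint64_t overflow_count(uint64_t prev_msr, uint64_t cur_msr)
    {
            uint64_t shift = 64 - CNTR_WIDTH, chunks;

            /* Move the counter bits to the top of the word; the unsigned
             * subtraction then wraps at the counter width, not at 2^64. */
            chunks = (cur_msr << shift) - (prev_msr << shift);
            return chunks >> shift;
    }

    int main(void)
    {
            /* Counter wrapped from 0xfffffa past 0xffffff back to 0x000005:
             * the true delta is 11 even though cur < prev numerically. */
            printf("%llu\n",
                   (unsigned long long)overflow_count(0xfffffa, 0x000005));
            return 0;
    }
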
/Linux-v5.4/drivers/staging/comedi/drivers/ni_routing/tools/
  convert_csv_to_c.py
    232  chunks = [ self.output_file_top,
    248  chunks.append('\t&{},'.format(dev_table_name))
    277  chunks.append('\tNULL,') # terminate list
    278  chunks.append('};')
    279  return '\n'.join(chunks)
    423  chunks = [ self.output_file_top,
    439  chunks.append('\t&{},'.format(fam_table_name))
    469  chunks.append('\tNULL,') # terminate list
    470  chunks.append('};')
    471  return '\n'.join(chunks)

/Linux-v5.4/scripts/gdb/linux/
  timerlist.py
    162  chunks = []
    168  chunks.append(buf[start:end])
    170  chunks.append(',')
    174  chunks[0] = chunks[0][0] # Cut off the first 0
    176  return "".join(chunks)

/Linux-v5.4/drivers/infiniband/hw/usnic/
  usnic_vnic.c
    45   struct usnic_vnic_res_chunk chunks[USNIC_VNIC_RES_TYPE_MAX];  member
    118  for (i = 0; i < ARRAY_SIZE(vnic->chunks); i++) {  in usnic_vnic_dump()
    119  chunk = &vnic->chunks[i];  in usnic_vnic_dump()
    223  return vnic->chunks[type].cnt;  in usnic_vnic_res_cnt()
    229  return vnic->chunks[type].free_cnt;  in usnic_vnic_res_free_cnt()
    255  src = &vnic->chunks[type];  in usnic_vnic_get_resources()
    287  vnic->chunks[res->type].free_cnt++;  in usnic_vnic_put_resources()
    383  &vnic->chunks[res_type]);  in usnic_vnic_discover_resources()
    392  usnic_vnic_free_res_chunk(&vnic->chunks[res_type]);  in usnic_vnic_discover_resources()
    428  usnic_vnic_free_res_chunk(&vnic->chunks[res_type]);  in usnic_vnic_release_resources()

/Linux-v5.4/lib/
  genalloc.c
    158  INIT_LIST_HEAD(&pool->chunks);  in gen_pool_create()
    201  list_add_rcu(&chunk->next_chunk, &pool->chunks);  in gen_pool_add_owner()
    221  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {  in gen_pool_virt_to_phys()
    247  list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {  in gen_pool_destroy()
    295  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {  in gen_pool_alloc_algo_owner()
    501  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {  in gen_pool_free_owner()
    536  list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)  in gen_pool_for_each_chunk()
    559  list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {  in addr_in_gen_pool()
    583  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)  in gen_pool_avail()
    602  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)  in gen_pool_size()

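Note: the lib/genalloc.c hits above all walk the pool->chunks list, where each chunk is one contiguous region handed to the pool. A hedged kernel-context sketch of typical gen_pool usage; sram_pool_demo() and its SRAM addresses/sizes are made-up placeholders, only the gen_pool_* calls are the real API:

    #include <linux/genalloc.h>
    #include <linux/errno.h>
    #include <linux/sizes.h>

    /* Sketch: manage a hypothetical 64 KiB on-chip SRAM region with a
     * gen_pool; gen_pool_add_virt() appends one chunk to pool->chunks. */
    static int sram_pool_demo(unsigned long sram_virt, phys_addr_t sram_phys)
    {
            struct gen_pool *pool;
            unsigned long buf;

            pool = gen_pool_create(8, -1);          /* 256-byte granularity */
            if (!pool)
                    return -ENOMEM;

            if (gen_pool_add_virt(pool, sram_virt, sram_phys, SZ_64K, -1)) {
                    gen_pool_destroy(pool);
                    return -ENOMEM;
            }

            buf = gen_pool_alloc(pool, SZ_4K);      /* carve 4 KiB out of the chunk */
            if (buf)
                    gen_pool_free(pool, buf, SZ_4K);

            gen_pool_destroy(pool);
            return 0;
    }
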
/Linux-v5.4/tools/testing/selftests/drivers/net/mlxsw/spectrum/
  devlink_lib_spectrum.sh
    92   devlink_resource_size_set 32000 kvd linear chunks
    101  devlink_resource_size_set 32000 kvd linear chunks
    110  devlink_resource_size_set 49152 kvd linear chunks

/Linux-v5.4/net/sctp/
  chunk.c
    43   INIT_LIST_HEAD(&msg->chunks);  in sctp_datamsg_init()
    65   list_for_each_entry(chunk, &msg->chunks, frag_list)  in sctp_datamsg_free()
    84   list_for_each_safe(pos, temp, &msg->chunks) {  in sctp_datamsg_destroy()
    282  list_add_tail(&chunk->frag_list, &msg->chunks);  in sctp_datamsg_from_user()
    291  list_for_each_safe(pos, temp, &msg->chunks) {  in sctp_datamsg_from_user()

  auth.c
    186  struct sctp_chunks_param *chunks,  in sctp_auth_make_key_vector() argument
    197  if (chunks)  in sctp_auth_make_key_vector()
    198  chunks_len = ntohs(chunks->param_hdr.length);  in sctp_auth_make_key_vector()
    209  if (chunks) {  in sctp_auth_make_key_vector()
    210  memcpy(new->data + offset, chunks, chunks_len);  in sctp_auth_make_key_vector()
    655  switch (param->chunks[i]) {  in __sctp_auth_cid()
    663  if (param->chunks[i] == chunk)  in __sctp_auth_cid()
    777  p->chunks[nchunks] = chunk_id;  in sctp_auth_ep_add_chunkid()

/Linux-v5.4/drivers/gpu/drm/amd/amdgpu/
  amdgpu_cs.c
    138  chunk_array_user = u64_to_user_ptr(cs->in.chunks);  in amdgpu_cs_parser_init()
    146  p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),  in amdgpu_cs_parser_init()
    148  if (!p->chunks) {  in amdgpu_cs_parser_init()
    165  p->chunks[i].chunk_id = user_chunk.chunk_id;  in amdgpu_cs_parser_init()
    166  p->chunks[i].length_dw = user_chunk.length_dw;  in amdgpu_cs_parser_init()
    168  size = p->chunks[i].length_dw;  in amdgpu_cs_parser_init()
    171  p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);  in amdgpu_cs_parser_init()
    172  if (p->chunks[i].kdata == NULL) {  in amdgpu_cs_parser_init()
    178  if (copy_from_user(p->chunks[i].kdata, cdata, size)) {  in amdgpu_cs_parser_init()
    183  switch (p->chunks[i].chunk_id) {  in amdgpu_cs_parser_init()
    [all …]

/Linux-v5.4/drivers/infiniband/hw/efa/
  efa_verbs.c
    94    struct pbl_chunk *chunks;  member
    1110  chunk_list->chunks = kcalloc(chunk_list_size,  in pbl_chunk_list_create()
    1111  sizeof(*chunk_list->chunks),  in pbl_chunk_list_create()
    1113  if (!chunk_list->chunks)  in pbl_chunk_list_create()
    1122  chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL);  in pbl_chunk_list_create()
    1123  if (!chunk_list->chunks[i].buf)  in pbl_chunk_list_create()
    1126  chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE;  in pbl_chunk_list_create()
    1128  chunk_list->chunks[chunk_list_size - 1].length =  in pbl_chunk_list_create()
    1135  cur_chunk_buf = chunk_list->chunks[0].buf;  in pbl_chunk_list_create()
    1143  cur_chunk_buf = chunk_list->chunks[chunk_idx].buf;  in pbl_chunk_list_create()
    [all …]

/Linux-v5.4/kernel/
  audit_tree.c
    17   struct list_head chunks;  member
    101  INIT_LIST_HEAD(&tree->chunks);  in alloc_tree()
    437  list_add(&chunk->owners[0].list, &tree->chunks);  in create_chunk()
    509  list_add(&p->list, &tree->chunks);  in tag_chunk()
    574  while (!list_empty(&victim->chunks)) {  in prune_tree_chunks()
    579  p = list_first_entry(&victim->chunks, struct node, list);  in prune_tree_chunks()
    620  for (p = tree->chunks.next; p != &tree->chunks; p = q) {  in trim_marked()
    625  list_add(p, &tree->chunks);  in trim_marked()
    708  list_for_each_entry(node, &tree->chunks, list) {  in audit_trim_trees()
    847  list_for_each_entry(node, &tree->chunks, list)  in audit_add_tree_rule()
    [all …]

/Linux-v5.4/mm/
  zbud.c
    357  int chunks, i, freechunks;  in zbud_alloc() local
    366  chunks = size_to_chunks(size);  in zbud_alloc()
    371  for_each_unbuddied_list(i, chunks) {  in zbud_alloc()
    396  zhdr->first_chunks = chunks;  in zbud_alloc()
    398  zhdr->last_chunks = chunks;  in zbud_alloc()

/Linux-v5.4/drivers/md/
  md-bitmap.c
    779   unsigned long chunks, int with_super,  in md_bitmap_storage_alloc() argument
    786   bytes = DIV_ROUND_UP(chunks, 8);  in md_bitmap_storage_alloc()
    1061  unsigned long i, chunks, index, oldindex, bit, node_offset = 0;  in md_bitmap_init_from_disk() local
    1071  chunks = bitmap->counts.chunks;  in md_bitmap_init_from_disk()
    1078  for (i = 0; i < chunks ; i++) {  in md_bitmap_init_from_disk()
    1109  for (i = 0; i < chunks; i++) {  in md_bitmap_init_from_disk()
    1173  bit_cnt, chunks);  in md_bitmap_init_from_disk()
    1293  for (j = 0; j < counts->chunks; j++) {  in md_bitmap_daemon_work()
    1996  for (j = 0; j < counts->chunks; j++) {  in md_bitmap_copy_from_slot()
    2068  unsigned long chunks;  in md_bitmap_resize() local
    [all …]

/Linux-v5.4/net/xdp/
  xdp_umem.c
    344  unsigned int chunks, chunks_per_page;  in xdp_umem_reg() local
    375  chunks = (unsigned int)div_u64(size, chunk_size);  in xdp_umem_reg()
    376  if (chunks == 0)  in xdp_umem_reg()
    381  if (chunks < chunks_per_page || chunks % chunks_per_page)  in xdp_umem_reg()

/Linux-v5.4/drivers/net/wireless/ti/wlcore/
  boot.c
    237  u32 chunks, addr, len;  in wlcore_boot_upload_firmware() local
    242  chunks = be32_to_cpup((__be32 *) fw);  in wlcore_boot_upload_firmware()
    245  wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks);  in wlcore_boot_upload_firmware()
    247  while (chunks--) {  in wlcore_boot_upload_firmware()
    258  chunks, addr, len);  in wlcore_boot_upload_firmware()

/Linux-v5.4/drivers/virt/vboxguest/
  vboxguest_core.c
    355   u32 i, chunks;  in vbg_balloon_work() local
    383   chunks = req->balloon_chunks;  in vbg_balloon_work()
    384   if (chunks > gdev->mem_balloon.max_chunks) {  in vbg_balloon_work()
    386   __func__, chunks, gdev->mem_balloon.max_chunks);  in vbg_balloon_work()
    390   if (chunks > gdev->mem_balloon.chunks) {  in vbg_balloon_work()
    392   for (i = gdev->mem_balloon.chunks; i < chunks; i++) {  in vbg_balloon_work()
    397   gdev->mem_balloon.chunks++;  in vbg_balloon_work()
    401   for (i = gdev->mem_balloon.chunks; i-- > chunks;) {  in vbg_balloon_work()
    406   gdev->mem_balloon.chunks--;  in vbg_balloon_work()
    1466  balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks;  in vbg_ioctl_check_balloon()

/Linux-v5.4/drivers/misc/habanalabs/
  command_submission.c
    465  static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,  in _hl_cs_ioctl() argument
    495  if (copy_from_user(cs_chunk_array, chunks, size_to_copy)) {  in _hl_cs_ioctl()
    615  void __user *chunks;  in hl_cs_ioctl() local
    634  chunks = (void __user *)(uintptr_t)args->in.chunks_restore;  in hl_cs_ioctl()
    668  rc = _hl_cs_ioctl(hpriv, chunks, num_chunks,  in hl_cs_ioctl()
    710  chunks = (void __user *)(uintptr_t)args->in.chunks_execute;  in hl_cs_ioctl()
    721  rc = _hl_cs_ioctl(hpriv, chunks, num_chunks, &cs_seq);  in hl_cs_ioctl()

/Linux-v5.4/Documentation/admin-guide/device-mapper/
  striped.rst
    6   device across one or more underlying devices. Data is written in "chunks",
    7   with consecutive chunks rotating among the underlying devices. This can

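Note: the striped.rst lines above describe how consecutive chunks rotate across the underlying devices; the same document defines the target's table format. An illustrative mapping (device names and sizes are made up) striping an 800 MiB logical device across two disks in 1 MiB (2048-sector) chunks; offsets and chunk size are in 512-byte sectors:

    # <start> <length> striped <#stripes> <chunk_size> [<device> <offset>]+
    0 1638400 striped 2 2048 /dev/sdb1 0 /dev/sdc1 0
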
  snapshot.rst
    15  In the first two cases, dm copies only the chunks of data that get
    37  A snapshot of the <origin> block device is created. Changed chunks of
    61  maps to entire chunks to will zero the corresponding exception(s) in
    81  Creates a merging snapshot that takes control of the changed chunks
    83  procedure, and merges these chunks back into the <origin>. Once merging

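Note: for the snapshot.rst excerpts above, the target's table line names the origin device, the COW store that holds the changed chunks, whether the store is persistent, and the chunk size in sectors. An illustrative table (devices made up) using a persistent store with 8 KiB (16-sector) chunks:

    # <start> <length> snapshot <origin> <COW device> <persistent?> <chunksize>
    0 3724544 snapshot /dev/sda1 /dev/sdb1 P 16
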
/Linux-v5.4/drivers/dma/sh/
  rcar-dmac.c
    79   struct list_head chunks;  member
    107  struct rcar_dmac_xfer_chunk chunks[0];  member
    115  ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \
    351  list_first_entry(&desc->chunks,  in rcar_dmac_chan_start_xfer()
    475  desc->running = list_first_entry(&desc->chunks,  in rcar_dmac_tx_submit()
    508  INIT_LIST_HEAD(&desc->chunks);  in rcar_dmac_desc_alloc()
    539  list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);  in rcar_dmac_desc_put()
    635  struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];  in rcar_dmac_xfer_chunk_alloc()
    733  list_for_each_entry(chunk, &desc->chunks, node) {  in rcar_dmac_fill_hwdesc()
    991  list_add_tail(&chunk->node, &desc->chunks);  in rcar_dmac_chan_prep_sg()
    [all …]

  shdma-base.c
    97   if (chunk->chunks == 1) {  in shdma_tx_submit()
    356  if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {  in __ld_cleanup()
    372  BUG_ON(desc->chunks != 1);  in __ld_cleanup()
    567  int chunks = 0;  in shdma_prep_sg() local
    572  chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);  in shdma_prep_sg()
    612  new->chunks = 1;  in shdma_prep_sg()
    614  new->chunks = chunks--;  in shdma_prep_sg()

/Linux-v5.4/drivers/mtd/nand/raw/
  davinci_nand.c
    660  int chunks = mtd->writesize / 512;  in davinci_nand_attach_chip() local
    662  if (!chunks || mtd->oobsize < 16) {  in davinci_nand_attach_chip()
    671  if (chunks == 1) {  in davinci_nand_attach_chip()
    673  } else if (chunks == 4 || chunks == 8) {  in davinci_nand_attach_chip()