Searched refs:chunks (Results 1 – 25 of 131) sorted by relevance

/Linux-v5.10/arch/mips/ar7/
prom.c:149 struct psp_env_chunk *chunks = (struct psp_env_chunk *)psp_env_data; in parse_psp_env() local
151 memcpy_fromio(chunks, psp_env_base, PSP_ENV_SIZE); in parse_psp_env()
156 if ((chunks[i].num == 0xff) || ((i + chunks[i].len) > n)) in parse_psp_env()
158 value = chunks[i].data; in parse_psp_env()
159 if (chunks[i].num) { in parse_psp_env()
160 name = lookup_psp_var_map(chunks[i].num); in parse_psp_env()
167 i += chunks[i].len; in parse_psp_env()
/Linux-v5.10/drivers/gpu/drm/radeon/
radeon_cs.c:296 chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks); in radeon_cs_parser_init()
303 p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL); in radeon_cs_parser_init()
304 if (p->chunks == NULL) { in radeon_cs_parser_init()
317 p->chunks[i].length_dw = user_chunk.length_dw; in radeon_cs_parser_init()
319 p->chunk_relocs = &p->chunks[i]; in radeon_cs_parser_init()
322 p->chunk_ib = &p->chunks[i]; in radeon_cs_parser_init()
324 if (p->chunks[i].length_dw == 0) in radeon_cs_parser_init()
328 p->chunk_const_ib = &p->chunks[i]; in radeon_cs_parser_init()
330 if (p->chunks[i].length_dw == 0) in radeon_cs_parser_init()
334 p->chunk_flags = &p->chunks[i]; in radeon_cs_parser_init()
[all …]
/Linux-v5.10/drivers/net/ethernet/netronome/nfp/nfpcore/
nfp_nsp.c:504 } *chunks; in nfp_nsp_command_buf_dma_sg() local
516 chunks = kzalloc(array_size(sizeof(*chunks), nseg), GFP_KERNEL); in nfp_nsp_command_buf_dma_sg()
517 if (!chunks) in nfp_nsp_command_buf_dma_sg()
525 chunks[i].chunk = kmalloc(chunk_size, in nfp_nsp_command_buf_dma_sg()
527 if (!chunks[i].chunk) in nfp_nsp_command_buf_dma_sg()
530 chunks[i].len = min_t(u64, chunk_size, max_size - off); in nfp_nsp_command_buf_dma_sg()
535 memcpy(chunks[i].chunk, arg->in_buf + off, coff); in nfp_nsp_command_buf_dma_sg()
537 memset(chunks[i].chunk + coff, 0, chunk_size - coff); in nfp_nsp_command_buf_dma_sg()
539 off += chunks[i].len; in nfp_nsp_command_buf_dma_sg()
547 addr = dma_map_single(dev, chunks[i].chunk, chunks[i].len, in nfp_nsp_command_buf_dma_sg()
[all …]
/Linux-v5.10/drivers/staging/comedi/drivers/ni_routing/tools/
convert_csv_to_c.py:232 chunks = [ self.output_file_top,
248 chunks.append('\t&{},'.format(dev_table_name))
277 chunks.append('\tNULL,') # terminate list
278 chunks.append('};')
279 return '\n'.join(chunks)
423 chunks = [ self.output_file_top,
439 chunks.append('\t&{},'.format(fam_table_name))
469 chunks.append('\tNULL,') # terminate list
470 chunks.append('};')
471 return '\n'.join(chunks)
/Linux-v5.10/arch/x86/kernel/cpu/resctrl/
monitor.c:219 u64 shift = 64 - width, chunks; in mbm_overflow_count() local
221 chunks = (cur_msr << shift) - (prev_msr << shift); in mbm_overflow_count()
222 return chunks >>= shift; in mbm_overflow_count()
228 u64 chunks, tval; in __mon_event_count() local
259 chunks = mbm_overflow_count(m->prev_msr, tval, rr->r->mbm_width); in __mon_event_count()
260 m->chunks += chunks; in __mon_event_count()
263 rr->val += m->chunks; in __mon_event_count()
275 u64 tval, cur_bw, chunks; in mbm_bw_count() local
281 chunks = mbm_overflow_count(m->prev_bw_msr, tval, rr->r->mbm_width); in mbm_bw_count()
282 cur_bw = (chunks * r->mon_scale) >> 20; in mbm_bw_count()
/Linux-v5.10/scripts/gdb/linux/
timerlist.py:162 chunks = []
168 chunks.append(buf[start:end])
170 chunks.append(',')
174 chunks[0] = chunks[0][0] # Cut off the first 0
176 return "".join(chunks)
/Linux-v5.10/drivers/infiniband/hw/usnic/
usnic_vnic.c:45 struct usnic_vnic_res_chunk chunks[USNIC_VNIC_RES_TYPE_MAX]; member
118 for (i = 0; i < ARRAY_SIZE(vnic->chunks); i++) { in usnic_vnic_dump()
119 chunk = &vnic->chunks[i]; in usnic_vnic_dump()
223 return vnic->chunks[type].cnt; in usnic_vnic_res_cnt()
229 return vnic->chunks[type].free_cnt; in usnic_vnic_res_free_cnt()
255 src = &vnic->chunks[type]; in usnic_vnic_get_resources()
287 vnic->chunks[res->type].free_cnt++; in usnic_vnic_put_resources()
383 &vnic->chunks[res_type]); in usnic_vnic_discover_resources()
392 usnic_vnic_free_res_chunk(&vnic->chunks[res_type]); in usnic_vnic_discover_resources()
428 usnic_vnic_free_res_chunk(&vnic->chunks[res_type]); in usnic_vnic_release_resources()
/Linux-v5.10/net/xdp/
xdp_umem.c:159 unsigned int chunks, chunks_rem; in xdp_umem_reg() local
194 chunks = (unsigned int)div_u64_rem(size, chunk_size, &chunks_rem); in xdp_umem_reg()
195 if (chunks == 0) in xdp_umem_reg()
207 umem->chunks = chunks; in xdp_umem_reg()
/Linux-v5.10/lib/
genalloc.c:158 INIT_LIST_HEAD(&pool->chunks); in gen_pool_create()
201 list_add_rcu(&chunk->next_chunk, &pool->chunks); in gen_pool_add_owner()
221 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_virt_to_phys()
247 list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { in gen_pool_destroy()
295 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_alloc_algo_owner()
501 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_free_owner()
536 list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) in gen_pool_for_each_chunk()
559 list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) { in gen_pool_has_addr()
584 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) in gen_pool_avail()
603 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) in gen_pool_size()
/Linux-v5.10/drivers/gpu/drm/amd/amdgpu/
amdgpu_cs.c:140 chunk_array_user = u64_to_user_ptr(cs->in.chunks); in amdgpu_cs_parser_init()
148 p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk), in amdgpu_cs_parser_init()
150 if (!p->chunks) { in amdgpu_cs_parser_init()
167 p->chunks[i].chunk_id = user_chunk.chunk_id; in amdgpu_cs_parser_init()
168 p->chunks[i].length_dw = user_chunk.length_dw; in amdgpu_cs_parser_init()
170 size = p->chunks[i].length_dw; in amdgpu_cs_parser_init()
173 p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); in amdgpu_cs_parser_init()
174 if (p->chunks[i].kdata == NULL) { in amdgpu_cs_parser_init()
180 if (copy_from_user(p->chunks[i].kdata, cdata, size)) { in amdgpu_cs_parser_init()
185 switch (p->chunks[i].chunk_id) { in amdgpu_cs_parser_init()
[all …]
/Linux-v5.10/tools/testing/selftests/drivers/net/mlxsw/spectrum/
devlink_lib_spectrum.sh:92 devlink_resource_size_set 32000 kvd linear chunks
101 devlink_resource_size_set 32000 kvd linear chunks
110 devlink_resource_size_set 49152 kvd linear chunks
/Linux-v5.10/net/sctp/
chunk.c:43 INIT_LIST_HEAD(&msg->chunks); in sctp_datamsg_init()
65 list_for_each_entry(chunk, &msg->chunks, frag_list) in sctp_datamsg_free()
81 list_for_each_safe(pos, temp, &msg->chunks) { in sctp_datamsg_destroy()
280 list_add_tail(&chunk->frag_list, &msg->chunks); in sctp_datamsg_from_user()
289 list_for_each_safe(pos, temp, &msg->chunks) { in sctp_datamsg_from_user()
auth.c:186 struct sctp_chunks_param *chunks, in sctp_auth_make_key_vector() argument
197 if (chunks) in sctp_auth_make_key_vector()
198 chunks_len = ntohs(chunks->param_hdr.length); in sctp_auth_make_key_vector()
209 if (chunks) { in sctp_auth_make_key_vector()
210 memcpy(new->data + offset, chunks, chunks_len); in sctp_auth_make_key_vector()
656 switch (param->chunks[i]) { in __sctp_auth_cid()
664 if (param->chunks[i] == chunk) in __sctp_auth_cid()
772 p->chunks[nchunks] = chunk_id; in sctp_auth_ep_add_chunkid()
/Linux-v5.10/kernel/
audit_tree.c:17 struct list_head chunks; member
101 INIT_LIST_HEAD(&tree->chunks); in alloc_tree()
435 list_add(&chunk->owners[0].list, &tree->chunks); in create_chunk()
507 list_add(&p->list, &tree->chunks); in tag_chunk()
572 while (!list_empty(&victim->chunks)) { in prune_tree_chunks()
577 p = list_first_entry(&victim->chunks, struct node, list); in prune_tree_chunks()
618 for (p = tree->chunks.next; p != &tree->chunks; p = q) { in trim_marked()
623 list_add(p, &tree->chunks); in trim_marked()
706 list_for_each_entry(node, &tree->chunks, list) { in audit_trim_trees()
845 list_for_each_entry(node, &tree->chunks, list) in audit_add_tree_rule()
[all …]
/Linux-v5.10/drivers/infiniband/hw/efa/
efa_verbs.c:92 struct pbl_chunk *chunks; member
1207 chunk_list->chunks = kcalloc(chunk_list_size, in pbl_chunk_list_create()
1208 sizeof(*chunk_list->chunks), in pbl_chunk_list_create()
1210 if (!chunk_list->chunks) in pbl_chunk_list_create()
1219 chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL); in pbl_chunk_list_create()
1220 if (!chunk_list->chunks[i].buf) in pbl_chunk_list_create()
1223 chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE; in pbl_chunk_list_create()
1225 chunk_list->chunks[chunk_list_size - 1].length = in pbl_chunk_list_create()
1232 cur_chunk_buf = chunk_list->chunks[0].buf; in pbl_chunk_list_create()
1240 cur_chunk_buf = chunk_list->chunks[chunk_idx].buf; in pbl_chunk_list_create()
[all …]
/Linux-v5.10/mm/
zbud.c:357 int chunks, i, freechunks; in zbud_alloc() local
366 chunks = size_to_chunks(size); in zbud_alloc()
370 for_each_unbuddied_list(i, chunks) { in zbud_alloc()
395 zhdr->first_chunks = chunks; in zbud_alloc()
397 zhdr->last_chunks = chunks; in zbud_alloc()
z3fold.c:707 short chunks = size_to_chunks(sz); in compact_single_buddy() local
719 chunks >= new_zhdr->start_middle) { in compact_single_buddy()
732 new_zhdr->first_chunks = chunks; in compact_single_buddy()
736 new_zhdr->middle_chunks = chunks; in compact_single_buddy()
742 new_zhdr->last_chunks = chunks; in compact_single_buddy()
886 int chunks = size_to_chunks(size), i; in __z3fold_alloc() local
891 for_each_unbuddied_list(i, chunks) { in __z3fold_alloc()
948 l = &unbuddied[chunks]; in __z3fold_alloc()
1094 int chunks = size_to_chunks(size); in z3fold_alloc() local
1114 chunks >= zhdr->start_middle) in z3fold_alloc()
[all …]
/Linux-v5.10/drivers/infiniband/ulp/rtrs/
README:28 session. A session is associated with a set of memory chunks reserved on the
36 chunks reserved for him on the server side. Their number, size and addresses
45 which of the memory chunks has been accessed and at which offset the message
80 the server (number of memory chunks which are going to be allocated for that
122 1. When processing a write request client selects one of the memory chunks
139 1. When processing a write request client selects one of the memory chunks
144 using the IMM field, Server invalidate rkey associated to the memory chunks
162 1. When processing a read request client selects one of the memory chunks
181 1. When processing a read request client selects one of the memory chunks
186 Server invalidate rkey associated to the memory chunks first, when it finishes,
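The README excerpt above describes the chunk bookkeeping in prose; a minimal sketch of the idea follows (plain C, purely illustrative and not the actual rtrs code; the structure, function names, and the immediate-data layout are all hypothetical): the client picks a free chunk from the set reserved for it, derives the remote address from the chunk index, and packs the index and offset into the 32-bit immediate so the server can tell which chunk was accessed and at which offset.

#include <stdint.h>

/* Hypothetical client-side view of the chunks the server reserved for it. */
struct chunk_pool {
	uint64_t remote_base;   /* base address advertised by the server  */
	uint32_t chunk_size;    /* size of each reserved chunk, in bytes  */
	uint32_t nr_chunks;     /* how many chunks this client owns       */
	uint64_t busy[4];       /* one bit per chunk, set while in flight */
};

/* Grab the first idle chunk; returns its index, or -1 if all are busy. */
static int chunk_get(struct chunk_pool *p)
{
	for (uint32_t i = 0; i < p->nr_chunks; i++) {
		if (!(p->busy[i / 64] & (1ULL << (i % 64)))) {
			p->busy[i / 64] |= 1ULL << (i % 64);
			return (int)i;
		}
	}
	return -1;
}

/* Remote address the RDMA write/read targets for chunk @id. */
static uint64_t chunk_addr(const struct chunk_pool *p, uint32_t id)
{
	return p->remote_base + (uint64_t)id * p->chunk_size;
}

/* Pack chunk id and offset into the 32-bit IMM field (made-up layout). */
static uint32_t chunk_imm(uint32_t id, uint32_t offset)
{
	return (id << 16) | (offset & 0xffff);
}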
rtrs-srv.c:626 int nr, chunks; in map_cont_bufs() local
628 chunks = chunks_per_mr * mri; in map_cont_bufs()
631 srv->queue_depth - chunks); in map_cont_bufs()
638 sg_set_page(s, srv->chunks[chunks + i], in map_cont_bufs()
673 sess->dma_addr[chunks + i] = sg_dma_address(s); in map_cont_bufs()
1009 data = page_address(srv->chunks[buf_id]); in process_read()
1062 data = page_address(srv->chunks[buf_id]); in process_write()
1136 data = page_address(srv->chunks[msg_id]) + off; in rtrs_srv_inv_rkey_done()
1239 data = page_address(srv->chunks[msg_id]) + off; in rtrs_srv_rdma_done()
1351 srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks), in __alloc_srv()
[all …]
/Linux-v5.10/drivers/md/
md-bitmap.c:776 unsigned long chunks, int with_super, in md_bitmap_storage_alloc() argument
783 bytes = DIV_ROUND_UP(chunks, 8); in md_bitmap_storage_alloc()
1056 unsigned long i, chunks, index, oldindex, bit, node_offset = 0; in md_bitmap_init_from_disk() local
1066 chunks = bitmap->counts.chunks; in md_bitmap_init_from_disk()
1073 for (i = 0; i < chunks ; i++) { in md_bitmap_init_from_disk()
1104 for (i = 0; i < chunks; i++) { in md_bitmap_init_from_disk()
1168 bit_cnt, chunks); in md_bitmap_init_from_disk()
1288 for (j = 0; j < counts->chunks; j++) { in md_bitmap_daemon_work()
1993 for (j = 0; j < counts->chunks; j++) { in md_bitmap_copy_from_slot()
2066 unsigned long chunks; in md_bitmap_resize() local
[all …]
/Linux-v5.10/drivers/net/wireless/ti/wlcore/
boot.c:237 u32 chunks, addr, len; in wlcore_boot_upload_firmware() local
242 chunks = be32_to_cpup((__be32 *) fw); in wlcore_boot_upload_firmware()
245 wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks); in wlcore_boot_upload_firmware()
247 while (chunks--) { in wlcore_boot_upload_firmware()
258 chunks, addr, len); in wlcore_boot_upload_firmware()
/Linux-v5.10/drivers/dma/sh/
rcar-dmac.c:79 struct list_head chunks; member
107 struct rcar_dmac_xfer_chunk chunks[0]; member
115 ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \
358 list_first_entry(&desc->chunks, in rcar_dmac_chan_start_xfer()
482 desc->running = list_first_entry(&desc->chunks, in rcar_dmac_tx_submit()
515 INIT_LIST_HEAD(&desc->chunks); in rcar_dmac_desc_alloc()
546 list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free); in rcar_dmac_desc_put()
642 struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i]; in rcar_dmac_xfer_chunk_alloc()
740 list_for_each_entry(chunk, &desc->chunks, node) { in rcar_dmac_fill_hwdesc()
998 list_add_tail(&chunk->node, &desc->chunks); in rcar_dmac_chan_prep_sg()
[all …]
shdma-base.c:97 if (chunk->chunks == 1) { in shdma_tx_submit()
356 if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { in __ld_cleanup()
372 BUG_ON(desc->chunks != 1); in __ld_cleanup()
567 int chunks = 0; in shdma_prep_sg() local
572 chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len); in shdma_prep_sg()
612 new->chunks = 1; in shdma_prep_sg()
614 new->chunks = chunks--; in shdma_prep_sg()
/Linux-v5.10/drivers/virt/vboxguest/
vboxguest_core.c:356 u32 i, chunks; in vbg_balloon_work()  local
384 chunks = req->balloon_chunks; in vbg_balloon_work()
385 if (chunks > gdev->mem_balloon.max_chunks) { in vbg_balloon_work()
387 __func__, chunks, gdev->mem_balloon.max_chunks); in vbg_balloon_work()
391 if (chunks > gdev->mem_balloon.chunks) { in vbg_balloon_work()
393 for (i = gdev->mem_balloon.chunks; i < chunks; i++) { in vbg_balloon_work()
398 gdev->mem_balloon.chunks++; in vbg_balloon_work()
402 for (i = gdev->mem_balloon.chunks; i-- > chunks;) { in vbg_balloon_work()
407 gdev->mem_balloon.chunks--; in vbg_balloon_work()
1641 balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks; in vbg_ioctl_check_balloon()
/Linux-v5.10/Documentation/admin-guide/device-mapper/
striped.rst:6 device across one or more underlying devices. Data is written in "chunks",
7 with consecutive chunks rotating among the underlying devices. This can
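The striped.rst lines above describe chunks rotating across the underlying devices; a small sketch of that mapping (plain C, illustrative only, not the dm-stripe implementation, and all names here are made up) shows where a logical sector lands:

#include <stdint.h>

/* Result of mapping a logical sector onto the striped set. */
struct stripe_map {
	uint32_t dev;          /* index of the underlying device */
	uint64_t dev_sector;   /* sector offset on that device   */
};

/*
 * chunk_sectors is the chunk size in 512-byte sectors; nr_devs is the
 * number of underlying devices. Chunk i goes to device i % nr_devs and
 * becomes that device's (i / nr_devs)-th chunk.
 */
static struct stripe_map map_sector(uint64_t sector,
				    uint32_t chunk_sectors, uint32_t nr_devs)
{
	uint64_t chunk  = sector / chunk_sectors;   /* which chunk overall */
	uint64_t offset = sector % chunk_sectors;   /* offset inside chunk */
	struct stripe_map m = {
		.dev = (uint32_t)(chunk % nr_devs),
		.dev_sector = (chunk / nr_devs) * chunk_sectors + offset,
	};
	return m;
}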
