
Searched refs:chunks (Results 1 – 25 of 109) sorted by relevance


/Linux-v4.19/arch/mips/ar7/
prom.c 161 struct psp_env_chunk *chunks = (struct psp_env_chunk *)psp_env_data; in parse_psp_env() local
163 memcpy_fromio(chunks, psp_env_base, PSP_ENV_SIZE); in parse_psp_env()
168 if ((chunks[i].num == 0xff) || ((i + chunks[i].len) > n)) in parse_psp_env()
170 value = chunks[i].data; in parse_psp_env()
171 if (chunks[i].num) { in parse_psp_env()
172 name = lookup_psp_var_map(chunks[i].num); in parse_psp_env()
179 i += chunks[i].len; in parse_psp_env()
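
The hits above walk a length-prefixed record table: each psp_env_chunk carries a num tag and a len, the loop advances by len, and a num of 0xff (or a len that would run past the end) terminates the walk. A minimal userspace sketch of the same pattern; the struct layout here is illustrative, not the kernel's exact one:

#include <stdio.h>

struct env_chunk {                /* illustrative layout only */
        unsigned char num;        /* variable id; 0xff ends the table */
        unsigned char len;        /* record length, in array slots */
        char data[14];            /* value bytes */
};

static void walk_env(const struct env_chunk *chunks, unsigned int n)
{
        unsigned int i = 0;

        while (i < n) {
                if (chunks[i].num == 0xff || i + chunks[i].len > n)
                        break;    /* terminator or malformed record */
                if (chunks[i].num)
                        printf("var %u = %.14s\n",
                               (unsigned)chunks[i].num, chunks[i].data);
                i += chunks[i].len;
        }
}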
/Linux-v4.19/drivers/gpu/drm/radeon/
radeon_cs.c 290 chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks); in radeon_cs_parser_init()
297 p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL); in radeon_cs_parser_init()
298 if (p->chunks == NULL) { in radeon_cs_parser_init()
311 p->chunks[i].length_dw = user_chunk.length_dw; in radeon_cs_parser_init()
313 p->chunk_relocs = &p->chunks[i]; in radeon_cs_parser_init()
316 p->chunk_ib = &p->chunks[i]; in radeon_cs_parser_init()
318 if (p->chunks[i].length_dw == 0) in radeon_cs_parser_init()
322 p->chunk_const_ib = &p->chunks[i]; in radeon_cs_parser_init()
324 if (p->chunks[i].length_dw == 0) in radeon_cs_parser_init()
328 p->chunk_flags = &p->chunks[i]; in radeon_cs_parser_init()
[all …]
/Linux-v4.19/arch/x86/kernel/cpu/
intel_rdt_monitor.c 230 u64 shift = 64 - MBM_CNTR_WIDTH, chunks; in mbm_overflow_count() local
232 chunks = (cur_msr << shift) - (prev_msr << shift); in mbm_overflow_count()
233 return chunks >>= shift; in mbm_overflow_count()
239 u64 chunks, tval; in __mon_event_count() local
270 chunks = mbm_overflow_count(m->prev_msr, tval); in __mon_event_count()
271 m->chunks += chunks; in __mon_event_count()
274 rr->val += m->chunks; in __mon_event_count()
286 u64 tval, cur_bw, chunks; in mbm_bw_count() local
292 chunks = mbm_overflow_count(m->prev_bw_msr, tval); in mbm_bw_count()
293 m->chunks_bw += chunks; in mbm_bw_count()
[all …]
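
mbm_overflow_count() above is a truncated-counter trick: the MBM hardware counter is only MBM_CNTR_WIDTH bits wide, so shifting both samples up by 64 - width makes the 64-bit subtraction wrap exactly as the hardware counter does, and shifting back down recovers the delta even across an overflow. Standalone, with a 24-bit width assumed purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define CNTR_WIDTH 24        /* counter width in bits; an assumption */

/* delta of two samples of a CNTR_WIDTH-bit counter, correct even
 * when cur wrapped past zero after prev was read */
static uint64_t overflow_count(uint64_t prev, uint64_t cur)
{
        unsigned int shift = 64 - CNTR_WIDTH;
        uint64_t chunks = (cur << shift) - (prev << shift);

        return chunks >> shift;
}

int main(void)
{
        /* counter wrapped: prev near the top, cur small again */
        printf("%llu\n", (unsigned long long)
               overflow_count(0xfffff0, 0x10));        /* prints 32 */
        return 0;
}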
/Linux-v4.19/drivers/infiniband/hw/usnic/
usnic_vnic.c 45 struct usnic_vnic_res_chunk chunks[USNIC_VNIC_RES_TYPE_MAX]; member
118 for (i = 0; i < ARRAY_SIZE(vnic->chunks); i++) { in usnic_vnic_dump()
119 chunk = &vnic->chunks[i]; in usnic_vnic_dump()
223 return vnic->chunks[type].cnt; in usnic_vnic_res_cnt()
229 return vnic->chunks[type].free_cnt; in usnic_vnic_res_free_cnt()
255 src = &vnic->chunks[type]; in usnic_vnic_get_resources()
287 vnic->chunks[res->type].free_cnt++; in usnic_vnic_put_resources()
383 &vnic->chunks[res_type]); in usnic_vnic_discover_resources()
392 usnic_vnic_free_res_chunk(&vnic->chunks[res_type]); in usnic_vnic_discover_resources()
428 usnic_vnic_free_res_chunk(&vnic->chunks[res_type]); in usnic_vnic_release_resources()
/Linux-v4.19/lib/
genalloc.c 159 INIT_LIST_HEAD(&pool->chunks); in gen_pool_create()
200 list_add_rcu(&chunk->next_chunk, &pool->chunks); in gen_pool_add_virt()
220 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_virt_to_phys()
246 list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { in gen_pool_destroy()
306 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_alloc_algo()
386 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_free()
419 list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) in gen_pool_for_each_chunk()
442 list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) { in addr_in_gen_pool()
466 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) in gen_pool_avail()
485 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) in gen_pool_size()
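
Each gen_pool above is a list of chunks, one per contiguous region handed to the pool, each with its own allocation bitmap; the RCU list walks implement allocation, free, and the size accounting. A hedged sketch of typical kernel-side usage of this API (the SRAM base address below is made up):

#include <linux/genalloc.h>
#include <linux/sizes.h>
#include <linux/errno.h>

/* carve a 64 KiB on-chip SRAM region into 256-byte units */
static int sram_pool_demo(void)
{
        struct gen_pool *pool;
        unsigned long buf;

        pool = gen_pool_create(8, -1);        /* min order 8 => 256 B */
        if (!pool)
                return -ENOMEM;

        /* 0x40000000 is a hypothetical SRAM base; this adds one chunk */
        if (gen_pool_add(pool, 0x40000000, SZ_64K, -1)) {
                gen_pool_destroy(pool);
                return -ENOMEM;
        }

        buf = gen_pool_alloc(pool, 512);      /* two 256 B units */
        if (buf)
                gen_pool_free(pool, buf, 512);

        gen_pool_destroy(pool);
        return 0;
}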
scatterlist.c 386 unsigned int chunks, cur_page, seg_len, i; in __sg_alloc_table_from_pages() local
394 chunks = 1; in __sg_alloc_table_from_pages()
400 chunks++; in __sg_alloc_table_from_pages()
405 ret = sg_alloc_table(sgt, chunks, gfp_mask); in __sg_alloc_table_from_pages()
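
__sg_alloc_table_from_pages() first counts how many scatterlist entries it needs: a new chunk starts whenever the next page is not physically contiguous with the previous one, and that count is what sg_alloc_table() gets. The counting pass, reduced to a userspace sketch over page frame numbers:

#include <stddef.h>

/* count contiguous runs ("chunks") in an array of page frame
 * numbers; each run becomes one scatterlist entry (n must be >= 1) */
static unsigned int count_chunks(const unsigned long *pfn, size_t n)
{
        unsigned int chunks = 1;
        size_t i;

        for (i = 1; i < n; i++)
                if (pfn[i] != pfn[i - 1] + 1)
                        chunks++;        /* gap: start a new chunk */

        return chunks;
}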
debugobjects.c 757 unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; in __debug_check_no_obj_freed() local
769 chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1)); in __debug_check_no_obj_freed()
770 chunks >>= ODEBUG_CHUNK_SHIFT; in __debug_check_no_obj_freed()
772 for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) { in __debug_check_no_obj_freed()
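
__debug_check_no_obj_freed() rounds the freed range up to whole ODEBUG_CHUNK_SIZE units with the usual add-then-shift idiom; zbud's and z3fold's size_to_chunks() further down are the same computation. In isolation, with an example chunk size:

/* round a byte count up to whole power-of-two chunks */
#define CHUNK_SHIFT     10                /* example: 1 KiB chunks */
#define CHUNK_SIZE      (1UL << CHUNK_SHIFT)

static unsigned long size_to_chunks(unsigned long size)
{
        return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
/* size_to_chunks(1) == 1, size_to_chunks(1024) == 1,
 * size_to_chunks(1025) == 2 */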
/Linux-v4.19/tools/testing/selftests/drivers/net/mlxsw/spectrum/
devlink_lib_spectrum.sh 92 devlink_resource_size_set 32000 kvd linear chunks
101 devlink_resource_size_set 32000 kvd linear chunks
110 devlink_resource_size_set 49152 kvd linear chunks
/Linux-v4.19/kernel/
audit_tree.c 17 struct list_head chunks; member
84 INIT_LIST_HEAD(&tree->chunks); in alloc_tree()
376 list_add(&chunk->owners[0].list, &tree->chunks); in create_chunk()
478 list_add(&p->list, &tree->chunks); in tag_chunk()
542 while (!list_empty(&victim->chunks)) { in prune_one()
545 p = list_entry(victim->chunks.next, struct node, list); in prune_one()
564 for (p = tree->chunks.next; p != &tree->chunks; p = q) { in trim_marked()
569 list_add(p, &tree->chunks); in trim_marked()
573 while (!list_empty(&tree->chunks)) { in trim_marked()
576 node = list_entry(tree->chunks.next, struct node, list); in trim_marked()
[all …]
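
prune_one() and trim_marked() above drain tree->chunks by repeatedly detaching the first entry, the standard idiom when the loop body frees what it visits. The same shape on a plain singly linked list instead of the kernel's list_head:

#include <stdlib.h>

struct node {
        struct node *next;
};

/* drain a list the way prune_one() does: always detach the head,
 * so the walk stays valid while entries are freed */
static void drain(struct node **head)
{
        while (*head) {
                struct node *p = *head;

                *head = p->next;        /* unlink first entry */
                free(p);                /* p is no longer reachable */
        }
}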
/Linux-v4.19/net/sctp/
chunk.c 58 INIT_LIST_HEAD(&msg->chunks); in sctp_datamsg_init()
80 list_for_each_entry(chunk, &msg->chunks, frag_list) in sctp_datamsg_free()
100 list_for_each_safe(pos, temp, &msg->chunks) { in sctp_datamsg_destroy()
293 list_add_tail(&chunk->frag_list, &msg->chunks); in sctp_datamsg_from_user()
302 list_for_each_safe(pos, temp, &msg->chunks) { in sctp_datamsg_from_user()
auth.c 201 struct sctp_chunks_param *chunks, in sctp_auth_make_key_vector() argument
212 if (chunks) in sctp_auth_make_key_vector()
213 chunks_len = ntohs(chunks->param_hdr.length); in sctp_auth_make_key_vector()
224 if (chunks) { in sctp_auth_make_key_vector()
225 memcpy(new->data + offset, chunks, chunks_len); in sctp_auth_make_key_vector()
676 switch (param->chunks[i]) { in __sctp_auth_cid()
684 if (param->chunks[i] == chunk) in __sctp_auth_cid()
799 p->chunks[nchunks] = chunk_id; in sctp_auth_ep_add_chunkid()
/Linux-v4.19/drivers/gpu/drm/amd/amdgpu/
amdgpu_cs.c 134 chunk_array_user = u64_to_user_ptr(cs->in.chunks); in amdgpu_cs_parser_init()
142 p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk), in amdgpu_cs_parser_init()
144 if (!p->chunks) { in amdgpu_cs_parser_init()
161 p->chunks[i].chunk_id = user_chunk.chunk_id; in amdgpu_cs_parser_init()
162 p->chunks[i].length_dw = user_chunk.length_dw; in amdgpu_cs_parser_init()
164 size = p->chunks[i].length_dw; in amdgpu_cs_parser_init()
167 p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); in amdgpu_cs_parser_init()
168 if (p->chunks[i].kdata == NULL) { in amdgpu_cs_parser_init()
174 if (copy_from_user(p->chunks[i].kdata, cdata, size)) { in amdgpu_cs_parser_init()
179 switch (p->chunks[i].chunk_id) { in amdgpu_cs_parser_init()
[all …]
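
radeon_cs.c earlier and amdgpu_cs.c here share one ioctl shape: userspace passes a pointer to an array of chunk descriptors, the kernel copies that array in, then copies each chunk's payload and dispatches on chunk_id. A trimmed sketch of the double copy; struct demo_chunk is a placeholder, not the real uapi:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

struct demo_chunk {                /* hypothetical descriptor */
        u32 chunk_id;
        u32 length_dw;             /* payload length in dwords */
        u64 chunk_data;            /* user pointer to payload */
};

static int parse_chunks(u64 uptr, u32 nchunks)
{
        struct demo_chunk *chunks;
        u32 i;
        int ret = 0;

        chunks = kcalloc(nchunks, sizeof(*chunks), GFP_KERNEL);
        if (!chunks)
                return -ENOMEM;

        if (copy_from_user(chunks, u64_to_user_ptr(uptr),
                           nchunks * sizeof(*chunks))) {
                ret = -EFAULT;
                goto out;
        }

        for (i = 0; i < nchunks; i++) {
                /* copy chunks[i].length_dw dwords from the user
                 * pointer chunks[i].chunk_data, then
                 * switch (chunks[i].chunk_id) to handle it */
        }
out:
        kfree(chunks);
        return ret;
}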
/Linux-v4.19/net/xdp/
xdp_umem.c 274 unsigned int chunks, chunks_per_page; in xdp_umem_reg() local
301 chunks = (unsigned int)div_u64(size, chunk_size); in xdp_umem_reg()
302 if (chunks == 0) in xdp_umem_reg()
306 if (chunks < chunks_per_page || chunks % chunks_per_page) in xdp_umem_reg()
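
xdp_umem_reg() sizes the UMEM in chunks and rejects layouts where chunks do not pack evenly into pages. The two checks restated as a standalone predicate (the kernel has already verified that chunk_size is a power of two no larger than a page):

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE 4096

/* mirror of the xdp_umem_reg() sizing checks */
static bool umem_size_ok(uint64_t size, uint32_t chunk_size)
{
        uint32_t chunks = (uint32_t)(size / chunk_size);
        uint32_t chunks_per_page = PAGE_SIZE / chunk_size;

        if (chunks == 0)
                return false;        /* smaller than one chunk */

        /* whole pages only: at least one page's worth of chunks,
         * and no partial page at the end */
        return chunks >= chunks_per_page && chunks % chunks_per_page == 0;
}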
/Linux-v4.19/mm/
zbud.c 356 int chunks, i, freechunks; in zbud_alloc() local
365 chunks = size_to_chunks(size); in zbud_alloc()
370 for_each_unbuddied_list(i, chunks) { in zbud_alloc()
395 zhdr->first_chunks = chunks; in zbud_alloc()
397 zhdr->last_chunks = chunks; in zbud_alloc()
z3fold.c 539 int chunks = 0, i, freechunks; in z3fold_alloc() local
555 chunks = size_to_chunks(size); in z3fold_alloc()
560 for_each_unbuddied_list(i, chunks) { in z3fold_alloc()
607 chunks >= zhdr->start_middle) in z3fold_alloc()
666 zhdr->first_chunks = chunks; in z3fold_alloc()
668 zhdr->last_chunks = chunks; in z3fold_alloc()
670 zhdr->middle_chunks = chunks; in z3fold_alloc()
/Linux-v4.19/drivers/md/
md-bitmap.c 778 unsigned long chunks, int with_super, in md_bitmap_storage_alloc() argument
785 bytes = DIV_ROUND_UP(chunks, 8); in md_bitmap_storage_alloc()
1060 unsigned long i, chunks, index, oldindex, bit, node_offset = 0; in md_bitmap_init_from_disk() local
1070 chunks = bitmap->counts.chunks; in md_bitmap_init_from_disk()
1077 for (i = 0; i < chunks ; i++) { in md_bitmap_init_from_disk()
1108 for (i = 0; i < chunks; i++) { in md_bitmap_init_from_disk()
1172 bit_cnt, chunks); in md_bitmap_init_from_disk()
1292 for (j = 0; j < counts->chunks; j++) { in md_bitmap_daemon_work()
1989 for (j = 0; j < counts->chunks; j++) { in md_bitmap_copy_from_slot()
2061 unsigned long chunks; in md_bitmap_resize() local
[all …]
/Linux-v4.19/drivers/virt/vboxguest/
vboxguest_core.c 346 u32 i, chunks; in vbg_balloon_work() local
374 chunks = req->balloon_chunks; in vbg_balloon_work()
375 if (chunks > gdev->mem_balloon.max_chunks) { in vbg_balloon_work()
377 __func__, chunks, gdev->mem_balloon.max_chunks); in vbg_balloon_work()
381 if (chunks > gdev->mem_balloon.chunks) { in vbg_balloon_work()
383 for (i = gdev->mem_balloon.chunks; i < chunks; i++) { in vbg_balloon_work()
388 gdev->mem_balloon.chunks++; in vbg_balloon_work()
392 for (i = gdev->mem_balloon.chunks; i-- > chunks;) { in vbg_balloon_work()
397 gdev->mem_balloon.chunks--; in vbg_balloon_work()
1400 balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks; in vbg_ioctl_check_balloon()
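
vbg_balloon_work() walks the balloon toward the host-requested chunk count, inflating while below the target and deflating while above, and bumps mem_balloon.chunks only after each per-chunk request succeeds, so a mid-loop failure leaves the count accurate. The control shape, with the chunk operations stubbed out:

/* move *cur toward target one chunk at a time; the counter is
 * only advanced after the (stubbed) chunk operation succeeds */
static void balloon_resize(unsigned int *cur, unsigned int target)
{
        unsigned int i;

        if (target > *cur) {
                for (i = *cur; i < target; i++) {
                        /* if (inflate_chunk(i)) return; */
                        (*cur)++;
                }
        } else {
                for (i = *cur; i-- > target;) {
                        /* if (deflate_chunk(i)) return; */
                        (*cur)--;
                }
        }
}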
/Linux-v4.19/drivers/net/wireless/ti/wlcore/
boot.c 251 u32 chunks, addr, len; in wlcore_boot_upload_firmware() local
256 chunks = be32_to_cpup((__be32 *) fw); in wlcore_boot_upload_firmware()
259 wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks); in wlcore_boot_upload_firmware()
261 while (chunks--) { in wlcore_boot_upload_firmware()
272 chunks, addr, len); in wlcore_boot_upload_firmware()
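
wlcore_boot_upload_firmware() reads a big-endian chunk count from the head of the image, then consumes one [addr][len][payload] record per chunk. A sketch of that walk; the record layout is inferred from the snippet, and bounds checking against the image size is omitted:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>        /* ntohl */

static uint32_t rd_be32(const uint8_t **p)
{
        uint32_t v;

        memcpy(&v, *p, 4);
        *p += 4;
        return ntohl(v);
}

/* assumed image layout: be32 chunk count, then per chunk
 * be32 device address, be32 length, length payload bytes */
static void upload_fw(const uint8_t *fw)
{
        uint32_t chunks = rd_be32(&fw);

        while (chunks--) {
                uint32_t addr = rd_be32(&fw);
                uint32_t len = rd_be32(&fw);

                printf("chunk @0x%x, %u bytes\n", addr, len);
                /* write len bytes from fw to device address addr */
                fw += len;
        }
}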
/Linux-v4.19/Documentation/device-mapper/
striped.txt 5 device across one or more underlying devices. Data is written in "chunks",
6 with consecutive chunks rotating among the underlying devices. This can
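
The two doc lines above describe round-robin placement; concretely, logical chunk k lands on underlying device k mod N, at device-local chunk k / N. As a worked mapping function:

/* map a logical sector to (device index, sector on that device)
 * for dm-stripe style round-robin placement over ndev devices */
static void stripe_map(unsigned long long sector,
                       unsigned int chunk_sectors, unsigned int ndev,
                       unsigned int *dev, unsigned long long *dev_sector)
{
        unsigned long long chunk = sector / chunk_sectors;
        unsigned int offset = sector % chunk_sectors;

        *dev = chunk % ndev;
        *dev_sector = (chunk / ndev) * chunk_sectors + offset;
}
/* e.g. chunk_sectors = 8, ndev = 2: sector 17 is in chunk 2,
 * so it maps to dev 0, dev_sector 1 * 8 + 1 = 9 */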
snapshot.txt 14 In the first two cases, dm copies only the chunks of data that get
35 A snapshot of the <origin> block device is created. Changed chunks of
64 Creates a merging snapshot that takes control of the changed chunks
66 procedure, and merges these chunks back into the <origin>. Once merging
/Linux-v4.19/drivers/dma/sh/
Drcar-dmac.c79 struct list_head chunks; member
107 struct rcar_dmac_xfer_chunk chunks[0]; member
115 ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \
348 list_first_entry(&desc->chunks, in rcar_dmac_chan_start_xfer()
472 desc->running = list_first_entry(&desc->chunks, in rcar_dmac_tx_submit()
505 INIT_LIST_HEAD(&desc->chunks); in rcar_dmac_desc_alloc()
536 list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free); in rcar_dmac_desc_put()
632 struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i]; in rcar_dmac_xfer_chunk_alloc()
730 list_for_each_entry(chunk, &desc->chunks, node) { in rcar_dmac_fill_hwdesc()
985 list_add_tail(&chunk->node, &desc->chunks); in rcar_dmac_chan_prep_sg()
[all …]
shdma-base.c 100 if (chunk->chunks == 1) { in shdma_tx_submit()
359 if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { in __ld_cleanup()
375 BUG_ON(desc->chunks != 1); in __ld_cleanup()
570 int chunks = 0; in shdma_prep_sg() local
575 chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len); in shdma_prep_sg()
615 new->chunks = 1; in shdma_prep_sg()
617 new->chunks = chunks--; in shdma_prep_sg()
/Linux-v4.19/drivers/mtd/nand/raw/
davinci_nand.c 678 int chunks = mtd->writesize / 512; in davinci_nand_attach_chip() local
680 if (!chunks || mtd->oobsize < 16) { in davinci_nand_attach_chip()
689 if (chunks == 1) { in davinci_nand_attach_chip()
691 } else if (chunks == 4 || chunks == 8) { in davinci_nand_attach_chip()
/Linux-v4.19/Documentation/x86/x86_64/
fake-numa-for-cpusets 6 you can create fake NUMA nodes that represent contiguous chunks of memory and
18 four equal chunks of 512M each that we can now use to assign to cpusets. As
/Linux-v4.19/Documentation/networking/
netvsc.txt 67 is probed. The receive area is broken into MTU sized chunks and each may
72 The send area is broken into chunks of 6144 bytes, each of section may
