
Searched refs:chunk (Results 1 – 25 of 282) sorted by relevance


/Linux-v5.4/net/sctp/
inqueue.c
44 struct sctp_chunk *chunk, *tmp; in sctp_inq_free() local
47 list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) { in sctp_inq_free()
48 list_del_init(&chunk->list); in sctp_inq_free()
49 sctp_chunk_free(chunk); in sctp_inq_free()
64 void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk) in sctp_inq_push() argument
67 if (chunk->rcvr->dead) { in sctp_inq_push()
68 sctp_chunk_free(chunk); in sctp_inq_push()
77 list_add_tail(&chunk->list, &q->in_chunk_list); in sctp_inq_push()
78 if (chunk->asoc) in sctp_inq_push()
79 chunk->asoc->stats.ipackets++; in sctp_inq_push()
[all …]
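
sctp_inq_free() above uses list_for_each_entry_safe(), the kernel's free-while-iterating idiom: a second cursor (tmp) caches the next node before the current one is unlinked and freed. A minimal userspace sketch of the same pattern, with a hypothetical singly linked node type standing in for struct sctp_chunk:

    #include <stdlib.h>

    struct node {
        int value;
        struct node *next;
    };

    /* Free every node while walking the list: read ->next into tmp
     * before freeing the current node, exactly as
     * list_for_each_entry_safe() caches the next list head. */
    static void free_all(struct node *head)
    {
        struct node *cur, *tmp;

        for (cur = head; cur != NULL; cur = tmp) {
            tmp = cur->next;        /* must happen before free(cur) */
            free(cur);
        }
    }

    int main(void)
    {
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));
            n->value = i;
            n->next = head;
            head = n;
        }
        free_all(head);
        return 0;
    }
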
chunk.c
60 struct sctp_chunk *chunk; in sctp_datamsg_free() local
65 list_for_each_entry(chunk, &msg->chunks, frag_list) in sctp_datamsg_free()
66 sctp_chunk_free(chunk); in sctp_datamsg_free()
76 struct sctp_chunk *chunk; in sctp_datamsg_destroy() local
86 chunk = list_entry(pos, struct sctp_chunk, frag_list); in sctp_datamsg_destroy()
89 asoc = chunk->asoc; in sctp_datamsg_destroy()
102 if (chunk->has_tsn) in sctp_datamsg_destroy()
107 ev = sctp_ulpevent_make_send_failed(asoc, chunk, sent, in sctp_datamsg_destroy()
113 sctp_chunk_put(chunk); in sctp_datamsg_destroy()
134 static void sctp_datamsg_assign(struct sctp_datamsg *msg, struct sctp_chunk *chunk) in sctp_datamsg_assign() argument
[all …]
output.c
46 struct sctp_chunk *chunk);
48 struct sctp_chunk *chunk);
50 struct sctp_chunk *chunk);
52 struct sctp_chunk *chunk,
121 struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc); in sctp_packet_config() local
123 if (chunk) in sctp_packet_config()
124 sctp_packet_append_chunk(packet, chunk); in sctp_packet_config()
161 struct sctp_chunk *chunk, *tmp; in sctp_packet_free() local
165 list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { in sctp_packet_free()
166 list_del_init(&chunk->list); in sctp_packet_free()
[all …]
outqueue.c
209 struct sctp_chunk *chunk, *tmp; in __sctp_outq_teardown() local
215 chunk = list_entry(lchunk, struct sctp_chunk, in __sctp_outq_teardown()
218 sctp_chunk_fail(chunk, q->error); in __sctp_outq_teardown()
219 sctp_chunk_free(chunk); in __sctp_outq_teardown()
226 chunk = list_entry(lchunk, struct sctp_chunk, in __sctp_outq_teardown()
228 sctp_chunk_fail(chunk, q->error); in __sctp_outq_teardown()
229 sctp_chunk_free(chunk); in __sctp_outq_teardown()
235 chunk = list_entry(lchunk, struct sctp_chunk, in __sctp_outq_teardown()
237 sctp_chunk_fail(chunk, q->error); in __sctp_outq_teardown()
238 sctp_chunk_free(chunk); in __sctp_outq_teardown()
[all …]
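
__sctp_outq_teardown() recovers each chunk from a bare list pointer with list_entry(), which is container_of(): subtract the offset of the embedded list member to get back to the enclosing struct. A self-contained sketch of that pointer arithmetic (the chunk struct here is illustrative):

    #include <stddef.h>
    #include <stdio.h>

    struct list_head {
        struct list_head *next, *prev;
    };

    struct chunk {
        int id;
        struct list_head list;  /* embedded link, as in struct sctp_chunk */
    };

    /* Step back from a pointer to an embedded member to its container. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    int main(void)
    {
        struct chunk c = { .id = 42 };
        struct list_head *lchunk = &c.list;  /* what the queue hands out */
        struct chunk *back = container_of(lchunk, struct chunk, list);

        printf("%d\n", back->id);            /* prints 42 */
        return 0;
    }
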
sm_statefuns.c
54 struct sctp_chunk *chunk,
57 struct sctp_chunk *chunk,
62 const struct sctp_chunk *chunk);
66 const struct sctp_chunk *chunk,
90 static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
142 struct sctp_chunk *chunk);
159 static inline bool sctp_chunk_length_valid(struct sctp_chunk *chunk, in sctp_chunk_length_valid() argument
162 __u16 chunk_length = ntohs(chunk->chunk_hdr->length); in sctp_chunk_length_valid()
165 if (unlikely(chunk->pdiscard)) in sctp_chunk_length_valid()
214 struct sctp_chunk *chunk = arg; in sctp_sf_do_4_C() local
[all …]
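
sctp_chunk_length_valid() converts the on-wire length with ntohs() before comparing it against the minimum required for that chunk type; trusting a big-endian length field without conversion and bounds checks is a classic parsing bug. A hedged userspace sketch of the same check, with a simplified header layout rather than the real SCTP one:

    #include <arpa/inet.h>          /* ntohs(), htons() */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified chunk header: the 16-bit length is big-endian on the
     * wire and counts the header itself. */
    struct chunk_hdr {
        uint8_t  type;
        uint8_t  flags;
        uint16_t length;
    };

    static bool chunk_length_valid(const struct chunk_hdr *hdr,
                                   uint16_t required_length)
    {
        uint16_t chunk_length = ntohs(hdr->length);

        /* Reject chunks too short for what this type must carry. */
        return chunk_length >= required_length;
    }

    int main(void)
    {
        struct chunk_hdr hdr = { .type = 1, .length = htons(4) };

        printf("%s\n",
               chunk_length_valid(&hdr, sizeof(hdr)) ? "ok" : "too short");
        return 0;
    }
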
sm_make_chunk.c
67 static void *sctp_addto_param(struct sctp_chunk *chunk, int len,
73 struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg; in sctp_control_release_owner() local
75 if (chunk->shkey) { in sctp_control_release_owner()
76 struct sctp_shared_key *shkey = chunk->shkey; in sctp_control_release_owner()
77 struct sctp_association *asoc = chunk->asoc; in sctp_control_release_owner()
93 sctp_auth_shkey_release(chunk->shkey); in sctp_control_release_owner()
97 static void sctp_control_set_owner_w(struct sctp_chunk *chunk) in sctp_control_set_owner_w() argument
99 struct sctp_association *asoc = chunk->asoc; in sctp_control_set_owner_w()
100 struct sk_buff *skb = chunk->skb; in sctp_control_set_owner_w()
109 if (chunk->auth) { in sctp_control_set_owner_w()
[all …]
endpointola.c
321 struct sctp_chunk *chunk; in sctp_endpoint_bh_rcv() local
336 while (NULL != (chunk = sctp_inq_pop(inqueue))) { in sctp_endpoint_bh_rcv()
337 subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type); in sctp_endpoint_bh_rcv()
342 if (first_time && (subtype.chunk == SCTP_CID_AUTH)) { in sctp_endpoint_bh_rcv()
355 chunk->auth_chunk = skb_clone(chunk->skb, in sctp_endpoint_bh_rcv()
357 chunk->auth = 1; in sctp_endpoint_bh_rcv()
368 if (NULL == chunk->asoc) { in sctp_endpoint_bh_rcv()
370 sctp_source(chunk), in sctp_endpoint_bh_rcv()
372 chunk->asoc = asoc; in sctp_endpoint_bh_rcv()
373 chunk->transport = transport; in sctp_endpoint_bh_rcv()
[all …]
/Linux-v5.4/mm/
percpu-vm.c
12 static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk, in pcpu_chunk_page() argument
16 WARN_ON(chunk->immutable); in pcpu_chunk_page()
18 return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx)); in pcpu_chunk_page()
53 static void pcpu_free_pages(struct pcpu_chunk *chunk, in pcpu_free_pages() argument
81 static int pcpu_alloc_pages(struct pcpu_chunk *chunk, in pcpu_alloc_pages() argument
126 static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk, in pcpu_pre_unmap_flush() argument
130 pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start), in pcpu_pre_unmap_flush()
131 pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end)); in pcpu_pre_unmap_flush()
152 static void pcpu_unmap_pages(struct pcpu_chunk *chunk, in pcpu_unmap_pages() argument
162 page = pcpu_chunk_page(chunk, cpu, i); in pcpu_unmap_pages()
[all …]
percpu.c
206 static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr) in pcpu_addr_in_chunk() argument
210 if (!chunk) in pcpu_addr_in_chunk()
213 start_addr = chunk->base_addr + chunk->start_offset; in pcpu_addr_in_chunk()
214 end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE - in pcpu_addr_in_chunk()
215 chunk->end_offset; in pcpu_addr_in_chunk()
233 static int pcpu_chunk_slot(const struct pcpu_chunk *chunk) in pcpu_chunk_slot() argument
235 const struct pcpu_block_md *chunk_md = &chunk->chunk_md; in pcpu_chunk_slot()
237 if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE || in pcpu_chunk_slot()
266 static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk, in pcpu_chunk_addr() argument
269 return (unsigned long)chunk->base_addr + in pcpu_chunk_addr()
[all …]
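
pcpu_addr_in_chunk() is a half-open range test: the usable region begins at base_addr + start_offset and ends at base_addr + nr_pages * PAGE_SIZE - end_offset, so addresses falling in the reserved head or tail of the chunk are rejected. A standalone sketch of that arithmetic, with made-up offsets for the demo:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    struct chunk {
        char *base_addr;
        unsigned long start_offset;  /* unusable bytes at the front */
        unsigned long end_offset;    /* unusable bytes at the back  */
        int nr_pages;
    };

    static bool addr_in_chunk(const struct chunk *c, const void *addr)
    {
        const char *start = c->base_addr + c->start_offset;
        const char *end = c->base_addr + c->nr_pages * PAGE_SIZE
                          - c->end_offset;

        /* half-open interval [start, end) */
        return (const char *)addr >= start && (const char *)addr < end;
    }

    int main(void)
    {
        static char backing[4 * PAGE_SIZE];
        struct chunk c = { backing, 64, 128, 4 };

        printf("%d %d\n",
               addr_in_chunk(&c, backing + 64),    /* 1: first usable byte */
               addr_in_chunk(&c, backing + 4 * PAGE_SIZE - 1)); /* 0: in end_offset */
        return 0;
    }
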
percpu-km.c
35 static int pcpu_populate_chunk(struct pcpu_chunk *chunk, in pcpu_populate_chunk() argument
41 static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, in pcpu_depopulate_chunk() argument
50 struct pcpu_chunk *chunk; in pcpu_create_chunk() local
55 chunk = pcpu_alloc_chunk(gfp); in pcpu_create_chunk()
56 if (!chunk) in pcpu_create_chunk()
61 pcpu_free_chunk(chunk); in pcpu_create_chunk()
66 pcpu_set_page_chunk(nth_page(pages, i), chunk); in pcpu_create_chunk()
68 chunk->data = pages; in pcpu_create_chunk()
69 chunk->base_addr = page_address(pages); in pcpu_create_chunk()
72 pcpu_chunk_populated(chunk, 0, nr_pages); in pcpu_create_chunk()
[all …]
percpu-stats.c
35 struct pcpu_chunk *chunk; in find_max_nr_alloc() local
40 list_for_each_entry(chunk, &pcpu_slot[slot], list) in find_max_nr_alloc()
41 max_nr_alloc = max(max_nr_alloc, chunk->nr_alloc); in find_max_nr_alloc()
52 static void chunk_map_stats(struct seq_file *m, struct pcpu_chunk *chunk, in chunk_map_stats() argument
55 struct pcpu_block_md *chunk_md = &chunk->chunk_md; in chunk_map_stats()
69 last_alloc = find_last_bit(chunk->alloc_map, in chunk_map_stats()
70 pcpu_chunk_map_bits(chunk) - in chunk_map_stats()
71 chunk->end_offset / PCPU_MIN_ALLOC_SIZE - 1); in chunk_map_stats()
72 last_alloc = test_bit(last_alloc, chunk->alloc_map) ? in chunk_map_stats()
76 start = chunk->start_offset / PCPU_MIN_ALLOC_SIZE; in chunk_map_stats()
[all …]
percpu-internal.h
80 static inline int pcpu_chunk_nr_blocks(struct pcpu_chunk *chunk) in pcpu_chunk_nr_blocks() argument
82 return chunk->nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE; in pcpu_chunk_nr_blocks()
104 static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk) in pcpu_chunk_map_bits() argument
106 return pcpu_nr_pages_to_map_bits(chunk->nr_pages); in pcpu_chunk_map_bits()
146 static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size) in pcpu_stats_area_alloc() argument
159 chunk->nr_alloc++; in pcpu_stats_area_alloc()
160 chunk->max_alloc_size = max(chunk->max_alloc_size, size); in pcpu_stats_area_alloc()
170 static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk) in pcpu_stats_area_dealloc() argument
177 chunk->nr_alloc--; in pcpu_stats_area_dealloc()
214 static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size) in pcpu_stats_area_alloc() argument
[all …]
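
The two helpers above are pure unit conversions: pages to bitmap blocks (nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE) and pages to allocation-map bits, where each bit tracks PCPU_MIN_ALLOC_SIZE bytes. A tiny worked example, with the usual v5.4 defaults assumed (PAGE_SIZE 4096, one block per page, 4-byte minimum allocation) rather than read from a real config:

    #include <stdio.h>

    #define PAGE_SIZE               4096
    #define PCPU_BITMAP_BLOCK_SIZE  4096  /* assumed: one block per page */
    #define PCPU_MIN_ALLOC_SIZE     4     /* bytes tracked per map bit  */

    static int chunk_nr_blocks(int nr_pages)
    {
        return nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE;
    }

    static int chunk_map_bits(int nr_pages)
    {
        return nr_pages * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
    }

    int main(void)
    {
        /* a 4-page chunk: 4 blocks and 4096 allocation-map bits */
        printf("blocks=%d bits=%d\n", chunk_nr_blocks(4), chunk_map_bits(4));
        return 0;
    }
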
/Linux-v5.4/drivers/s390/cio/
itcw.c
182 void *chunk; in itcw_init() local
194 chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0); in itcw_init()
195 if (IS_ERR(chunk)) in itcw_init()
196 return chunk; in itcw_init()
197 itcw = chunk; in itcw_init()
210 chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); in itcw_init()
211 if (IS_ERR(chunk)) in itcw_init()
212 return chunk; in itcw_init()
213 itcw->tcw = chunk; in itcw_init()
218 chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); in itcw_init()
[all …]
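
itcw_init() relies on the kernel's error-pointer convention: fit_chunk() returns either a usable pointer or a small negative errno encoded in the pointer value, and IS_ERR() tells the two apart without a separate status out-parameter. A freestanding sketch of that encoding (simplified; the real macros live in include/linux/err.h):

    #include <errno.h>
    #include <stdio.h>

    /* Small negative errnos live in the top MAX_ERRNO bytes of the
     * address space, which no valid pointer occupies. */
    #define MAX_ERRNO 4095

    static void *err_ptr(long error) { return (void *)error; }

    static int is_err(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static long ptr_err(const void *ptr) { return (long)ptr; }

    /* Stand-in for fit_chunk(): a buffer on success, an error pointer
     * on failure. */
    static void *fit_chunk(int fail)
    {
        static char buf[64];
        return fail ? err_ptr(-ENOSPC) : buf;
    }

    int main(void)
    {
        void *chunk = fit_chunk(1);

        if (is_err(chunk))
            printf("error %ld\n", ptr_err(chunk)); /* the encoded errno */
        return 0;
    }
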
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx4/
icm.c
55 static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) in mlx4_free_icm_pages() argument
59 if (chunk->nsg > 0) in mlx4_free_icm_pages()
60 dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages, in mlx4_free_icm_pages()
63 for (i = 0; i < chunk->npages; ++i) in mlx4_free_icm_pages()
64 __free_pages(sg_page(&chunk->sg[i]), in mlx4_free_icm_pages()
65 get_order(chunk->sg[i].length)); in mlx4_free_icm_pages()
68 static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) in mlx4_free_icm_coherent() argument
72 for (i = 0; i < chunk->npages; ++i) in mlx4_free_icm_coherent()
74 chunk->buf[i].size, in mlx4_free_icm_coherent()
75 chunk->buf[i].addr, in mlx4_free_icm_coherent()
[all …]
icm.h
74 struct mlx4_icm_chunk *chunk; member
100 iter->chunk = list_empty(&icm->chunk_list) ? in mlx4_icm_first()
108 return !iter->chunk; in mlx4_icm_last()
113 if (++iter->page_idx >= iter->chunk->nsg) { in mlx4_icm_next()
114 if (iter->chunk->list.next == &iter->icm->chunk_list) { in mlx4_icm_next()
115 iter->chunk = NULL; in mlx4_icm_next()
119 iter->chunk = list_entry(iter->chunk->list.next, in mlx4_icm_next()
127 if (iter->chunk->coherent) in mlx4_icm_addr()
128 return iter->chunk->buf[iter->page_idx].dma_addr; in mlx4_icm_addr()
130 return sg_dma_address(&iter->chunk->sg[iter->page_idx]); in mlx4_icm_addr()
[all …]
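
icm.h implements a two-level cursor over ICM memory: the iterator holds the current chunk and a page index, advances the index within the chunk, and steps to the next chunk (or NULL at the tail) when the index runs off the end. A simplified userspace sketch of the same shape, using arrays in place of the kernel's linked chunk list:

    #include <stdio.h>

    struct chunk {
        int nsg;                /* entries in this chunk */
        const int *pages;
    };

    struct icm_iter {
        const struct chunk *chunks;
        int nchunks;
        int chunk_idx;          /* -1 once iteration is finished */
        int page_idx;
    };

    static void iter_first(struct icm_iter *it, const struct chunk *c, int n)
    {
        it->chunks = c;
        it->nchunks = n;
        it->chunk_idx = n ? 0 : -1;
        it->page_idx = 0;
    }

    static int iter_last(const struct icm_iter *it)
    {
        return it->chunk_idx < 0;
    }

    static void iter_next(struct icm_iter *it)
    {
        /* step within the chunk first, then on to the next chunk */
        if (++it->page_idx >= it->chunks[it->chunk_idx].nsg) {
            it->page_idx = 0;
            if (++it->chunk_idx >= it->nchunks)
                it->chunk_idx = -1;     /* ran off the tail: done */
        }
    }

    int main(void)
    {
        const int a[] = { 1, 2 }, b[] = { 3 };
        struct chunk chunks[] = { { 2, a }, { 1, b } };
        struct icm_iter it;

        for (iter_first(&it, chunks, 2); !iter_last(&it); iter_next(&it))
            printf("%d ", chunks[it.chunk_idx].pages[it.page_idx]);
        putchar('\n');                  /* prints: 1 2 3 */
        return 0;
    }
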
/Linux-v5.4/drivers/gpu/drm/nouveau/
nouveau_dmem.c
89 struct nouveau_dmem_chunk *chunk = page->zone_device_data; in nouveau_dmem_page_addr() local
90 unsigned long idx = page_to_pfn(page) - chunk->pfn_first; in nouveau_dmem_page_addr()
92 return (idx << PAGE_SHIFT) + chunk->bo->bo.offset; in nouveau_dmem_page_addr()
97 struct nouveau_dmem_chunk *chunk = page->zone_device_data; in nouveau_dmem_page_free() local
98 unsigned long idx = page_to_pfn(page) - chunk->pfn_first; in nouveau_dmem_page_free()
107 spin_lock(&chunk->lock); in nouveau_dmem_page_free()
108 clear_bit(idx, chunk->bitmap); in nouveau_dmem_page_free()
109 WARN_ON(!chunk->callocated); in nouveau_dmem_page_free()
110 chunk->callocated--; in nouveau_dmem_page_free()
115 spin_unlock(&chunk->lock); in nouveau_dmem_page_free()
[all …]
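
nouveau_dmem_page_free() converts a page back into a bitmap index (its pfn minus the chunk's first pfn), clears the bit under the chunk spinlock, and decrements a usage counter. A minimal sketch of a bitmap sub-allocator's free path, with a pthread mutex standing in for the spinlock and illustrative names throughout:

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    struct chunk {
        pthread_mutex_t lock;
        unsigned long bitmap;   /* one bit per allocated slot */
        int callocated;         /* slots currently in use     */
    };

    static void chunk_free_slot(struct chunk *c, unsigned long idx)
    {
        pthread_mutex_lock(&c->lock);
        c->bitmap &= ~(1UL << idx);  /* clear_bit(idx, chunk->bitmap) */
        assert(c->callocated > 0);   /* WARN_ON(!chunk->callocated)  */
        c->callocated--;
        pthread_mutex_unlock(&c->lock);
    }

    int main(void)
    {
        struct chunk c = { PTHREAD_MUTEX_INITIALIZER, 1UL << 5, 1 };

        chunk_free_slot(&c, 5);
        printf("in use: %d\n", c.callocated);   /* 0 */
        return 0;
    }
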
/Linux-v5.4/kernel/
audit_tree.c
42 struct audit_chunk *chunk; member
128 static void free_chunk(struct audit_chunk *chunk) in free_chunk() argument
132 for (i = 0; i < chunk->count; i++) { in free_chunk()
133 if (chunk->owners[i].owner) in free_chunk()
134 put_tree(chunk->owners[i].owner); in free_chunk()
136 kfree(chunk); in free_chunk()
139 void audit_put_chunk(struct audit_chunk *chunk) in audit_put_chunk() argument
141 if (atomic_long_dec_and_test(&chunk->refs)) in audit_put_chunk()
142 free_chunk(chunk); in audit_put_chunk()
147 struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head); in __put_chunk() local
[all …]
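
audit_put_chunk() is the standard refcount-release pattern: decrement atomically, and only the caller whose decrement reaches zero runs the destructor, so concurrent putters cannot double-free. A userspace sketch with C11 atomics (the struct and its payload are made up; the real free_chunk() also drops the owners' tree references):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct chunk {
        atomic_long refs;
        int payload;
    };

    static void free_chunk(struct chunk *c)
    {
        free(c);
    }

    /* Mirrors atomic_long_dec_and_test() + free_chunk(): fetch_sub
     * returns the old value, so old == 1 means we took it to zero. */
    static void put_chunk(struct chunk *c)
    {
        if (atomic_fetch_sub(&c->refs, 1) == 1)
            free_chunk(c);
    }

    int main(void)
    {
        struct chunk *c = malloc(sizeof(*c));

        atomic_init(&c->refs, 2);   /* two owners */
        put_chunk(c);               /* 2 -> 1, no free    */
        put_chunk(c);               /* 1 -> 0, freed here */
        return 0;
    }
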
/Linux-v5.4/lib/
genalloc.c
38 static inline size_t chunk_size(const struct gen_pool_chunk *chunk) in chunk_size() argument
40 return chunk->end_addr - chunk->start_addr + 1; in chunk_size()
185 struct gen_pool_chunk *chunk; in gen_pool_add_owner() local
190 chunk = vzalloc_node(nbytes, nid); in gen_pool_add_owner()
191 if (unlikely(chunk == NULL)) in gen_pool_add_owner()
194 chunk->phys_addr = phys; in gen_pool_add_owner()
195 chunk->start_addr = virt; in gen_pool_add_owner()
196 chunk->end_addr = virt + size - 1; in gen_pool_add_owner()
197 chunk->owner = owner; in gen_pool_add_owner()
198 atomic_long_set(&chunk->avail, size); in gen_pool_add_owner()
[all …]
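
gen_pool stores an inclusive end address (end_addr = virt + size - 1), so chunk_size() must add the 1 back: end - start + 1. Keeping the end inclusive avoids overflowing to zero when a chunk runs to the very top of the address space. A one-line check of the round trip:

    #include <stdio.h>

    int main(void)
    {
        unsigned long start = 0x1000, size = 0x400;
        unsigned long end = start + size - 1;   /* inclusive end address */

        printf("%lu\n", end - start + 1);       /* recovers 1024 (0x400) */
        return 0;
    }
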
/Linux-v5.4/drivers/infiniband/hw/i40iw/
i40iw_pble.c
53 static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk);
63 struct i40iw_chunk *chunk; in i40iw_destroy_pble_pool() local
68 chunk = list_entry(clist, struct i40iw_chunk, list); in i40iw_destroy_pble_pool()
69 if (chunk->type == I40IW_VMALLOC) in i40iw_destroy_pble_pool()
70 i40iw_free_vmalloc_mem(dev->hw, chunk); in i40iw_destroy_pble_pool()
71 kfree(chunk); in i40iw_destroy_pble_pool()
138 struct i40iw_chunk *chunk = info->chunk; in add_sd_direct() local
151 chunk->type = I40IW_DMA_COHERENT; in add_sd_direct()
155 chunk->size = info->pages << I40IW_HMC_PAGED_BP_SHIFT; in add_sd_direct()
156 chunk->vaddr = ((u8 *)sd_entry->u.bp.addr.va + offset); in add_sd_direct()
[all …]
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_icm_pool.c
169 static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk) in dr_icm_chunk_ste_init() argument
171 struct mlx5dr_icm_bucket *bucket = chunk->bucket; in dr_icm_chunk_ste_init()
173 chunk->ste_arr = kvzalloc(bucket->num_of_entries * in dr_icm_chunk_ste_init()
174 sizeof(chunk->ste_arr[0]), GFP_KERNEL); in dr_icm_chunk_ste_init()
175 if (!chunk->ste_arr) in dr_icm_chunk_ste_init()
178 chunk->hw_ste_arr = kvzalloc(bucket->num_of_entries * in dr_icm_chunk_ste_init()
180 if (!chunk->hw_ste_arr) in dr_icm_chunk_ste_init()
183 chunk->miss_list = kvmalloc(bucket->num_of_entries * in dr_icm_chunk_ste_init()
184 sizeof(chunk->miss_list[0]), GFP_KERNEL); in dr_icm_chunk_ste_init()
185 if (!chunk->miss_list) in dr_icm_chunk_ste_init()
[all …]
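
dr_icm_chunk_ste_init() allocates three parallel arrays sized by the bucket's entry count and bails out if any allocation fails; the error path must release whatever was already allocated. A compact sketch of that all-or-nothing pattern with goto unwinding, as kernel code conventionally writes it (names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct chunk {
        int *ste_arr;
        int *hw_ste_arr;
        int *miss_list;
    };

    /* Allocate all three arrays or none: unwind in reverse on failure. */
    static int chunk_init(struct chunk *c, size_t n)
    {
        c->ste_arr = calloc(n, sizeof(*c->ste_arr));
        if (!c->ste_arr)
            goto out;
        c->hw_ste_arr = calloc(n, sizeof(*c->hw_ste_arr));
        if (!c->hw_ste_arr)
            goto out_free_ste;
        c->miss_list = calloc(n, sizeof(*c->miss_list));
        if (!c->miss_list)
            goto out_free_hw;
        return 0;

    out_free_hw:
        free(c->hw_ste_arr);
    out_free_ste:
        free(c->ste_arr);
    out:
        return -1;
    }

    int main(void)
    {
        struct chunk c;

        if (chunk_init(&c, 16) == 0)
            puts("ok");
        return 0;
    }
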
/Linux-v5.4/include/net/sctp/
sm.h
158 __u8 sctp_get_chunk_type(struct sctp_chunk *chunk);
176 const struct sctp_chunk *chunk,
179 const struct sctp_chunk *chunk);
181 const struct sctp_chunk *chunk);
184 const struct sctp_chunk *chunk);
197 const struct sctp_chunk *chunk);
199 const struct sctp_chunk *chunk);
202 const struct sctp_chunk *chunk);
203 int sctp_init_cause(struct sctp_chunk *chunk, __be16 cause, size_t paylen);
205 const struct sctp_chunk *chunk,
[all …]
/Linux-v5.4/drivers/infiniband/hw/mthca/
mthca_memfree.c
64 static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk) in mthca_free_icm_pages() argument
68 if (chunk->nsg > 0) in mthca_free_icm_pages()
69 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages, in mthca_free_icm_pages()
72 for (i = 0; i < chunk->npages; ++i) in mthca_free_icm_pages()
73 __free_pages(sg_page(&chunk->mem[i]), in mthca_free_icm_pages()
74 get_order(chunk->mem[i].length)); in mthca_free_icm_pages()
77 static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk) in mthca_free_icm_coherent() argument
81 for (i = 0; i < chunk->npages; ++i) { in mthca_free_icm_coherent()
82 dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length, in mthca_free_icm_coherent()
83 lowmem_page_address(sg_page(&chunk->mem[i])), in mthca_free_icm_coherent()
[all …]
/Linux-v5.4/drivers/gpu/drm/qxl/
qxl_image.c
38 struct qxl_drm_chunk *chunk; in qxl_allocate_chunk() local
41 chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL); in qxl_allocate_chunk()
42 if (!chunk) in qxl_allocate_chunk()
45 ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo); in qxl_allocate_chunk()
47 kfree(chunk); in qxl_allocate_chunk()
51 list_add_tail(&chunk->head, &image->chunk_list); in qxl_allocate_chunk()
88 struct qxl_drm_chunk *chunk, *tmp; in qxl_image_free_objects() local
90 list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) { in qxl_image_free_objects()
91 qxl_bo_unref(&chunk->bo); in qxl_image_free_objects()
92 kfree(chunk); in qxl_image_free_objects()
[all …]
/Linux-v5.4/arch/mips/dec/prom/
console.c
18 unsigned int chunk = sizeof(buf) - 1; in prom_console_write() local
21 if (chunk > c) in prom_console_write()
22 chunk = c; in prom_console_write()
23 memcpy(buf, s, chunk); in prom_console_write()
24 buf[chunk] = '\0'; in prom_console_write()
26 s += chunk; in prom_console_write()
27 c -= chunk; in prom_console_write()
/Linux-v5.4/sound/core/
sgbuf.c
60 unsigned int i, pages, chunk, maxpages; in snd_malloc_sgbuf_pages() local
93 chunk = pages; in snd_malloc_sgbuf_pages()
95 if (chunk > maxpages) in snd_malloc_sgbuf_pages()
96 chunk = maxpages; in snd_malloc_sgbuf_pages()
97 chunk <<= PAGE_SHIFT; in snd_malloc_sgbuf_pages()
99 chunk, &tmpb) < 0) { in snd_malloc_sgbuf_pages()
107 chunk = tmpb.bytes >> PAGE_SHIFT; in snd_malloc_sgbuf_pages()
108 for (i = 0; i < chunk; i++) { in snd_malloc_sgbuf_pages()
112 table->addr |= chunk; /* mark head */ in snd_malloc_sgbuf_pages()
118 sgbuf->pages += chunk; in snd_malloc_sgbuf_pages()
[all …]
