
Searched refs:heap (results 1 – 25 of 71), sorted by relevance.


/Linux-v5.4/drivers/staging/android/ion/
ion_heap.c
20 void *ion_heap_map_kernel(struct ion_heap *heap, in ion_heap_map_kernel() argument
58 void ion_heap_unmap_kernel(struct ion_heap *heap, in ion_heap_unmap_kernel() argument
64 int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, in ion_heap_map_user() argument
157 void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer) in ion_heap_freelist_add() argument
159 spin_lock(&heap->free_lock); in ion_heap_freelist_add()
160 list_add(&buffer->list, &heap->free_list); in ion_heap_freelist_add()
161 heap->free_list_size += buffer->size; in ion_heap_freelist_add()
162 spin_unlock(&heap->free_lock); in ion_heap_freelist_add()
163 wake_up(&heap->waitqueue); in ion_heap_freelist_add()
166 size_t ion_heap_freelist_size(struct ion_heap *heap) in ion_heap_freelist_size() argument
[all …]
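
The ion_heap.c hits above show ION's deferred-free pattern: freed buffers are parked on a spinlock-protected freelist and a waitqueue wakes a reclaim thread to release them later. A minimal userspace sketch of the same shape, with a pthread mutex and condition variable standing in for the kernel spinlock and waitqueue (all names below are illustrative, not ION's):

#include <pthread.h>
#include <stddef.h>

struct buffer {
	struct buffer *next;            /* sketch: singly linked freelist */
	size_t size;
};

struct heap {
	struct buffer *free_list;
	size_t free_list_size;          /* total bytes awaiting reclaim */
	pthread_mutex_t free_lock;
	pthread_cond_t waitqueue;       /* wakes the reclaim thread */
};

/* Mirrors ion_heap_freelist_add(): queue the buffer, then wake the reclaimer. */
static void heap_freelist_add(struct heap *heap, struct buffer *buffer)
{
	pthread_mutex_lock(&heap->free_lock);
	buffer->next = heap->free_list;
	heap->free_list = buffer;
	heap->free_list_size += buffer->size;
	pthread_mutex_unlock(&heap->free_lock);
	pthread_cond_signal(&heap->waitqueue);
}
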
ion_system_heap.c
43 struct ion_heap heap; member
47 static struct page *alloc_buffer_page(struct ion_system_heap *heap, in alloc_buffer_page() argument
51 struct ion_page_pool *pool = heap->pools[order_to_index(order)]; in alloc_buffer_page()
56 static void free_buffer_page(struct ion_system_heap *heap, in free_buffer_page() argument
68 pool = heap->pools[order_to_index(order)]; in free_buffer_page()
73 static struct page *alloc_largest_available(struct ion_system_heap *heap, in alloc_largest_available() argument
87 page = alloc_buffer_page(heap, buffer, orders[i]); in alloc_largest_available()
97 static int ion_system_heap_allocate(struct ion_heap *heap, in ion_system_heap_allocate() argument
102 struct ion_system_heap *sys_heap = container_of(heap, in ion_system_heap_allocate()
104 heap); in ion_system_heap_allocate()
[all …]
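
ion_system_heap embeds a struct ion_heap and recovers its own type from a base pointer with container_of(), which is how ion_system_heap_allocate() gets from the generic heap argument back to its page pools. A standalone sketch of the idiom (container_of below is a simplified form of the kernel macro; the surrounding types are trimmed for illustration):

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ion_heap { int type; };

struct ion_system_heap {
	struct ion_heap heap;           /* embedded base object */
	void *pools[4];                 /* per-order page pools (trimmed) */
};

/* Given only the embedded base pointer, recover the outer system heap. */
static struct ion_system_heap *to_system_heap(struct ion_heap *heap)
{
	return container_of(heap, struct ion_system_heap, heap);
}
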
ion.c
33 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, in ion_buffer_create() argument
45 buffer->heap = heap; in ion_buffer_create()
50 ret = heap->ops->allocate(heap, buffer, len, flags); in ion_buffer_create()
53 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE)) in ion_buffer_create()
56 ion_heap_freelist_drain(heap, 0); in ion_buffer_create()
57 ret = heap->ops->allocate(heap, buffer, len, flags); in ion_buffer_create()
68 spin_lock(&heap->stat_lock); in ion_buffer_create()
69 heap->num_of_buffers++; in ion_buffer_create()
70 heap->num_of_alloc_bytes += len; in ion_buffer_create()
71 if (heap->num_of_alloc_bytes > heap->alloc_bytes_wm) in ion_buffer_create()
[all …]
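
ion_buffer_create() calls the heap's allocate op once and, if the heap defers frees, drains the freelist and retries, so memory parked for lazy release can satisfy the request. A hedged sketch of that retry shape (heap_allocate and heap_freelist_drain are stand-ins for the ops call and ion_heap_freelist_drain):

#include <stddef.h>

struct heap;
struct buffer;
int heap_allocate(struct heap *heap, struct buffer *buf, size_t len);
void heap_freelist_drain(struct heap *heap, size_t size); /* 0 == drain all */

/* Try once; on failure reclaim everything parked on the freelist and retry. */
static int buffer_alloc_with_retry(struct heap *heap, struct buffer *buf,
				   size_t len)
{
	int ret = heap_allocate(heap, buf, len);
	if (ret) {
		heap_freelist_drain(heap, 0);
		ret = heap_allocate(heap, buf, len);
	}
	return ret;
}
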
ion.h
43 struct ion_heap *heap; member
86 int (*allocate)(struct ion_heap *heap,
90 void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
91 void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
94 int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
169 void ion_device_add_heap(struct ion_heap *heap);
175 void *ion_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
176 void ion_heap_unmap_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
177 int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
190 int ion_heap_init_shrinker(struct ion_heap *heap);
[all …]
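
ion.h defines the per-heap ops table: each heap type fills in allocate, map_kernel/unmap_kernel, and shrink, and the core dispatches through the pointers without knowing the concrete heap. A minimal sketch of that vtable pattern (types renamed and trimmed; not ION's actual declarations):

#include <stddef.h>

struct my_heap;
struct my_buffer;

struct my_heap_ops {
	int   (*allocate)(struct my_heap *heap, struct my_buffer *buf,
			  size_t len, unsigned long flags);
	void *(*map_kernel)(struct my_heap *heap, struct my_buffer *buf);
	void  (*unmap_kernel)(struct my_heap *heap, struct my_buffer *buf);
	int   (*shrink)(struct my_heap *heap, int nr_to_scan);
};

struct my_heap {
	const struct my_heap_ops *ops;
};

/* The core never sees the concrete heap type; it only calls through ops. */
static int my_heap_allocate(struct my_heap *heap, struct my_buffer *buf,
			    size_t len, unsigned long flags)
{
	return heap->ops->allocate(heap, buf, len, flags);
}
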
ion_cma_heap.c
20 struct ion_heap heap; member
24 #define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
27 static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, in ion_cma_allocate() argument
31 struct ion_cma_heap *cma_heap = to_cma_heap(heap); in ion_cma_allocate()
85 struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); in ion_cma_free()
113 cma_heap->heap.ops = &ion_cma_ops; in __ion_cma_heap_create()
115 cma_heap->heap.type = ION_HEAP_TYPE_DMA; in __ion_cma_heap_create()
116 return &cma_heap->heap; in __ion_cma_heap_create()
121 struct ion_heap *heap; in __ion_add_cma_heaps() local
123 heap = __ion_cma_heap_create(cma); in __ion_add_cma_heaps()
[all …]
Kconfig
15 bool "Ion system heap"
18 Choose this option to enable the Ion system heap. The system heap
22 bool "Ion CMA heap support"
25 Choose this option to enable CMA heaps with Ion. This heap is backed
/Linux-v5.4/fs/ubifs/
lprops.c
50 static void move_up_lpt_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, in move_up_lpt_heap() argument
63 val2 = get_heap_comp_val(heap->arr[ppos], cat); in move_up_lpt_heap()
67 heap->arr[ppos]->hpos = hpos; in move_up_lpt_heap()
68 heap->arr[hpos] = heap->arr[ppos]; in move_up_lpt_heap()
69 heap->arr[ppos] = lprops; in move_up_lpt_heap()
87 static void adjust_lpt_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, in adjust_lpt_heap() argument
97 val2 = get_heap_comp_val(heap->arr[ppos], cat); in adjust_lpt_heap()
101 heap->arr[ppos]->hpos = hpos; in adjust_lpt_heap()
102 heap->arr[hpos] = heap->arr[ppos]; in adjust_lpt_heap()
103 heap->arr[ppos] = lprops; in adjust_lpt_heap()
[all …]
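
move_up_lpt_heap() is a textbook binary-heap sift-up over a flat array: compare the element with its parent at (pos - 1) / 2, swap while out of order, and keep each element's back-pointer (hpos) in sync. A generic sketch of the same move on a plain int array (max-heap order assumed; no back-pointers):

/* Sift the element at index pos up a max-heap stored in arr[]. */
static void sift_up(int *arr, int pos)
{
	while (pos > 0) {
		int parent = (pos - 1) / 2;
		if (arr[parent] >= arr[pos])
			break;                  /* heap property restored */
		int tmp = arr[parent];
		arr[parent] = arr[pos];
		arr[pos] = tmp;
		pos = parent;
	}
}
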
find.c
45 struct ubifs_lpt_heap *heap; in valuable() local
51 heap = &c->lpt_heap[cat - 1]; in valuable()
52 if (heap->cnt < heap->max_cnt) in valuable()
129 struct ubifs_lpt_heap *heap; in scan_for_dirty() local
134 heap = &c->lpt_heap[LPROPS_FREE - 1]; in scan_for_dirty()
135 for (i = 0; i < heap->cnt; i++) { in scan_for_dirty()
136 lprops = heap->arr[i]; in scan_for_dirty()
226 struct ubifs_lpt_heap *heap, *idx_heap; in ubifs_find_dirty_leb() local
272 heap = &c->lpt_heap[LPROPS_DIRTY - 1]; in ubifs_find_dirty_leb()
292 if (heap->cnt) { in ubifs_find_dirty_leb()
[all …]
lpt_commit.c
764 struct ubifs_lpt_heap *heap; in populate_lsave() local
791 heap = &c->lpt_heap[LPROPS_DIRTY_IDX - 1]; in populate_lsave()
792 for (i = 0; i < heap->cnt; i++) { in populate_lsave()
793 c->lsave[cnt++] = heap->arr[i]->lnum; in populate_lsave()
797 heap = &c->lpt_heap[LPROPS_DIRTY - 1]; in populate_lsave()
798 for (i = 0; i < heap->cnt; i++) { in populate_lsave()
799 c->lsave[cnt++] = heap->arr[i]->lnum; in populate_lsave()
803 heap = &c->lpt_heap[LPROPS_FREE - 1]; in populate_lsave()
804 for (i = 0; i < heap->cnt; i++) { in populate_lsave()
805 c->lsave[cnt++] = heap->arr[i]->lnum; in populate_lsave()
[all …]
/Linux-v5.4/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
base.c
255 nvkm_mmu_type(struct nvkm_mmu *mmu, int heap, u8 type) in nvkm_mmu_type() argument
257 if (heap >= 0 && !WARN_ON(mmu->type_nr == ARRAY_SIZE(mmu->type))) { in nvkm_mmu_type()
258 mmu->type[mmu->type_nr].type = type | mmu->heap[heap].type; in nvkm_mmu_type()
259 mmu->type[mmu->type_nr].heap = heap; in nvkm_mmu_type()
268 if (!WARN_ON(mmu->heap_nr == ARRAY_SIZE(mmu->heap))) { in nvkm_mmu_heap()
269 mmu->heap[mmu->heap_nr].type = type; in nvkm_mmu_heap()
270 mmu->heap[mmu->heap_nr].size = size; in nvkm_mmu_heap()
282 int heap; in nvkm_mmu_host() local
285 heap = nvkm_mmu_heap(mmu, NVKM_MEM_HOST, ~0ULL); in nvkm_mmu_host()
286 nvkm_mmu_type(mmu, heap, type); in nvkm_mmu_host()
[all …]
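
In nvkm a "heap" is an entry in a small fixed table of memory regions, and nvkm_mmu_type() tags each memory type with the heap it draws from, with WARN_ON guarding the table bounds. A compact sketch of that registration pattern (struct fields trimmed; the -1 return stands in for the kernel's WARN_ON path):

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct mmu {
	struct { unsigned char type; unsigned long long size; } heap[4];
	int heap_nr;
	struct { unsigned char type; int heap; } type[16];
	int type_nr;
};

/* Register a heap; returns its index, or -1 if the table is full. */
static int mmu_add_heap(struct mmu *mmu, unsigned char type,
			unsigned long long size)
{
	if (mmu->heap_nr >= (int)ARRAY_SIZE(mmu->heap))
		return -1;
	mmu->heap[mmu->heap_nr].type = type;
	mmu->heap[mmu->heap_nr].size = size;
	return mmu->heap_nr++;
}

/* Register a memory type that inherits its heap's attribute bits. */
static int mmu_add_type(struct mmu *mmu, int heap, unsigned char type)
{
	if (heap < 0 || mmu->type_nr >= (int)ARRAY_SIZE(mmu->type))
		return -1;
	mmu->type[mmu->type_nr].type = type | mmu->heap[heap].type;
	mmu->type[mmu->type_nr].heap = heap;
	return mmu->type_nr++;
}
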
/Linux-v5.4/drivers/gpu/drm/nouveau/include/nvkm/core/
mm.h
12 u8 heap; member
34 int nvkm_mm_init(struct nvkm_mm *, u8 heap, u32 offset, u32 length, u32 block);
36 int nvkm_mm_head(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
38 int nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
44 nvkm_mm_heap_size(struct nvkm_mm *mm, u8 heap) in nvkm_mm_heap_size() argument
49 if (node->heap == heap) in nvkm_mm_heap_size()
/Linux-v5.4/drivers/gpu/drm/nouveau/nvkm/core/
mm.c
99 b->heap = a->heap; in region_head()
111 nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min, in nvkm_mm_head() argument
122 if (unlikely(heap != NVKM_MM_HEAP_ANY)) { in nvkm_mm_head()
123 if (this->heap != heap) in nvkm_mm_head()
175 b->heap = a->heap; in region_tail()
186 nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min, in nvkm_mm_tail() argument
198 if (unlikely(heap != NVKM_MM_HEAP_ANY)) { in nvkm_mm_tail()
199 if (this->heap != heap) in nvkm_mm_tail()
240 nvkm_mm_init(struct nvkm_mm *mm, u8 heap, u32 offset, u32 length, u32 block) in nvkm_mm_init() argument
277 node->heap = heap; in nvkm_mm_init()
gpuobj.c
180 ret = nvkm_mm_head(&parent->heap, 0, 1, size, size, in nvkm_gpuobj_ctor()
183 ret = nvkm_mm_tail(&parent->heap, 0, 1, size, size, in nvkm_gpuobj_ctor()
211 return nvkm_mm_init(&gpuobj->heap, 0, 0, gpuobj->size, 1); in nvkm_gpuobj_ctor()
220 nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node); in nvkm_gpuobj_del()
221 nvkm_mm_fini(&gpuobj->heap); in nvkm_gpuobj_del()
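
nvkm_mm is a range allocator: nvkm_mm_head() and nvkm_mm_tail() carve a block from the low or high end of a region list, optionally restricted to one heap tag, and nvkm_gpuobj_ctor() uses them to suballocate children out of a parent object's space. A heavily reduced sketch over a single free span (the real allocator keeps a node list with heap/type tags and alignment handling):

#include <stddef.h>

struct mm {
	size_t offset;                  /* start of the free span */
	size_t length;                  /* bytes still free */
};

/* Allocate from the head (low addresses), as nvkm_mm_head() does. */
static int mm_head(struct mm *mm, size_t size, size_t *out)
{
	if (size > mm->length)
		return -1;
	*out = mm->offset;
	mm->offset += size;
	mm->length -= size;
	return 0;
}

/* Allocate from the tail (high addresses), as nvkm_mm_tail() does. */
static int mm_tail(struct mm *mm, size_t size, size_t *out)
{
	if (size > mm->length)
		return -1;
	mm->length -= size;
	*out = mm->offset + mm->length;
	return 0;
}
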
/Linux-v5.4/drivers/gpu/drm/nouveau/nvif/
mmu.c
32 kfree(mmu->heap); in nvif_mmu_fini()
49 mmu->heap = NULL; in nvif_mmu_init()
68 mmu->heap = kmalloc_array(mmu->heap_nr, sizeof(*mmu->heap), in nvif_mmu_init()
72 if (ret = -ENOMEM, !mmu->heap || !mmu->type) in nvif_mmu_init()
88 mmu->heap[i].size = args.size; in nvif_mmu_init()
108 mmu->type[i].heap = args.heap; in nvif_mmu_init()
/Linux-v5.4/tools/testing/selftests/android/ion/
README
13 two user space processes, using various heap types.
14 The following heap types are supported by ION driver.
22 Each heap is associated with the respective heap id.
26 the heap id, writing some data to this buffer and then exporting the FD
61 buffer sharing using ion system heap.
62 Currently the heap size is hard coded as just 10 bytes inside this script.
68 Now you can run the export and import manually by specifying the heap type
69 and the heap size.
82 heap type: 0
83 heap id: 1
[all …]
/Linux-v5.4/drivers/md/bcache/
movinggc.c
194 return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0; in bucket_heap_top()
213 ca->heap.used = 0; in bch_moving_gc()
222 if (!heap_full(&ca->heap)) { in bch_moving_gc()
224 heap_add(&ca->heap, b, bucket_cmp); in bch_moving_gc()
225 } else if (bucket_cmp(b, heap_peek(&ca->heap))) { in bch_moving_gc()
229 ca->heap.data[0] = b; in bch_moving_gc()
230 heap_sift(&ca->heap, 0, bucket_cmp); in bch_moving_gc()
235 heap_pop(&ca->heap, b, bucket_cmp); in bch_moving_gc()
239 while (heap_pop(&ca->heap, b, bucket_cmp)) in bch_moving_gc()
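
bch_moving_gc() keeps the best garbage-collection candidates in a fixed-size heap: buckets are added while there is room, and once the heap is full a new bucket only replaces the root if it compares better, followed by a sift-down. A self-contained sketch of that bounded top-k pattern using a min-heap of ints (names are illustrative, not bcache's heap macros):

#include <stddef.h>

struct bounded_heap {
	int *data;
	size_t used, size;
};

static void sift_down(struct bounded_heap *h, size_t i)
{
	for (;;) {
		size_t l = 2 * i + 1, r = l + 1, m = i;
		if (l < h->used && h->data[l] < h->data[m])
			m = l;
		if (r < h->used && h->data[r] < h->data[m])
			m = r;
		if (m == i)
			break;
		int tmp = h->data[i];
		h->data[i] = h->data[m];
		h->data[m] = tmp;
		i = m;
	}
}

/* Keep the `size` largest values seen: the min-heap root is the cutoff. */
static void top_k_add(struct bounded_heap *h, int v)
{
	if (h->used < h->size) {                /* not full: just insert */
		size_t i = h->used++;
		h->data[i] = v;
		while (i && h->data[(i - 1) / 2] > h->data[i]) {
			int tmp = h->data[i];
			h->data[i] = h->data[(i - 1) / 2];
			h->data[(i - 1) / 2] = tmp;
			i = (i - 1) / 2;
		}
	} else if (h->size && v > h->data[0]) { /* beats the current minimum */
		h->data[0] = v;
		sift_down(h, 0);
	}
}
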
util.h
42 #define init_heap(heap, _size, gfp) \ argument
45 (heap)->used = 0; \
46 (heap)->size = (_size); \
47 _bytes = (heap)->size * sizeof(*(heap)->data); \
48 (heap)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL); \
49 (heap)->data; \
52 #define free_heap(heap) \ argument
54 kvfree((heap)->data); \
55 (heap)->data = NULL; \
alloc.c
185 ca->heap.used = 0; in invalidate_buckets_lru()
191 if (!heap_full(&ca->heap)) in invalidate_buckets_lru()
192 heap_add(&ca->heap, b, bucket_max_cmp); in invalidate_buckets_lru()
193 else if (bucket_max_cmp(b, heap_peek(&ca->heap))) { in invalidate_buckets_lru()
194 ca->heap.data[0] = b; in invalidate_buckets_lru()
195 heap_sift(&ca->heap, 0, bucket_max_cmp); in invalidate_buckets_lru()
199 for (i = ca->heap.used / 2 - 1; i >= 0; --i) in invalidate_buckets_lru()
200 heap_sift(&ca->heap, i, bucket_min_cmp); in invalidate_buckets_lru()
203 if (!heap_pop(&ca->heap, b, bucket_min_cmp)) { in invalidate_buckets_lru()
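
invalidate_buckets_lru() fills the heap under one comparator (bucket_max_cmp) and then rebuilds it in place under the opposite one (bucket_min_cmp) with the classic bottom-up loop from used/2 - 1 down to 0, so candidates pop in reverse order. A comparator-parameterized sketch of that rebuild step:

#include <stddef.h>

/* Bottom-up heapify: restore the heap property under a new comparator.
 * cmp(a, b) > 0 means a belongs above b in the heap. */
static void heapify(int *data, size_t used, int (*cmp)(int a, int b))
{
	for (size_t i = used / 2; i-- > 0; ) {
		size_t j = i;
		for (;;) {
			size_t l = 2 * j + 1, r = l + 1, m = j;
			if (l < used && cmp(data[l], data[m]) > 0)
				m = l;
			if (r < used && cmp(data[r], data[m]) > 0)
				m = r;
			if (m == j)
				break;
			int tmp = data[j];
			data[j] = data[m];
			data[m] = tmp;
			j = m;
		}
	}
}
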
/Linux-v5.4/lib/zlib_deflate/
deftree.c
352 top = s->heap[SMALLEST]; \
353 s->heap[SMALLEST] = s->heap[s->heap_len--]; \
377 int v = s->heap[k]; in pqdownheap()
382 smaller(tree, s->heap[j+1], s->heap[j], s->depth)) { in pqdownheap()
386 if (smaller(tree, v, s->heap[j], s->depth)) break; in pqdownheap()
389 s->heap[k] = s->heap[j]; k = j; in pqdownheap()
394 s->heap[k] = v; in pqdownheap()
430 tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */ in gen_bitlen()
433 n = s->heap[h]; in gen_bitlen()
474 m = s->heap[--h]; in gen_bitlen()
[all …]
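
deftree.c's pqremove/pqdownheap pair is the textbook pop: take the root (index SMALLEST), move the last element into its place, shrink the heap, and sift down with the hole-filling trick (shift children up, write the saved value once). zlib stores the heap 1-indexed, so the children of node k are 2k and 2k+1. A sketch of the pop under the same 1-indexed convention, simplified to plain int keys:

/* 1-indexed min-heap pop, mirroring zlib's pqremove() and pqdownheap(). */
static int pq_remove(int *heap, int *heap_len)
{
	int top = heap[1];              /* SMALLEST == 1 in zlib */
	heap[1] = heap[(*heap_len)--];

	int k = 1, v = heap[1];
	while (2 * k <= *heap_len) {
		int j = 2 * k;          /* left child */
		if (j < *heap_len && heap[j + 1] < heap[j])
			j++;            /* pick the smaller child */
		if (v <= heap[j])
			break;
		heap[k] = heap[j];      /* shift child up, move hole down */
		k = j;
	}
	heap[k] = v;
	return top;
}
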
/Linux-v5.4/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/
nv04.c
31 struct nvkm_mm heap; member
103 nvkm_mm_free(&iobj->imem->heap, &iobj->node); in nv04_instobj_dtor()
136 ret = nvkm_mm_head(&imem->heap, 0, 1, size, size, in nv04_instobj_new()
168 ret = nvkm_mm_init(&imem->heap, 0, 0, imem->base.reserved, 1); in nv04_instmem_oneinit()
206 nvkm_mm_fini(&imem->heap); in nv04_instmem_dtor()
nv40.c
32 struct nvkm_mm heap; member
103 nvkm_mm_free(&iobj->imem->heap, &iobj->node); in nv40_instobj_dtor()
136 ret = nvkm_mm_head(&imem->heap, 0, 1, size, size, in nv40_instobj_new()
180 ret = nvkm_mm_init(&imem->heap, 0, 0, imem->base.reserved, 1); in nv40_instmem_oneinit()
222 nvkm_mm_fini(&imem->heap); in nv40_instmem_dtor()
/Linux-v5.4/arch/x86/boot/compressed/
misc.c
340 asmlinkage __visible void *extract_kernel(void *rmode, memptr heap, in extract_kernel() argument
380 free_mem_ptr = heap; /* Heap */ in extract_kernel()
381 free_mem_end_ptr = heap + BOOT_HEAP_SIZE; in extract_kernel()
423 if (heap > 0x3fffffffffffUL) in extract_kernel()
428 if (heap > ((-__PAGE_OFFSET-(128<<20)-1) & 0x7fffffff)) in extract_kernel()
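
Here the boot "heap" is just a bump region: extract_kernel() sets free_mem_ptr to the heap base and free_mem_end_ptr to heap + BOOT_HEAP_SIZE, and the decompressor's allocator advances the pointer until it hits the fence. A minimal sketch of that scheme (the BOOT_HEAP_SIZE value and 16-byte alignment are illustrative):

#include <stddef.h>
#include <stdint.h>

#define BOOT_HEAP_SIZE (64 * 1024)      /* illustrative; arch-dependent */

static uintptr_t free_mem_ptr;
static uintptr_t free_mem_end_ptr;

static void boot_heap_init(void *heap)
{
	free_mem_ptr = (uintptr_t)heap;
	free_mem_end_ptr = free_mem_ptr + BOOT_HEAP_SIZE;
}

/* Bump allocation: align, advance, fail past the fence. There is no free(). */
static void *boot_malloc(size_t size)
{
	uintptr_t p = (free_mem_ptr + 15) & ~(uintptr_t)15;

	if (p + size > free_mem_end_ptr)
		return NULL;
	free_mem_ptr = p + size;
	return (void *)p;
}
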
/Linux-v5.4/arch/mips/boot/compressed/
head.S
35 PTR_LA a0, (.heap) /* heap address */
55 .comm .heap,BOOT_HEAP_SIZE,4
/Linux-v5.4/drivers/gpu/drm/nouveau/include/nvif/
mmu.h
15 } *heap; member
27 u8 heap; member
/Linux-v5.4/tools/perf/util/
s390-cpumsf.c
170 struct auxtrace_heap heap; member
816 return auxtrace_heap__add(&sf->heap, queue_nr, ts); in s390_cpumsf_setup_queue()
852 if (!sf->heap.heap_cnt) in s390_cpumsf_process_queues()
855 if (sf->heap.heap_array[0].ordinal >= timestamp) in s390_cpumsf_process_queues()
858 queue_nr = sf->heap.heap_array[0].queue_nr; in s390_cpumsf_process_queues()
862 auxtrace_heap__pop(&sf->heap); in s390_cpumsf_process_queues()
863 if (sf->heap.heap_cnt) { in s390_cpumsf_process_queues()
864 ts = sf->heap.heap_array[0].ordinal + 1; in s390_cpumsf_process_queues()
873 auxtrace_heap__add(&sf->heap, queue_nr, ts); in s390_cpumsf_process_queues()
877 ret = auxtrace_heap__add(&sf->heap, queue_nr, ts); in s390_cpumsf_process_queues()
[all …]
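
s390_cpumsf_process_queues() is a k-way merge: an auxtrace min-heap keys every queue by the timestamp of its next sample, the smallest entry is popped, that queue is processed up to the deadline, and it is pushed back with its new ordinal. A compact sketch of the pop/process/re-add loop (heap_min, heap_pop, heap_add, and process_queue are hypothetical stand-ins for the auxtrace_heap helpers and the per-queue callback):

#include <stdint.h>

struct merge_entry { uint64_t ordinal; unsigned int queue_nr; };

int heap_min(struct merge_entry *out);  /* returns 0 when the heap is empty */
void heap_pop(void);
void heap_add(unsigned int queue_nr, uint64_t ordinal);
uint64_t process_queue(unsigned int queue_nr, uint64_t deadline);

/* Drain all queues in global timestamp order, up to `timestamp`. */
static void process_queues(uint64_t timestamp)
{
	struct merge_entry top;

	while (heap_min(&top) && top.ordinal < timestamp) {
		heap_pop();
		/* Process this queue's samples up to the deadline, then
		 * re-queue it at its next unprocessed timestamp. */
		uint64_t next = process_queue(top.queue_nr, timestamp);
		heap_add(top.queue_nr, next);
	}
}
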
