/* SPDX-License-Identifier: GPL-2.0 */
/*
 * drivers/staging/android/ion/ion.h
 *
 * Copyright (C) 2011 Google, Inc.
 */

#ifndef _ION_H
#define _ION_H

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/shrinker.h>
#include <linux/types.h>
#include <linux/miscdevice.h>

#include "../uapi/ion.h"

/**
 * struct ion_platform_heap - defines a heap in the given platform
 * @type:	type of the heap from the ion_heap_type enum
 * @id:		unique identifier for the heap. When allocating, heaps with
 *		higher ids will be allocated from first. At allocation these
 *		are passed as a bit mask and therefore can not exceed
 *		ION_NUM_HEAP_IDS.
 * @name:	used for debug purposes
 * @base:	base address of heap in physical memory if applicable
 * @size:	size of the heap in bytes if applicable
 * @align:	required alignment of the heap in physical memory if applicable
 * @priv:	private info passed from the board file
 *
 * Provided by the board file.
 */
struct ion_platform_heap {
        enum ion_heap_type type;
        unsigned int id;
        const char *name;
        phys_addr_t base;
        size_t size;
        phys_addr_t align;
        void *priv;
};

/**
 * struct ion_buffer - metadata for a particular buffer
 * @node:		node in the ion_device buffers tree
 * @list:		node in a deferred free list when the buffer is queued
 *			for freeing
 * @dev:		back pointer to the ion_device
 * @heap:		back pointer to the heap the buffer came from
 * @flags:		buffer specific flags
 * @private_flags:	internal buffer specific flags
 * @size:		size of the buffer
 * @priv_virt:		private data to the buffer representable as
 *			a void *
 * @lock:		protects the buffer's cnt fields
 * @kmap_cnt:		number of times the buffer is mapped to the kernel
 * @vaddr:		the kernel mapping if kmap_cnt is not zero
 * @sg_table:		the sg table for the buffer
 * @attachments:	list of dma-buf attachments for this buffer
 */
struct ion_buffer {
        union {
                struct rb_node node;
                struct list_head list;
        };
        struct ion_device *dev;
        struct ion_heap *heap;
        unsigned long flags;
        unsigned long private_flags;
        size_t size;
        void *priv_virt;
        struct mutex lock;
        int kmap_cnt;
        void *vaddr;
        struct sg_table *sg_table;
        struct list_head attachments;
};

void ion_buffer_destroy(struct ion_buffer *buffer);

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the list of heaps
 * @heaps:		list of all the heaps in the system
 * @debug_root:		dentry of the ion debugfs root
 * @heap_cnt:		number of heaps in the system
 */
struct ion_device {
        struct miscdevice dev;
        struct rb_root buffers;
        struct mutex buffer_lock;
        struct rw_semaphore lock;
        struct plist_head heaps;
        struct dentry *debug_root;
        int heap_cnt;
};

/**
 * struct ion_heap_ops - ops to operate on a given heap
 * @allocate:		allocate memory
 * @free:		free memory
 * @map_kernel:		map memory to the kernel
 * @unmap_kernel:	unmap memory from the kernel
 * @map_user:		map memory to userspace
 * @shrink:		shrink the heap's caches (e.g. page pools), returning
 *			pages to the system
 *
 * allocate and map_user return 0 on success, -errno on error.
 * map_kernel returns a pointer on success, ERR_PTR on error.
 * @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
 * the buffer's private_flags when called from a shrinker. In that
 * case, the pages being freed must be truly freed back to the
 * system, not put in a page pool or otherwise cached.
 */
struct ion_heap_ops {
        int (*allocate)(struct ion_heap *heap,
                        struct ion_buffer *buffer, unsigned long len,
                        unsigned long flags);
        void (*free)(struct ion_buffer *buffer);
        void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
        void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
        int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
                        struct vm_area_struct *vma);
        int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
};
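
/*
 * Example (illustrative sketch only, not part of this driver): a minimal
 * heap might back each buffer with a single alloc_pages() allocation and
 * reuse the generic ion_heap_map_kernel()/ion_heap_map_user() helpers
 * declared later in this header. All "example_*" names are hypothetical.
 *
 *        static int example_heap_allocate(struct ion_heap *heap,
 *                                         struct ion_buffer *buffer,
 *                                         unsigned long len,
 *                                         unsigned long flags)
 *        {
 *                unsigned int order = get_order(len);
 *                struct sg_table *table;
 *                struct page *page;
 *
 *                page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
 *                if (!page)
 *                        return -ENOMEM;
 *
 *                table = kmalloc(sizeof(*table), GFP_KERNEL);
 *                if (!table || sg_alloc_table(table, 1, GFP_KERNEL)) {
 *                        kfree(table);
 *                        __free_pages(page, order);
 *                        return -ENOMEM;
 *                }
 *                sg_set_page(table->sgl, page, PAGE_ALIGN(len), 0);
 *                buffer->sg_table = table;
 *                return 0;
 *        }
 *
 *        static void example_heap_free(struct ion_buffer *buffer)
 *        {
 *                struct sg_table *table = buffer->sg_table;
 *
 *                __free_pages(sg_page(table->sgl), get_order(buffer->size));
 *                sg_free_table(table);
 *                kfree(table);
 *        }
 *
 *        static struct ion_heap_ops example_heap_ops = {
 *                .allocate     = example_heap_allocate,
 *                .free         = example_heap_free,
 *                .map_kernel   = ion_heap_map_kernel,
 *                .unmap_kernel = ion_heap_unmap_kernel,
 *                .map_user     = ion_heap_map_user,
 *        };
 */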

/**
 * heap flags - flags between the heaps and core ion code
 */
#define ION_HEAP_FLAG_DEFER_FREE BIT(0)

/**
 * private flags - flags internal to ion
 */
/*
 * Buffer is being freed from a shrinker function. Skip any possible
 * heap-specific caching mechanism (e.g. page pools). Guarantees that
 * any buffer storage that came from the system allocator will be
 * returned to the system allocator.
 */
#define ION_PRIV_FLAG_SHRINKER_FREE BIT(0)

/**
 * struct ion_heap - represents a heap in the system
 * @node:		list node to put the heap on the device's list of heaps
 * @dev:		back pointer to the ion_device
 * @type:		type of heap
 * @ops:		ops struct as above
 * @flags:		flags
 * @id:			id of heap, also indicates priority of this heap when
 *			allocating. These are specified by platform data and
 *			MUST be unique
 * @name:		used for debugging
 * @shrinker:		a shrinker for the heap
 * @free_list:		free list head if deferred free is used
 * @free_list_size:	size of the deferred free list in bytes
 * @free_lock:		protects the free list
 * @waitqueue:		queue to wait on from deferred free thread
 * @task:		task struct of deferred free thread
 * @debug_show:		called when heap debug file is read to add any
 *			heap specific debug info to output
 *
 * Represents a pool of memory from which buffers can be made. In some
 * systems the only heap is regular system memory allocated via vmalloc.
 * On others, some blocks might require large physically contiguous buffers
 * that are allocated from a specially reserved heap.
 */
struct ion_heap {
        struct plist_node node;
        struct ion_device *dev;
        enum ion_heap_type type;
        struct ion_heap_ops *ops;
        unsigned long flags;
        unsigned int id;
        const char *name;
        struct shrinker shrinker;
        struct list_head free_list;
        size_t free_list_size;
        spinlock_t free_lock;
        wait_queue_head_t waitqueue;
        struct task_struct *task;

        int (*debug_show)(struct ion_heap *heap, struct seq_file *s,
                          void *unused);
};

/**
 * ion_device_add_heap - adds a heap to the ion device
 * @heap:		the heap to add
 */
void ion_device_add_heap(struct ion_heap *heap);
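
/*
 * Example (illustrative sketch only): registering a heap built on the
 * hypothetical example_heap_ops above. With ION_HEAP_FLAG_DEFER_FREE set,
 * deferred freeing and a shrinker are set up for the heap when it is
 * added; see ion_heap_init_deferred_free() and ion_heap_init_shrinker()
 * below. The id chosen here is arbitrary but, per the struct ion_heap
 * documentation, must be unique and below ION_NUM_HEAP_IDS.
 *
 *        static struct ion_heap example_heap = {
 *                .type  = ION_HEAP_TYPE_CUSTOM,
 *                .ops   = &example_heap_ops,
 *                .flags = ION_HEAP_FLAG_DEFER_FREE,
 *                .name  = "example",
 *                .id    = 16,
 *        };
 *
 *        static int __init example_heap_init(void)
 *        {
 *                ion_device_add_heap(&example_heap);
 *                return 0;
 *        }
 *        device_initcall(example_heap_init);
 */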

/**
 * some helpers for common operations on buffers using the sg_table
 * and vaddr fields
 */
void *ion_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
void ion_heap_unmap_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                      struct vm_area_struct *vma);
int ion_heap_buffer_zero(struct ion_buffer *buffer);
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);

int ion_alloc(size_t len,
              unsigned int heap_id_mask,
              unsigned int flags);

/**
 * ion_heap_init_shrinker - initialize a shrinker for a heap
 * @heap:		the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
 * this function will be called to set up a shrinker to shrink the freelists
 * and call the heap's shrink op.
 */
int ion_heap_init_shrinker(struct ion_heap *heap);

/**
 * ion_heap_init_deferred_free -- initialize deferred free functionality
 * @heap:		the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
 * be called to set up deferred frees. Calls to free the buffer will
 * return immediately and the actual free will occur some time later.
 */
int ion_heap_init_deferred_free(struct ion_heap *heap);

/**
 * ion_heap_freelist_add - add a buffer to the deferred free list
 * @heap:		the heap
 * @buffer:		the buffer
 *
 * Adds an item to the deferred freelist.
 */
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);

/**
 * ion_heap_freelist_drain - drain the deferred free list
 * @heap:		the heap
 * @size:		amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist
 * immediately. Returns the total amount freed. The total freed may be
 * higher depending on the size of the items in the list, or lower if
 * there is insufficient total memory on the freelist.
 */
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);

/**
 * ion_heap_freelist_shrink - drain the deferred free
 *				list, skipping any heap-specific
 *				pooling or caching mechanisms
 * @heap:		the heap
 * @size:		amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist
 * immediately. Returns the total amount freed. The total freed may be
 * higher depending on the size of the items in the list, or lower if
 * there is insufficient total memory on the freelist.
 *
 * Unlike with ion_heap_freelist_drain(), don't put any pages back into
 * page pools or otherwise cache the pages. Everything must be
 * genuinely freed back to the system. If you're freeing from a
 * shrinker you probably want to use this. Note that this relies on
 * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
 * flag.
 */
size_t ion_heap_freelist_shrink(struct ion_heap *heap,
                                size_t size);

/**
 * ion_heap_freelist_size - returns the size of the freelist in bytes
 * @heap:		the heap
 */
size_t ion_heap_freelist_size(struct ion_heap *heap);

/**
 * functions for creating and destroying a heap pool -- allows you
 * to keep a pool of pre-allocated memory to use from your heap. Keeping
 * a pool of memory that is ready for dma, i.e. any cached mappings have
 * been invalidated from the cache, provides a significant performance
 * benefit on many systems.
 */

/**
 * struct ion_page_pool - pagepool struct
 * @high_count:		number of highmem items in the pool
 * @low_count:		number of lowmem items in the pool
 * @high_items:		list of highmem items
 * @low_items:		list of lowmem items
 * @mutex:		lock protecting this struct and especially the counts
 *			and item lists
 * @gfp_mask:		gfp_mask to use for allocations
 * @order:		order of pages in the pool
 * @list:		plist node for list of pools
 *
 * Allows you to keep a pool of pre-allocated pages to use from your heap.
 * Keeping a pool of pages that is ready for dma, i.e. any cached mappings
 * have been invalidated from the cache, provides a significant performance
 * benefit on many systems.
 */
struct ion_page_pool {
        int high_count;
        int low_count;
        struct list_head high_items;
        struct list_head low_items;
        struct mutex mutex;
        gfp_t gfp_mask;
        unsigned int order;
        struct plist_node list;
};

struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
void ion_page_pool_destroy(struct ion_page_pool *pool);
struct page *ion_page_pool_alloc(struct ion_page_pool *pool);
void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);

/**
 * ion_page_pool_shrink - shrinks the size of the memory cached in the pool
 * @pool:		the pool
 * @gfp_mask:		the memory type to reclaim
 * @nr_to_scan:		number of items to shrink in pages
 *
 * returns the number of items freed in pages
 */
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
                         int nr_to_scan);
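
/*
 * Example (illustrative sketch only): recycling pages through a pool from
 * the hypothetical example heap above. Pages go back into the pool on the
 * normal free path, but straight back to the system when
 * ION_PRIV_FLAG_SHRINKER_FREE is set. The shrinker set up by
 * ion_heap_init_shrinker() drains the deferred freelist (via
 * ion_heap_freelist_shrink()) before calling the heap's shrink op, so the
 * op below only has to empty the heap's own page pool.
 *
 *        static struct ion_page_pool *example_pool;
 *
 *        static int example_pool_init(void)
 *        {
 *                example_pool = ion_page_pool_create(GFP_KERNEL | __GFP_ZERO, 0);
 *                return example_pool ? 0 : -ENOMEM;
 *        }
 *
 *        static void example_pooled_free(struct ion_buffer *buffer)
 *        {
 *                struct page *page = sg_page(buffer->sg_table->sgl);
 *
 *                if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
 *                        __free_pages(page, example_pool->order);
 *                } else {
 *                        // A real heap would zero the buffer first, e.g.
 *                        // with ion_heap_buffer_zero(), before caching it.
 *                        ion_page_pool_free(example_pool, page);
 *                }
 *                sg_free_table(buffer->sg_table);
 *                kfree(buffer->sg_table);
 *        }
 *
 *        static int example_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
 *                                       int nr_to_scan)
 *        {
 *                // With nr_to_scan == 0 the pool only reports how many
 *                // pages it could free; otherwise it frees up to
 *                // nr_to_scan pages.
 *                return ion_page_pool_shrink(example_pool, gfp_mask,
 *                                            nr_to_scan);
 *        }
 */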

long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);

int ion_query_heaps(struct ion_heap_query *query);

#endif /* _ION_H */