Lines Matching +full:- +full:- +full:include

#include "lv_wayland_smm.h"
#include "../../display/lv_display.h"

#include <stddef.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>

#define ROUND_UP(n, b) (((((n) ? (n) : 1) + (b) - 1) / (b)) * (b))
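
ROUND_UP() rounds n up to the next multiple of b, and the (n) ? (n) : 1 guard makes a zero-byte request still occupy one full block, so a rounded size can never be 0. A quick self-check of that behaviour (the macro copied from above; the 4096-byte block size is only an example):

    #include <assert.h>

    #define ROUND_UP(n, b) (((((n) ? (n) : 1) + (b) - 1) / (b)) * (b))

    int main(void)
    {
        assert(ROUND_UP(1, 4096)    == 4096);   /* small sizes grow to one block    */
        assert(ROUND_UP(4096, 4096) == 4096);   /* exact multiples stay unchanged   */
        assert(ROUND_UP(4097, 4096) == 8192);   /* anything over spills to the next */
        assert(ROUND_UP(0, 4096)    == 4096);   /* zero is treated as 1, never 0    */
        return 0;
    }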

#define LL_FIRST(head) ((head)->first)
#define LL_LAST(head) ((head)->last)
#define LL_NEXT(src, member) ((src)->member.next)
#define LL_PREV(src, member) ((src)->member.prev)

(head)->first = NULL; \
(head)->last = NULL; \

(src)->member.next = NULL; \
(src)->member.prev = (head)->last; \
if ((head)->last == NULL) { \
(head)->first = (src); \
(head)->last->member.next = (src); \
(head)->last = (src); \

(src)->member.prev = (dest); \
(src)->member.next = (dest)->member.next; \
if ((dest)->member.next != NULL) { \
(dest)->member.next->member.prev = (src); \
(head)->last = (src); \
(dest)->member.next = (src); \

if ((src)->member.prev != NULL) { \
(src)->member.prev->member.next = (src)->member.next; \
(head)->first = (src)->member.next; \
if ((src)->member.next != NULL) { \
(src)->member.next->member.prev = (src)->member.prev; \
(head)->last = (src)->member.prev; \
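
The backslash-continued fragments above are bodies of the file's list-management macros: an initialiser, a tail enqueue, an insert-after and an unlink, all operating on links embedded in the items themselves (the member parameter). A self-contained sketch of the same intrusive doubly linked list, written as plain functions with made-up type names (ll_head, ll_link, item) instead of the macros' member argument:

    #include <stdio.h>

    struct item;                                    /* forward declaration for the link types */

    struct ll_head { struct item *first, *last; };
    struct ll_link { struct item *next, *prev; };

    struct item {
        int value;
        struct ll_link use;     /* one embedded link per list the item can be on */
    };

    /* Append at the tail (mirrors the LL_ENQUEUE-style fragment) */
    static void enqueue(struct ll_head *head, struct item *src)
    {
        src->use.next = NULL;
        src->use.prev = head->last;
        if(head->last == NULL) head->first = src;
        else head->last->use.next = src;
        head->last = src;
    }

    /* Unlink from anywhere in the list (mirrors the LL_REMOVE-style fragment) */
    static void remove_item(struct ll_head *head, struct item *src)
    {
        if(src->use.prev != NULL) src->use.prev->use.next = src->use.next;
        else head->first = src->use.next;
        if(src->use.next != NULL) src->use.next->use.prev = src->use.prev;
        else head->last = src->use.prev;
    }

    int main(void)
    {
        struct ll_head head = { NULL, NULL };
        struct item a = { 1, { NULL, NULL } };
        struct item b = { 2, { NULL, NULL } };
        enqueue(&head, &a);
        enqueue(&head, &b);
        remove_item(&head, &a);
        printf("first=%d last=%d\n", head.first->value, head.last->value);  /* prints 2 2 */
        return 0;
    }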

#define WAYLAND_FD_NAME "/" SMM_FD_NAME "-XXXXX"

In smm_create():
    grp->size = smm_instance.page_sz;
    grp->num_buffers = 0;
    LL_INIT(&grp->unused);
    LL_INIT(&grp->inuse);
    LL_INIT(&grp->history);
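
From the smm_create() lines, a group carries a default buffer size of one page, a buffer count, and three lists: unused (released buffers available for reuse), inuse (currently acquired) and history (acquisition order). A rough sketch of that bookkeeping; only the field names shown above are taken from the excerpt, everything else (the struct name, the list-head layout) is assumed:

    #include <stddef.h>

    struct smm_buffer;    /* kept opaque here */

    struct smm_group_sketch {
        size_t size;                 /* buffer size for this group, page aligned     */
        unsigned int num_buffers;    /* buffers currently allocated to the group     */
        struct { struct smm_buffer *first, *last; } unused;    /* released, reusable */
        struct { struct smm_buffer *first, *last; } inuse;     /* handed out         */
        struct { struct smm_buffer *first, *last; } history;   /* acquisition order  */
    };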

In smm_resize():
    rgrp->size = ROUND_UP(sz, smm_instance.page_sz);

    /* Return all unused buffers to pool (to be re-allocated at the new size) */
    while(!LL_IS_EMPTY(&rgrp->unused)) {
    LL_DEQUEUE(buf, &rgrp->unused, use);

    LL_FOREACH(buf, &rgrp->inuse, use) {
    buf->group_resized = true;
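
smm_resize() shows a deferred-resize pattern: the new size is rounded to the page size, buffers sitting unused are handed back to the pool immediately (to be re-allocated at the new size), and buffers still in use are only flagged with group_resized so the release path can recycle them later. A minimal sketch of that split, with illustrative types and a stubbed recycle():

    #include <stdbool.h>
    #include <stddef.h>

    struct buf { bool group_resized; struct buf *next; };
    struct grp { size_t size; struct buf *unused; struct buf *inuse; };

    static void recycle(struct buf *b) { (void)b; /* stand-in for return-to-pool */ }

    static void group_set_size(struct grp *g, size_t new_size)
    {
        g->size = new_size;                      /* the real code rounds to the page size */

        while(g->unused != NULL) {               /* free buffers: recycle right away      */
            struct buf *b = g->unused;
            g->unused = b->next;
            recycle(b);
        }
        for(struct buf *b = g->inuse; b != NULL; b = b->next)
            b->group_resized = true;             /* in-use buffers: recycle on release    */
    }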

In smm_destroy():
    while(!LL_IS_EMPTY(&dgrp->unused)) {
    LL_DEQUEUE(buf, &dgrp->unused, use);

    while(!LL_IS_EMPTY(&dgrp->inuse)) {
    LL_DEQUEUE(buf, &dgrp->inuse, use);

In smm_acquire():
    if(LL_IS_EMPTY(&agrp->unused)) {

    LL_DEQUEUE(buf, &agrp->unused, use);

    /* Add buffer to in-use queue */
    LL_ENQUEUE(&agrp->inuse, buf, use);

    if(smm_instance.cbs.init_buffer(smm_instance.cbs.ctx, &buf->props)) {

    /* Add to history anew */
    LL_ENQUEUE(&agrp->history, buf, age);
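
The acquire path recycles a buffer from the unused list when one exists (otherwise it has to obtain one from the pool), moves it onto the in-use queue, and then runs the user's init_buffer hook with the context pointer and the buffer properties; a nonzero return appears to abort the acquisition. A small sketch of that callback-with-context shape, using invented names (smm_cbs_sketch, buffer_props_sketch) rather than the real ones from lv_wayland_smm.h:

    #include <stdbool.h>
    #include <stddef.h>

    struct buffer_props_sketch { size_t offset; size_t size; };

    struct smm_cbs_sketch {
        void *ctx;                                                     /* user context   */
        bool (*init_buffer)(void *ctx, struct buffer_props_sketch *);  /* nonzero = fail */
    };

    /* Returns true when the hook (if any) accepted the buffer. */
    static bool buffer_init_ok(const struct smm_cbs_sketch *cbs,
                               struct buffer_props_sketch *props)
    {
        if(cbs->init_buffer != NULL && cbs->init_buffer(cbs->ctx, props)) {
            /* the caller should unwind: take the buffer back off the in-use queue */
            return false;
        }
        return true;
    }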

In smm_map():
    struct smm_pool * pool = mbuf->props.pool;
    void * map = pool->map;

    if(pool->map_outdated) {

    if(pool->map != NULL) {
    munmap(pool->map, pool->map_size);

    pool->props.size,

    pool->props.fd,

    pool->map = NULL;

    pool->map = map;
    pool->map_size = pool->props.size;
    pool->map_outdated = false;

    map = (((char *)map) + mbuf->props.offset);
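
smm_map() lazily refreshes the pool mapping: when the backing file has grown, the old mapping is dropped with munmap() and the whole file is mapped again, after which a buffer's pointer is simply the base address plus the buffer's offset. A runnable illustration of the same remap-on-growth idea; it uses a Linux memfd_create() descriptor as a stand-in for the pool's shm_open() file descriptor:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = memfd_create("smm-demo", 0);
        size_t size = 4096;
        if(fd < 0 || ftruncate(fd, (off_t)size) < 0) return 1;

        char *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if(map == MAP_FAILED) return 1;

        /* the file grows (another buffer is appended), so the existing mapping no
         * longer covers it and has to be treated as outdated */
        size_t new_size = 2 * 4096;
        if(ftruncate(fd, (off_t)new_size) < 0) return 1;

        munmap(map, size);                              /* drop the stale mapping       */
        map = mmap(NULL, new_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if(map == MAP_FAILED) return 1;

        char *buf0 = map + 0;                           /* buffer = pool base + offset  */
        char *buf1 = map + 4096;
        strcpy(buf0, "first buffer");
        strcpy(buf1, "second buffer");
        printf("%s / %s\n", buf0, buf1);

        munmap(map, new_size);
        close(fd);
        return 0;
    }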

In smm_release():
    struct smm_group * grp = rbuf->props.group;

    /* Remove from in-use queue */
    LL_REMOVE(&grp->inuse, rbuf, use);

    if(rbuf->group_resized) {
    /* Buffer group was resized while this buffer was in-use, thus it must be

    rbuf->group_resized = false;

    LL_ENQUEUE(&grp->unused, rbuf, use);

    while((grp->num_buffers > PREFER_NUM_BUFFERS) &&
    (!LL_IS_EMPTY(&grp->unused))) {
    LL_DEQUEUE(rbuf, &grp->unused, use);
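
The release path also caps how many buffers a group keeps cached: after a buffer goes back on the unused list, buffers are returned to the pool while the group still holds more than PREFER_NUM_BUFFERS and unused ones remain. A toy run of that trimming loop (the constant's real value isn't visible in the excerpt, 2 is just an example):

    #include <stdio.h>

    #define PREFER_NUM_BUFFERS 2    /* illustrative value only */

    int main(void)
    {
        unsigned int num_buffers = 4;   /* buffers allocated to the group         */
        unsigned int unused = 3;        /* of those, currently on the unused list */

        while((num_buffers > PREFER_NUM_BUFFERS) && (unused > 0)) {
            unused--;                   /* dequeue from the unused list ...       */
            num_buffers--;              /* ... and return the buffer to the pool  */
        }
        printf("kept %u buffers, %u of them unused\n", num_buffers, unused);  /* 2, 1 */
        return 0;
    }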

In smm_latest():
    return LL_LAST(&lgrp->history);

In smm_next():
    struct smm_group * grp = nbuf->props.group;

    LL_FOREACH(ibuf, &grp->history, age) {

In purge_history():
    struct smm_group * grp = buf->props.group;

    LL_FOREACH(ibuf, &grp->history, age) {

    LL_DEQUEUE(ibuf, &grp->history, age);

In calc_buffer_size():
    struct smm_pool * buf_pool = buf->props.pool;

    if(buf == LL_LAST(&buf_pool->allocd)) {
    buf_sz = (buf_pool->props.size - buf->props.offset);

    buf_sz = (LL_NEXT(buf, pool)->props.offset - buf->props.offset);
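
calc_buffer_size() implies that buffers don't store their own size: a buffer occupies the pool from its offset up to the next buffer's offset, and the last buffer runs to the end of the pool. The same arithmetic on a toy offset table:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        size_t pool_size = 12288;                   /* illustrative 3-page pool   */
        size_t offsets[] = { 0, 4096, 8192 };       /* three buffers back to back */
        size_t count = sizeof(offsets) / sizeof(offsets[0]);

        for(size_t i = 0; i < count; i++) {
            size_t end = (i + 1 < count) ? offsets[i + 1] : pool_size;
            printf("buffer %zu: offset %zu, size %zu\n", i, offsets[i], end - offsets[i]);
        }
        return 0;
    }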

In get_from_pool():
    LL_FOREACH(buf, &smm_instance.active->allocd, pool) {

    if(buf->props.group == NULL) {

    if(buf_sz == grp->size) {

    else if(buf_sz > grp->size) {
    if((buf != LL_LAST(&smm_instance.active->allocd)) &&
    (LL_NEXT(buf, pool)->props.group == NULL)) {

    LL_NEXT(buf, pool)->props.offset -= (buf_sz - grp->size);

    alloc_buffer(buf, buf->props.offset + grp->size);

    (last->props.group == NULL)) {

    buf_sz = (grp->size - buf_sz);

    buf_sz = grp->size;

    buf = alloc_buffer(last, smm_instance.active->props.size);

    ret = ftruncate(smm_instance.active->props.fd,
    smm_instance.active->props.size + buf_sz);

    smm_instance.active->props.size += buf_sz;
    smm_instance.active->map_outdated = true;

    if(!(smm_instance.active->props.size - buf_sz)) {

    &smm_instance.active->props))) {

    &smm_instance.active->props);

    memcpy((void *)&buf->props.group, &grp, sizeof(struct smm_group *));

    if(smm_instance.cbs.new_buffer(smm_instance.cbs.ctx, &buf->props)) {

    memcpy((void *)&buf->props.group, &grp, sizeof(struct smm_group *));

    smm_instance.statistics.active_used += grp->size;
    grp->num_buffers++;
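
get_from_pool() walks the active pool's allocation list looking for a free slot (props.group == NULL): an exact-size slot is taken as is, a larger one is shrunk by giving the surplus to a free neighbour or by splitting off a new buffer, and if nothing fits the backing file is extended with ftruncate(), the mapping is marked outdated, and a fresh buffer is placed at the old end. One detail worth noting is the memcpy() used to record the owning group: presumably the publicly visible properties struct declares that field const, so ordinary assignment would not compile. A sketch of that idiom with invented names:

    #include <stdio.h>
    #include <string.h>

    struct group_sketch { int id; };

    /* Users of the properties see a read-only group pointer... */
    struct props_sketch {
        struct group_sketch *const group;
        size_t offset;
    };

    /* ...while the owner rewrites it by copying over the pointer's bytes. Strictly
     * this sidesteps the const qualifier; it mirrors the memcpy() in the excerpt. */
    static void set_group(struct props_sketch *props, struct group_sketch *grp)
    {
        memcpy((void *)&props->group, &grp, sizeof(struct group_sketch *));
    }

    int main(void)
    {
        struct group_sketch g = { 42 };
        struct props_sketch props = { NULL, 0 };
        set_group(&props, &g);
        printf("group id: %d\n", props.group->id);   /* prints 42 */
        return 0;
    }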

In return_to_pool():
    struct smm_group * grp = buf->props.group;
    struct smm_pool * pool = buf->props.pool;

    smm_instance.cbs.free_buffer(smm_instance.cbs.ctx, &buf->props);

    grp->num_buffers--;

    memcpy((void *)&buf->props.group, &grp, sizeof(struct smm_group *));

    smm_instance.statistics.active_used -= calc_buffer_size(buf);

    if((buf != LL_LAST(&pool->allocd)) &&
    (LL_NEXT(buf, pool)->props.group == NULL)) {

    if((buf != LL_FIRST(&pool->allocd)) &&
    (LL_PREV(buf, pool)->props.group == NULL)) {

    pool = buf->props.pool;

    if((buf == LL_FIRST(&pool->allocd)) &&
    (buf == LL_LAST(&pool->allocd))) {

    smm_instance.cbs.free_pool(smm_instance.cbs.ctx, &pool->props);
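
return_to_pool() coalesces the freed buffer with free neighbours in the pool's allocation list, and once the buffer is the only entry left the whole pool is released through the free_pool callback. Since sizes are implied by offsets (see calc_buffer_size above), merging only means unlinking nodes. A compact sketch with a singly linked stand-in list:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct node { size_t offset; bool free; struct node *next; };

    static void coalesce(struct node **head, struct node *buf)
    {
        buf->free = true;

        if(buf->next != NULL && buf->next->free)        /* absorb a free successor      */
            buf->next = buf->next->next;

        for(struct node *p = *head; p != NULL; p = p->next) {
            if(p->next == buf && p->free) {             /* absorbed by free predecessor */
                p->next = buf->next;
                break;
            }
        }
    }

    int main(void)
    {
        struct node c = { 8192, true,  NULL };
        struct node b = { 4096, false, &c };
        struct node a = { 0,    true,  &b };
        struct node *head = &a;

        coalesce(&head, &b);
        /* one span left: the pool is entirely free and could be destroyed */
        printf("pool %s\n", head->next == NULL ? "is one free span" : "still has buffers");
        return 0;
    }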

In alloc_pool():
    pool->props.fd = shm_open(name,

    if(pool->props.fd >= 0) {

    pool->props.size = 0;
    pool->map = NULL;
    pool->map_size = 0;
    pool->map_outdated = false;
    LL_INIT(&pool->allocd);
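
alloc_pool() obtains its backing memory with shm_open(); together with the WAYLAND_FD_NAME "-XXXXX" template near the top, this suggests a randomly suffixed name that is created and then (presumably) unlinked so only the descriptor keeps the memory alive. A runnable sketch of that approach; the name template, flags and retry policy here are illustrative, not taken from lv_wayland_smm.c:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>
    #include <time.h>
    #include <unistd.h>

    /* On older glibc, link with -lrt for shm_open()/shm_unlink(). */
    int main(void)
    {
        char name[64];
        int fd = -1;

        srand((unsigned)time(NULL) ^ (unsigned)getpid());
        for(int attempt = 0; attempt < 100 && fd < 0; attempt++) {
            snprintf(name, sizeof(name), "/smm-demo-%05d", rand() % 100000);
            fd = shm_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
            if(fd < 0 && errno != EEXIST) break;        /* give up on real errors    */
        }
        if(fd < 0) return 1;

        shm_unlink(name);              /* only the descriptor keeps the memory alive */
        if(ftruncate(fd, 4096) < 0) { close(fd); return 1; }

        printf("anonymous shm pool fd %d (was %s)\n", fd, name);
        close(fd);
        return 0;
    }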

In free_pool():
    if(pool->map != NULL) {
    munmap(pool->map, pool->map_size);

    close(pool->props.fd);

In alloc_buffer():
    memcpy(&buf->props, &initial_props, sizeof(struct smm_buffer_properties));
    buf->group_resized = false;

    LL_ENQUEUE(&smm_instance.active->allocd, buf, pool);

    LL_INSERT_AFTER(&smm_instance.active->allocd, last, buf, pool);

In free_buffer():
    struct smm_pool * buf_pool = buf->props.pool;

    LL_REMOVE(&buf_pool->allocd, buf, pool);