/**
 * @file lv_wayland_smm.c
 *
 */

typedef int dummy_t;     /* Make GCC on Windows happy, avoid an empty translation unit */

#ifndef _WIN32

#include "lv_wayland_smm.h"
#include "../../display/lv_display.h"

#if LV_USE_WAYLAND

#include <stddef.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>

#define MAX_NAME_ATTEMPTS (5)
#define PREFER_NUM_BUFFERS (3)

#define ROUND_UP(n, b) (((((n) ? (n) : 1) + (b) - 1) / (b)) * (b))
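
/* Minimal intrusive doubly-linked list helpers: LLHEAD declares a list head,
 * LLLINK declares the links embedded in an entry, and the LL_* macros operate
 * on a named link member, so one entry can sit on several lists at once.
 */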
#define LLHEAD(type) \
    struct { \
        struct type *first; \
        struct type *last; \
    }

#define LLLINK(type) \
    struct { \
        struct type *next; \
        struct type *prev; \
    }

#define LL_FIRST(head) ((head)->first)
#define LL_LAST(head) ((head)->last)
#define LL_IS_EMPTY(head) (LL_FIRST(head) == NULL)
#define LL_NEXT(src, member) ((src)->member.next)
#define LL_PREV(src, member) ((src)->member.prev)

#define LL_INIT(head) do { \
        (head)->first = NULL; \
        (head)->last = NULL; \
    } while (0)

#define LL_ENQUEUE(head, src, member) do { \
        (src)->member.next = NULL; \
        (src)->member.prev = (head)->last; \
        if ((head)->last == NULL) { \
            (head)->first = (src); \
        } else { \
            (head)->last->member.next = (src); \
        } \
        (head)->last = (src); \
    } while (0)

#define LL_DEQUEUE(entry, head, member) do { \
        (entry) = LL_FIRST(head); \
        LL_REMOVE(head, entry, member); \
    } while (0)

#define LL_INSERT_AFTER(head, dest, src, member) do { \
        (src)->member.prev = (dest); \
        (src)->member.next = (dest)->member.next; \
        if ((dest)->member.next != NULL) { \
            (dest)->member.next->member.prev = (src); \
        } else { \
            (head)->last = (src); \
        } \
        (dest)->member.next = (src); \
    } while (0)

#define LL_REMOVE(head, src, member) do { \
        if ((src)->member.prev != NULL) { \
            (src)->member.prev->member.next = (src)->member.next; \
        } else { \
            (head)->first = (src)->member.next; \
        } \
        if ((src)->member.next != NULL) { \
            (src)->member.next->member.prev = (src)->member.prev; \
        } else { \
            (head)->last = (src)->member.prev; \
        } \
    } while (0)

#define LL_FOREACH(entry, head, member) \
    for ((entry) = LL_FIRST(head); \
         (entry) != NULL; \
         (entry) = LL_NEXT(entry, member))

#define WAYLAND_FD_NAME "/" SMM_FD_NAME "-XXXXX"

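/* A pool is one shared-memory file whose buffers are kept in allocation
 * (offset) order. A buffer is a contiguous slice of a pool. A group collects
 * the buffers backing one surface, tracking which are unused, which are in
 * use, and their age (acquisition history).
 */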
struct smm_pool {
    struct smm_pool_properties props;
    LLHEAD(smm_buffer) allocd;
    void * map;
    size_t map_size;
    bool map_outdated;
};

struct smm_buffer {
    struct smm_buffer_properties props;
    bool group_resized;
    LLLINK(smm_buffer) pool;
    LLLINK(smm_buffer) use;
    LLLINK(smm_buffer) age;
};

struct smm_group {
    struct smm_group_properties props;
    size_t size;
    unsigned char num_buffers;
    LLHEAD(smm_buffer) unused;
    LLHEAD(smm_buffer) inuse;
    LLHEAD(smm_buffer) history;
    LLLINK(smm_group) link;
};

static size_t calc_buffer_size(struct smm_buffer * buf);
static void purge_history(struct smm_buffer * buf);
static struct smm_buffer * get_from_pool(struct smm_group * grp);
static void return_to_pool(struct smm_buffer * buf);
static struct smm_pool * alloc_pool(void);
static void free_pool(struct smm_pool * pool);
static struct smm_buffer * alloc_buffer(struct smm_buffer * last, size_t offset);
static void free_buffer(struct smm_buffer * buf);

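/* Module-wide state: the page size used for rounding, the client event
 * callbacks, the pool currently used for new allocations and all live groups.
 */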
static struct {
    unsigned long page_sz;
    struct smm_events cbs;
    struct smm_pool * active;
    LLHEAD(smm_group) groups;
    struct {
        size_t active_used;
    } statistics;
} smm_instance;


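/* Typical call sequence (illustrative sketch only; error handling and the
 * event callbacks carried in struct smm_events are omitted):
 *
 *   struct smm_events evs = { ... };
 *   smm_init(&evs);
 *   smm_group_t * grp = smm_create();
 *   smm_resize(grp, needed_bytes);
 *   smm_buffer_t * buf = smm_acquire(grp);
 *   void * pixels = smm_map(buf);
 *   ...draw into pixels and attach the buffer to a wl_surface...
 *   smm_release(buf);               // once the compositor is done with it
 *   smm_destroy(grp);
 *   smm_deinit();
 */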
void smm_init(struct smm_events * evs)
{
    memcpy(&smm_instance.cbs, evs, sizeof(struct smm_events));
    srand((unsigned int)clock());
    smm_instance.page_sz = (unsigned long)sysconf(_SC_PAGESIZE);
    LL_INIT(&smm_instance.groups);
}


void smm_deinit(void)
{
    struct smm_group * grp;

    /* Destroy all buffer groups */
    while(!LL_IS_EMPTY(&smm_instance.groups)) {
        LL_DEQUEUE(grp, &smm_instance.groups, link);
        smm_destroy(grp);
    }
}


void smm_setctx(void * ctx)
{
    smm_instance.cbs.ctx = ctx;
}


smm_group_t * smm_create(void)
{
    struct smm_group * grp;

    /* Allocate and initialize a new buffer group */
    grp = malloc(sizeof(struct smm_group));
    if(grp != NULL) {
        grp->size = smm_instance.page_sz;
        grp->num_buffers = 0;
        LL_INIT(&grp->unused);
        LL_INIT(&grp->inuse);
        LL_INIT(&grp->history);

        /* Add to instance groups queue */
        LL_ENQUEUE(&smm_instance.groups, grp, link);
    }

    return grp;
}


void smm_resize(smm_group_t * grp, size_t sz)
{
    struct smm_buffer * buf;
    struct smm_group * rgrp = grp;

    /* Round allocation size up to a sysconf(_SC_PAGESIZE) boundary */
    rgrp->size = ROUND_UP(sz, smm_instance.page_sz);

    /* Return all unused buffers to pool (to be re-allocated at the new size) */
    while(!LL_IS_EMPTY(&rgrp->unused)) {
        LL_DEQUEUE(buf, &rgrp->unused, use);
        return_to_pool(buf);
    }

    /* Mark all buffers in use to be freed to pool when possible */
    LL_FOREACH(buf, &rgrp->inuse, use) {
        buf->group_resized = true;
        purge_history(buf);
    }
}


void smm_destroy(smm_group_t * grp)
{
    struct smm_buffer * buf;
    struct smm_group * dgrp = grp;

    /* Return unused buffers */
    while(!LL_IS_EMPTY(&dgrp->unused)) {
        LL_DEQUEUE(buf, &dgrp->unused, use);
        return_to_pool(buf);
    }

    /* Return buffers that are still in use (ideally this queue should be empty
     * at this time)
     */
    while(!LL_IS_EMPTY(&dgrp->inuse)) {
        LL_DEQUEUE(buf, &dgrp->inuse, use);
        return_to_pool(buf);
    }

    /* Remove from instance groups queue */
    LL_REMOVE(&smm_instance.groups, dgrp, link);
    free(dgrp);
}


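/* Hand out a buffer from the group: reuse an unused one if possible,
 * otherwise carve a new one out of the active pool, then record it as the
 * newest entry in the group's history.
 */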
smm_buffer_t * smm_acquire(smm_group_t * grp)
{
    struct smm_buffer * buf;
    struct smm_group * agrp = grp;

    if(LL_IS_EMPTY(&agrp->unused)) {
        /* No unused buffer available, so get a new one from pool */
        buf = get_from_pool(agrp);
    }
    else {
        /* Otherwise, reuse an unused buffer */
        LL_DEQUEUE(buf, &agrp->unused, use);
    }

    if(buf != NULL) {
        /* Add buffer to in-use queue */
        LL_ENQUEUE(&agrp->inuse, buf, use);

        /* Emit 'init buffer' event */
        if(smm_instance.cbs.init_buffer != NULL) {
            if(smm_instance.cbs.init_buffer(smm_instance.cbs.ctx, &buf->props)) {
                smm_release(buf);
                buf = NULL;
            }
        }

        if(buf != NULL) {
            /* Remove from history */
            purge_history(buf);

            /* Add to history anew */
            LL_ENQUEUE(&agrp->history, buf, age);
        }
    }

    return buf;
}


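/* Return a writable mapping of the buffer, remapping the whole pool first if
 * it has grown since the last mmap. Returns NULL if the mapping fails.
 */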
void * smm_map(smm_buffer_t * buf)
{
    struct smm_buffer * mbuf = buf;
    struct smm_pool * pool = mbuf->props.pool;
    void * map = pool->map;

    if(pool->map_outdated) {
        /* Update mapping to current pool size */
        if(pool->map != NULL) {
            munmap(pool->map, pool->map_size);
        }

        map = mmap(NULL,
                   pool->props.size,
                   PROT_READ | PROT_WRITE,
                   MAP_SHARED,
                   pool->props.fd,
                   0);

        if(map == MAP_FAILED) {
            map = NULL;
            pool->map = NULL;
        }
        else {
            pool->map = map;
            pool->map_size = pool->props.size;
            pool->map_outdated = false;
        }
    }

    /* Calculate buffer mapping (from offset in pool) */
    if(map != NULL) {
        map = (((char *)map) + mbuf->props.offset);
    }

    return map;
}


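/* Give a buffer back to its group once the compositor is done with it. The
 * buffer is parked on the unused queue for reuse; buffers whose group was
 * resized, and unused buffers beyond PREFER_NUM_BUFFERS, go back to the pool.
 */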
void smm_release(smm_buffer_t * buf)
{
    struct smm_buffer * rbuf = buf;
    struct smm_group * grp = rbuf->props.group;

    /* Remove from in-use queue */
    LL_REMOVE(&grp->inuse, rbuf, use);

    if(rbuf->group_resized) {
        /* Buffer group was resized while this buffer was in-use, thus it must be
         * returned to its pool
         */
        rbuf->group_resized = false;
        return_to_pool(rbuf);
    }
    else {
        /* Move to unused queue */
        LL_ENQUEUE(&grp->unused, rbuf, use);

        /* Try to limit total number of buffers to preferred number */
        while((grp->num_buffers > PREFER_NUM_BUFFERS) &&
              (!LL_IS_EMPTY(&grp->unused))) {
            LL_DEQUEUE(rbuf, &grp->unused, use);
            return_to_pool(rbuf);
        }
    }
}


smm_buffer_t * smm_latest(smm_group_t * grp)
{
    struct smm_group * lgrp = grp;

    return LL_LAST(&lgrp->history);
}


smm_buffer_t * smm_next(smm_buffer_t * buf)
{
    struct smm_buffer * ibuf;
    struct smm_buffer * nbuf = buf;
    struct smm_group * grp = nbuf->props.group;

    LL_FOREACH(ibuf, &grp->history, age) {
        if(ibuf == nbuf) {
            ibuf = LL_NEXT(ibuf, age);
            break;
        }
    }

    return ibuf;
}

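/* Drop buf and every older entry from its group's acquisition history */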
static void purge_history(struct smm_buffer * buf)
{
    struct smm_buffer * ibuf;
    struct smm_group * grp = buf->props.group;

    /* Remove from history (and any older) */
    LL_FOREACH(ibuf, &grp->history, age) {
        if(ibuf == buf) {
            do {
                LL_DEQUEUE(ibuf, &grp->history, age);
            } while(ibuf != buf);
            break;
        }
    }
}


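/* A buffer's size is implicit: the distance to the next buffer's offset, or
 * to the end of the pool for the last buffer.
 */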
static size_t calc_buffer_size(struct smm_buffer * buf)
{
    size_t buf_sz;
    struct smm_pool * buf_pool = buf->props.pool;

    if(buf == LL_LAST(&buf_pool->allocd)) {
        buf_sz = (buf_pool->props.size - buf->props.offset);
    }
    else {
        buf_sz = (LL_NEXT(buf, pool)->props.offset - buf->props.offset);
    }

    return buf_sz;
}


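/* Carve a buffer of grp->size bytes out of the active pool. Free slots are
 * searched first-fit, an oversized slot is split, and if nothing fits the
 * pool file is grown with ftruncate() and the matching pool event is emitted.
 */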
static struct smm_buffer * get_from_pool(struct smm_group * grp)
{
    int ret;
    size_t buf_sz;
    struct smm_buffer * buf;
    struct smm_buffer * last = NULL;

    /* TODO: Determine when to allocate a new active pool (i.e. memory shrink) */

    if(smm_instance.active == NULL) {
        /* Allocate a new active pool */
        smm_instance.active = alloc_pool();
        smm_instance.statistics.active_used = 0;
    }

    if(smm_instance.active == NULL) {
        buf = NULL;
    }
    else {
        /* Search for a free buffer large enough for allocation */
        LL_FOREACH(buf, &smm_instance.active->allocd, pool) {
            last = buf;
            if(buf->props.group == NULL) {
                buf_sz = calc_buffer_size(buf);
                if(buf_sz == grp->size) {
                    break;
                }
                else if(buf_sz > grp->size) {
                    if((buf != LL_LAST(&smm_instance.active->allocd)) &&
                       (LL_NEXT(buf, pool)->props.group == NULL)) {
                        /* Pull back next buffer to use unallocated size */
                        LL_NEXT(buf, pool)->props.offset -= (buf_sz - grp->size);
                    }
                    else {
                        /* Allocate another buffer to hold unallocated size */
                        alloc_buffer(buf, buf->props.offset + grp->size);
                    }

                    break;
                }
            }
        }

        if(buf == NULL) {
            /* No buffer found to meet allocation size, expand pool */
            if((last != NULL) &&
               (last->props.group == NULL)) {
                /* Use last free buffer */
                buf_sz = (grp->size - buf_sz);
            }
            else {
                /* Allocate new buffer */
                buf_sz = grp->size;
                if(last == NULL) {
                    buf = alloc_buffer(NULL, 0);
                }
                else {
                    buf = alloc_buffer(last, smm_instance.active->props.size);
                }
                last = buf;
            }

            if(last != NULL) {
                /* Expand pool backing memory */
                ret = ftruncate(smm_instance.active->props.fd,
                                smm_instance.active->props.size + buf_sz);
                if(ret) {
                    if(buf != NULL) {
                        free_buffer(buf);
                        buf = NULL;
                    }
                }
                else {
                    smm_instance.active->props.size += buf_sz;
                    smm_instance.active->map_outdated = true;
                    buf = last;

                    if(!(smm_instance.active->props.size - buf_sz)) {
                        /* Emit 'new pool' event */
                        if((smm_instance.cbs.new_pool != NULL) &&
                           (smm_instance.cbs.new_pool(smm_instance.cbs.ctx,
                                                      &smm_instance.active->props))) {
                            free_buffer(buf);
                            free_pool(smm_instance.active);
                            smm_instance.active = NULL;
                            buf = NULL;
                        }
                    }
                    else {
                        /* Emit 'expand pool' event */
                        if(smm_instance.cbs.expand_pool != NULL) {
                            smm_instance.cbs.expand_pool(smm_instance.cbs.ctx,
                                                         &smm_instance.active->props);
                        }
                    }
                }
            }
        }
    }

    if(buf != NULL) {
        /* Set buffer group */
        memcpy((void *)&buf->props.group, &grp, sizeof(struct smm_group *));

        /* Emit 'new buffer' event */
        if(smm_instance.cbs.new_buffer != NULL) {
            if(smm_instance.cbs.new_buffer(smm_instance.cbs.ctx, &buf->props)) {
                grp = NULL;
                memcpy((void *)&buf->props.group, &grp, sizeof(struct smm_group *));
                buf = NULL;
            }
        }

        if(buf != NULL) {
            /* Update active pool usage statistic */
            smm_instance.statistics.active_used += grp->size;
            grp->num_buffers++;
        }
    }

    return buf;
}


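/* Detach a buffer from its group and mark its pool slice free again, merging
 * it with any free neighbours. When the last buffer of a pool is freed the
 * pool itself is released.
 */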
static void return_to_pool(struct smm_buffer * buf)
{
    struct smm_group * grp = buf->props.group;
    struct smm_pool * pool = buf->props.pool;

    /* Emit 'free buffer' event */
    if(smm_instance.cbs.free_buffer != NULL) {
        smm_instance.cbs.free_buffer(smm_instance.cbs.ctx, &buf->props);
    }

    /* Buffer is no longer part of history */
    purge_history(buf);

    /* Buffer is no longer part of group */
    grp->num_buffers--;
    grp = NULL;
    memcpy((void *)&buf->props.group, &grp, sizeof(struct smm_group *));

    /* Update active pool usage statistic */
    if(smm_instance.active == pool) {
        smm_instance.statistics.active_used -= calc_buffer_size(buf);
    }

    /* Coalesce with ungrouped buffers beside this one */
    if((buf != LL_LAST(&pool->allocd)) &&
       (LL_NEXT(buf, pool)->props.group == NULL)) {
        free_buffer(LL_NEXT(buf, pool));
    }
    if((buf != LL_FIRST(&pool->allocd)) &&
       (LL_PREV(buf, pool)->props.group == NULL)) {
        buf = LL_PREV(buf, pool);
        pool = buf->props.pool;
        free_buffer(LL_NEXT(buf, pool));
    }

    /* Free buffer (and pool), if only remaining buffer in pool */
    if((buf == LL_FIRST(&pool->allocd)) &&
       (buf == LL_LAST(&pool->allocd))) {
        free_buffer(buf);

        /* Emit 'free pool' event */
        if(smm_instance.cbs.free_pool != NULL) {
            smm_instance.cbs.free_pool(smm_instance.cbs.ctx, &pool->props);
        }

        /* Drop the active-pool reference before freeing, so a freed pointer is never compared */
        if(smm_instance.active == pool) {
            smm_instance.active = NULL;
        }
        free_pool(pool);
    }
}


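/* Create a new, initially empty shared-memory pool. The backing file is
 * opened with shm_open() under a randomized name and unlinked immediately,
 * so only the file descriptor keeps it alive.
 */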
static struct smm_pool * alloc_pool(void)
{
    struct smm_pool * pool;
    char name[] = WAYLAND_FD_NAME;
    unsigned char attempts = 0;
    bool opened = false;

    pool = malloc(sizeof(struct smm_pool));
    if(pool != NULL) {
        do {
            /* A randomized pool name should help reduce collisions */
            sprintf(name + sizeof(SMM_FD_NAME) + 1, "%05X", rand() & 0xFFFF);
            pool->props.fd = shm_open(name,
                                      O_RDWR | O_CREAT | O_EXCL,
                                      S_IRUSR | S_IWUSR);
            if(pool->props.fd >= 0) {
                shm_unlink(name);
                pool->props.size = 0;
                pool->map = NULL;
                pool->map_size = 0;
                pool->map_outdated = false;
                LL_INIT(&pool->allocd);
                opened = true;
                break;
            }
            else {
                if(errno != EEXIST) {
                    break;
                }
                attempts++;
            }
        } while(attempts < MAX_NAME_ATTEMPTS);

        if(!opened) {
            free(pool);
            pool = NULL;
        }
    }

    return pool;
}


static void free_pool(struct smm_pool * pool)
{
    if(pool->map != NULL) {
        munmap(pool->map, pool->map_size);
    }

    close(pool->props.fd);
    free(pool);
}


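/* Allocate a buffer record at the given pool offset and link it into the
 * active pool's allocation list after 'last' (or append it when last is NULL).
 */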
static struct smm_buffer * alloc_buffer(struct smm_buffer * last, size_t offset)
{
    struct smm_buffer * buf;
    struct smm_buffer_properties initial_props = {
        {NULL},
        NULL,
        smm_instance.active,
        offset
    };

    /* Allocate and initialize a new buffer (including linking into pool) */
    buf = malloc(sizeof(struct smm_buffer));
    if(buf != NULL) {
        memcpy(&buf->props, &initial_props, sizeof(struct smm_buffer_properties));
        buf->group_resized = false;

        if(last == NULL) {
            LL_ENQUEUE(&smm_instance.active->allocd, buf, pool);
        }
        else {
            LL_INSERT_AFTER(&smm_instance.active->allocd, last, buf, pool);
        }
    }

    return buf;
}


static void free_buffer(struct smm_buffer * buf)
{
    struct smm_pool * buf_pool = buf->props.pool;

    /* Remove from pool */
    LL_REMOVE(&buf_pool->allocd, buf, pool);
    free(buf);
}

#endif /* LV_USE_WAYLAND */
#endif /* _WIN32 */