1 #if LV_BUILD_TEST
2 
3 #include "../lvgl.h"
4 #include "../../lvgl_private.h"
5 #include "lv_test_helpers.h"
6 
7 #include "unity/unity.h"
8 
/* Free-heap snapshot taken in setUp(); tearDown() compares against it to
 * detect per-test memory leaks. */
static uint32_t MEM_SIZE = 0;

// Cache size in bytes
#define CACHE_SIZE_BYTES 1000

/* Cache under test: created in setUp(), destroyed in tearDown(). */
lv_cache_t * cache;
15 
/* Payload stored in each cache entry. */
typedef struct _test_data {
    lv_cache_slot_size_t slot; /* byte-size bookkeeping consumed by the lru_rb_size cache class */

    int32_t key1; /* primary lookup key (compared first by compare_cb) */
    int32_t key2; /* secondary lookup key (tie-breaker) */

    void * data; // malloced data, released by free_cb()
} test_data;
24 
compare_cb(const test_data * lhs,const test_data * rhs)25 static lv_cache_compare_res_t compare_cb(const test_data * lhs, const test_data * rhs)
26 {
27     if(lhs->key1 != rhs->key1) {
28         return lhs->key1 > rhs->key1 ? 1 : -1;
29     }
30     if(lhs->key2 != rhs->key2) {
31         return lhs->key2 > rhs->key2 ? 1 : -1;
32     }
33     return 0;
34 }
35 
/**
 * Cache free callback: releases the heap buffer owned by the node.
 * The entry/node storage itself is managed and freed by the cache.
 */
static void free_cb(test_data * node, void * user_data)
{
    LV_UNUSED(user_data);

    /* Only the payload buffer belongs to us. */
    lv_free(node->data);
}
41 
setUp(void)42 void setUp(void)
43 {
44     /* Function run before every test */
45     MEM_SIZE = lv_test_get_free_mem();
46 
47     lv_cache_ops_t ops = {
48         .compare_cb = (lv_cache_compare_cb_t) compare_cb,
49         .create_cb = NULL,
50         .free_cb = (lv_cache_free_cb_t)free_cb,
51     };
52     cache = lv_cache_create(&lv_cache_class_lru_rb_size, sizeof(test_data), CACHE_SIZE_BYTES, ops);
53 }
54 
tearDown(void)55 void tearDown(void)
56 {
57     /* Function run after every test */
58     lv_cache_destroy(cache, NULL);
59     cache = NULL;
60 
61     TEST_ASSERT_MEM_LEAK_LESS_THAN(MEM_SIZE, 32);
62 }
63 
/**
 * Fill the cache with entries of doubling size until the next entry no
 * longer fits, then verify drop/release accounting on one specific entry
 * ({key1 = 32, key2 = 33}) that is dropped while still acquired.
 */
void test_cache_1(void)
{

    void * record_data_ptr = NULL;

    // create many node unless cache is full
    uint32_t curr_mem_size = 8;
    uint32_t curr_total_mem_size = 0;
    while(curr_total_mem_size < CACHE_SIZE_BYTES) {
        test_data search_key = {
            .slot.size = curr_mem_size,

            .key1 = (int32_t)curr_mem_size,
            .key2 = (int32_t)curr_mem_size + 1
        };

        // acquire cache first
        lv_cache_entry_t * entry = lv_cache_acquire(cache, &search_key, NULL);
        if(entry != NULL) {
            /* NOTE(review): keys are unique per iteration so this hit path is
             * never taken here; if it were, the loop counters would not
             * advance — confirm before reusing this pattern elsewhere. */
            continue;
        }

        // if cache miss then add cache
        entry = lv_cache_add(cache, &search_key, NULL);
        TEST_ASSERT_NOT_NULL(entry);

        test_data * data = lv_cache_entry_get_data(entry);
        TEST_ASSERT_NOT_NULL(data);

        /* Payload buffer owned by the entry; freed by free_cb() on eviction. */
        data->data = lv_malloc(data->slot.size);

        // record data ptr when {key1 = 32, key2 = 33}.
        if(search_key.key1 == 32 && search_key.key2 == 33) {
            record_data_ptr = data->data;
        }

        lv_cache_release(cache, entry, NULL);

        curr_total_mem_size += curr_mem_size;
        curr_mem_size *= 2;

        TEST_PRINTF("cache free: %d, allocated: %d", lv_cache_get_free_size(cache, NULL), curr_mem_size);
    }

    /*
     * allocated = 8 + 16 + 32 + 64 + 128 + 256 - 8 - 16 + 512 = 992
     * free = 1000 - 992 = 8
     * The last node size should be 1024, but the cache's max size is 1000. So new entry will be allocated failed and
     * the loop will break.
     * */
    TEST_ASSERT_EQUAL(8, lv_cache_get_free_size(cache, NULL));

    /*
     * Search entry {key1 = 32, key2 = 33}
     */
    test_data search_key32 = {
        .key1 = 32,
        .key2 = 33
    };
    lv_cache_entry_t * entry_key32 = lv_cache_acquire(cache, &search_key32, NULL);

    /* The payload pointer must be the very buffer recorded during creation,
     * proving the cache returned the same entry rather than a new one. */
    test_data * cached_data_key32 = lv_cache_entry_get_data(entry_key32);
    TEST_ASSERT_EQUAL(record_data_ptr, cached_data_key32->data);

    /*
     * Now drop the cache {key1 = 32, key2 = 33}. However, this entry is acquired once without release, so `drop`
     * will not release the memory allocated by this entry.
     */
    size_t mem_curr_free = lv_test_get_free_mem();
    lv_cache_drop(cache, &search_key32, NULL);
    /*
     * Though it doesn't release the data, the entry and other structure has been freed.
     * lv_rb_note_t (4 ptr + 1 int32 may align to 8 bit on 64 bit machine) + lv_ll (2 ptr + node_size).
     * Also, the def heap has some other aligned attributes. It'll also affect the final result.
     */
    TEST_ASSERT_MEM_LEAK_LESS_THAN(mem_curr_free,
                                   sizeof(lv_rb_node_t)
                                   + sizeof(void *) + (sizeof(lv_ll_node_t *) + sizeof(lv_ll_node_t *))
                                   + 32); // the last 32 is an error in memory allocating
    /* Releasing the still-held entry now frees the deferred entry storage. */
    mem_curr_free = lv_test_get_free_mem();
    lv_cache_release(cache, entry_key32, NULL);
    TEST_ASSERT_MEM_LEAK_LESS_THAN(mem_curr_free,
                                   lv_cache_entry_get_size(sizeof(test_data)) + sizeof(void *)
                                   + 32
                                   + 32);

    // Now the freed cache size should be 8 + 32 = 40
    TEST_ASSERT_EQUAL(40, lv_cache_get_free_size(cache, NULL));
}
153 
154 #endif
155