1 /**
2 * @file lv_cache_lru_rb.c
3 *
4 */
5
6 /***************************************************************************\
7 * *
8 * ┏ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ┓ *
9 * ┏ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ┌ ─ ─ ─ ┐ *
10 * ┌ ─ ─ ─ ─ ─ ─ ─ ┃ ┃ Cache insert ┃ *
11 * ┃ RB Tree │ │Hitting│ head *
12 * └ ─ ─ ─ ─ ─ ─ ─ ┃ ┃ ─ ─ ─ ─ ┃ *
13 * ┃ ┌─┬─┬─┬─┐ ┌─────┐ *
14 * ┌──│◄│B│►│ │─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─┃─ ─ ╋ ─ ─▶│ B │ ┃ *
15 * ┃ │ └─┴─┴─┴─┘ └──▲──┘ *
16 * │ │ ┃ ┃ │ ┃ *
17 * ┃ │ │ ┌──┴──┐ *
18 * │ └──────┐ ┌ ─┃─ ─ ╋ ─ ─▶│ E │ ┃ *
19 * ┃ ▼ ┌ ─ ─│─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┐ └──▲──┘ *
20 * ┌─┬─┬─┬─┐ ▼ │ ┃ ┃ │ ┃ *
21 * ┃│◄│A│►│ │─ ─ ┘ ┌─┬─┬─┬─┐ │ ┌──┴──┐ *
22 * └─┴─┴─┴─┘ ┌───│◄│D│►│ │─ ─ ─ ─ ─ ─│─ ╋ ┐ ┃ ─ ▶│ A │ ┌ ─ ─ ─ ─ ─ ┐ ┃ *
23 * ┃ │ └─┴─┴─┴─┘ └──▲──┘ LRU *
24 * │ │ │ ┃ │ ┃ │ │ Cache │ ┃ *
25 * ┃ ▼ └──────┐ ┌──┴──┐ ─ ─ ─ ─ ─ ─ *
26 * ┌─┬─┬─┬─┐ ▼ │ ┃ └ ─┃─ ─ ▶│ D │ ┃ *
27 * ┃ │◄│C│►│ │─ ─ ┌─┬─┬─┬─┐ └──▲──┘ *
28 * └─┴─┴─┴─┘ │ │◄│E│►│ │─ ┘ ┃ ┃ │ ┃ *
29 * ┃ └─┴─┴─┴─┘ ┌──┴──┐ *
30 * │ │ ─ ╋ ─ ─┃─ ─ ▶│ C │ ┃ *
31 * ┃ ─ ─ ─ ─ ┼ ─ ─ ┘ └──▲──┘ *
32 * ▼ ┃ ┃ ┌ ─ ─│─ ─ ┐ ┃ *
33 * ┃ ┌─┬─┬─┬─┐ ┌──┴──┐ *
34 * │◄│F│►│ │─ ─┃─ ─ ╋ ─ ┼▶│ F │ │ ┃ *
35 * ┃ └─┴─┴─┴─┘ └─────┘ *
36 * ┃ ┃ └ ─ ─ ─ ─ ┘ ┃ *
37 * ┃ remove *
38 * ┃ ┃ tail ┃ *
39 * ┗ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ *
40 * *
41 \***************************************************************************/
42
43 /*********************
44 * INCLUDES
45 *********************/
46 #include "lv_cache_lru_rb.h"
47 #include "../../stdlib/lv_sprintf.h"
48 #include "../../stdlib/lv_string.h"
49 #include "../lv_ll.h"
50 #include "../lv_rb_private.h"
51 #include "../lv_rb.h"
52 #include "../lv_iter.h"
53
54 /*********************
55 * DEFINES
56 *********************/
57
58 /**********************
59 * TYPEDEFS
60 **********************/
/** Callback returning an entry's size contribution to the cache total
 *  (a constant 1 in count mode; the slot's byte size in size mode). */
typedef uint32_t (get_data_size_cb_t)(const void * data);

struct _lv_lru_rb_t {
    lv_cache_t cache;   /**< Base cache object. Must stay the FIRST member so this
                         *   struct can be cast to/from `lv_cache_t *`. */

    lv_rb_t rb;         /**< Red-black tree index for O(log n) key lookup */
    lv_ll_t ll;         /**< Recency list: head = most recently used, tail = LRU victim side */

    get_data_size_cb_t * get_data_size_cb;  /**< Selected at init: count-mode or size-mode weighting */
};
typedef struct _lv_lru_rb_t lv_lru_rb_t_;
72 /**********************
73 * STATIC PROTOTYPES
74 **********************/
75
76 static void * alloc_cb(void);
77 static bool init_cnt_cb(lv_cache_t * cache);
78 static bool init_size_cb(lv_cache_t * cache);
79 static void destroy_cb(lv_cache_t * cache, void * user_data);
80
81 static lv_cache_entry_t * get_cb(lv_cache_t * cache, const void * key, void * user_data);
82 static lv_cache_entry_t * add_cb(lv_cache_t * cache, const void * key, void * user_data);
83 static void remove_cb(lv_cache_t * cache, lv_cache_entry_t * entry, void * user_data);
84 static void drop_cb(lv_cache_t * cache, const void * key, void * user_data);
85 static void drop_all_cb(lv_cache_t * cache, void * user_data);
86 static lv_cache_entry_t * get_victim_cb(lv_cache_t * cache, void * user_data);
87 static lv_cache_reserve_cond_res_t reserve_cond_cb(lv_cache_t * cache, const void * key, size_t reserved_size,
88 void * user_data);
89
90 static void * alloc_new_node(lv_lru_rb_t_ * lru, void * key, void * user_data);
91 inline static void ** get_lru_node(lv_lru_rb_t_ * lru, lv_rb_node_t * node);
92
93 static uint32_t cnt_get_data_size_cb(const void * data);
94 static uint32_t size_get_data_size_cb(const void * data);
95
96 static lv_iter_t * cache_iter_create_cb(lv_cache_t * cache);
97 static lv_result_t cache_iter_next_cb(void * instance, void * context, void * elem);
98
99 /**********************
100 * GLOBAL VARIABLES
101 **********************/
/** LRU cache class where capacity is measured in number of entries (each entry counts as 1). */
const lv_cache_class_t lv_cache_class_lru_rb_count = {
    .alloc_cb        = alloc_cb,
    .init_cb         = init_cnt_cb,
    .destroy_cb      = destroy_cb,

    .get_cb          = get_cb,
    .add_cb          = add_cb,
    .remove_cb       = remove_cb,
    .drop_cb         = drop_cb,
    .drop_all_cb     = drop_all_cb,
    .get_victim_cb   = get_victim_cb,
    .reserve_cond_cb = reserve_cond_cb,
    .iter_create_cb  = cache_iter_create_cb,
};
116
/** LRU cache class where capacity is measured in bytes (entry weight comes from its slot size). */
const lv_cache_class_t lv_cache_class_lru_rb_size = {
    .alloc_cb        = alloc_cb,
    .init_cb         = init_size_cb,
    .destroy_cb      = destroy_cb,

    .get_cb          = get_cb,
    .add_cb          = add_cb,
    .remove_cb       = remove_cb,
    .drop_cb         = drop_cb,
    .drop_all_cb     = drop_all_cb,
    .get_victim_cb   = get_victim_cb,
    .reserve_cond_cb = reserve_cond_cb,
    .iter_create_cb  = cache_iter_create_cb,
};
131 /**********************
132 * STATIC VARIABLES
133 **********************/
134
135 /**********************
136 * MACROS
137 **********************/
138
139 /**********************
140 * GLOBAL FUNCTIONS
141 **********************/
142
143 /**********************
144 * STATIC FUNCTIONS
145 **********************/
/**
 * Create a new cache node: insert it into the rb-tree and push it onto the
 * head of the LRU list, cross-linking the two structures.
 * @param lru       the LRU cache instance
 * @param key       key data to copy into the new node (node_size bytes)
 * @param user_data unused
 * @return the new rb-tree node, or NULL on failure
 */
static void * alloc_new_node(lv_lru_rb_t_ * lru, void * key, void * user_data)
{
    LV_UNUSED(user_data);

    LV_ASSERT_NULL(lru);
    LV_ASSERT_NULL(key);

    if(lru == NULL || key == NULL) {
        return NULL;
    }

    lv_rb_node_t * node = lv_rb_insert(&lru->rb, key);
    if(node == NULL) {
        return NULL;
    }

    void * data = node->data;
    lv_cache_entry_t * entry = lv_cache_entry_get_entry(data, lru->cache.node_size);
    lv_memcpy(data, key, lru->cache.node_size);

    /*New entries start as most-recently-used: insert at the list head.*/
    void * lru_node = lv_ll_ins_head(&lru->ll);
    if(lru_node == NULL) {
        /*Roll back the rb-tree insertion so no half-linked node is left behind.*/
        lv_rb_drop_node(&lru->rb, node);
        return NULL;
    }

    /*Cross-link: the ll node stores the rb node pointer, and the tail of the
     *rb payload stores the ll node pointer (see get_lru_node()).*/
    lv_memcpy(lru_node, &node, sizeof(void *));
    lv_memcpy(get_lru_node(lru, node), &lru_node, sizeof(void *));

    lv_cache_entry_init(entry, &lru->cache, lru->cache.node_size);
    return node;
}
181
/**
 * Locate the back-pointer to the entry's LRU list node. It is stored in the
 * last sizeof(void *) bytes of the rb node's payload (reserved at init time).
 */
inline static void ** get_lru_node(lv_lru_rb_t_ * lru, lv_rb_node_t * node)
{
    char * payload_end = (char *)node->data + lru->rb.size;
    return (void **)(payload_end - sizeof(void *));
}
186
alloc_cb(void)187 static void * alloc_cb(void)
188 {
189 void * res = lv_malloc(sizeof(lv_lru_rb_t_));
190 LV_ASSERT_MALLOC(res);
191 if(res == NULL) {
192 LV_LOG_ERROR("malloc failed");
193 return NULL;
194 }
195
196 lv_memzero(res, sizeof(lv_lru_rb_t_));
197 return res;
198 }
199
init_cnt_cb(lv_cache_t * cache)200 static bool init_cnt_cb(lv_cache_t * cache)
201 {
202 lv_lru_rb_t_ * lru = (lv_lru_rb_t_ *)cache;
203
204 LV_ASSERT_NULL(lru->cache.ops.compare_cb);
205 LV_ASSERT_NULL(lru->cache.ops.free_cb);
206 LV_ASSERT(lru->cache.node_size > 0);
207
208 if(lru->cache.node_size <= 0 || lru->cache.ops.compare_cb == NULL || lru->cache.ops.free_cb == NULL) {
209 return false;
210 }
211
212 /*add void* to store the ll node pointer*/
213 if(!lv_rb_init(&lru->rb, lru->cache.ops.compare_cb, lv_cache_entry_get_size(lru->cache.node_size) + sizeof(void *))) {
214 return false;
215 }
216 lv_ll_init(&lru->ll, sizeof(void *));
217
218 lru->get_data_size_cb = cnt_get_data_size_cb;
219
220 return true;
221 }
222
init_size_cb(lv_cache_t * cache)223 static bool init_size_cb(lv_cache_t * cache)
224 {
225 lv_lru_rb_t_ * lru = (lv_lru_rb_t_ *)cache;
226
227 LV_ASSERT_NULL(lru->cache.ops.compare_cb);
228 LV_ASSERT_NULL(lru->cache.ops.free_cb);
229 LV_ASSERT(lru->cache.node_size > 0);
230
231 if(lru->cache.node_size <= 0 || lru->cache.ops.compare_cb == NULL || lru->cache.ops.free_cb == NULL) {
232 return false;
233 }
234
235 /*add void* to store the ll node pointer*/
236 if(!lv_rb_init(&lru->rb, lru->cache.ops.compare_cb, lv_cache_entry_get_size(lru->cache.node_size) + sizeof(void *))) {
237 return false;
238 }
239 lv_ll_init(&lru->ll, sizeof(void *));
240
241 lru->get_data_size_cb = size_get_data_size_cb;
242
243 return true;
244 }
245
/**
 * Destroy the cache content by dropping all entries.
 * @param cache     the cache object
 * @param user_data forwarded to the per-entry free_cb via drop_all_cb
 */
static void destroy_cb(lv_cache_t * cache, void * user_data)
{
    /*NOTE: the original marked user_data with LV_UNUSED, which was misleading:
     *it IS used — forwarded to drop_all_cb below.*/
    lv_lru_rb_t_ * lru = (lv_lru_rb_t_ *)cache;

    LV_ASSERT_NULL(lru);

    if(lru == NULL) {
        return;
    }

    cache->clz->drop_all_cb(cache, user_data);
}
260
get_cb(lv_cache_t * cache,const void * key,void * user_data)261 static lv_cache_entry_t * get_cb(lv_cache_t * cache, const void * key, void * user_data)
262 {
263 LV_UNUSED(user_data);
264
265 lv_lru_rb_t_ * lru = (lv_lru_rb_t_ *)cache;
266
267 LV_ASSERT_NULL(lru);
268 LV_ASSERT_NULL(key);
269
270 if(lru == NULL || key == NULL) {
271 return NULL;
272 }
273
274 /*try the first ll node first*/
275 void * head = lv_ll_get_head(&lru->ll);
276 if(head) {
277 lv_rb_node_t * node = *(lv_rb_node_t **)head;
278 void * data = node->data;
279 lv_cache_entry_t * entry = lv_cache_entry_get_entry(data, cache->node_size);
280 if(lru->cache.ops.compare_cb(data, key) == 0) {
281 return entry;
282 }
283 }
284
285 lv_rb_node_t * node = lv_rb_find(&lru->rb, key);
286 /*cache hit*/
287 if(node) {
288 void * lru_node = *get_lru_node(lru, node);
289 head = lv_ll_get_head(&lru->ll);
290 lv_ll_move_before(&lru->ll, lru_node, head);
291
292 lv_cache_entry_t * entry = lv_cache_entry_get_entry(node->data, cache->node_size);
293 return entry;
294 }
295 return NULL;
296 }
297
add_cb(lv_cache_t * cache,const void * key,void * user_data)298 static lv_cache_entry_t * add_cb(lv_cache_t * cache, const void * key, void * user_data)
299 {
300 LV_UNUSED(user_data);
301
302 lv_lru_rb_t_ * lru = (lv_lru_rb_t_ *)cache;
303
304 LV_ASSERT_NULL(lru);
305 LV_ASSERT_NULL(key);
306
307 if(lru == NULL || key == NULL) {
308 return NULL;
309 }
310
311 lv_rb_node_t * new_node = alloc_new_node(lru, (void *)key, user_data);
312 if(new_node == NULL) {
313 return NULL;
314 }
315
316 lv_cache_entry_t * entry = lv_cache_entry_get_entry(new_node->data, cache->node_size);
317
318 cache->size += lru->get_data_size_cb(key);
319
320 return entry;
321 }
322
/**
 * Detach an entry from both index structures and update the accounted size.
 * The entry's memory and user payload are NOT freed here — that is the
 * caller's responsibility.
 * @param cache     the cache object
 * @param entry     the entry to detach
 * @param user_data unused
 */
static void remove_cb(lv_cache_t * cache, lv_cache_entry_t * entry, void * user_data)
{
    LV_UNUSED(user_data);

    lv_lru_rb_t_ * lru = (lv_lru_rb_t_ *)cache;

    LV_ASSERT_NULL(lru);
    LV_ASSERT_NULL(entry);

    if(lru == NULL || entry == NULL) {
        return;
    }

    /*The entry's data doubles as the rb-tree search key.*/
    void * data = lv_cache_entry_get_data(entry);
    lv_rb_node_t * rb_node = lv_rb_find(&lru->rb, data);
    if(rb_node == NULL) {
        return;  /*Not in this cache*/
    }

    void * ll_node = *get_lru_node(lru, rb_node);
    lv_rb_remove_node(&lru->rb, rb_node);
    lv_ll_remove(&lru->ll, ll_node);
    lv_free(ll_node);

    cache->size -= lru->get_data_size_cb(data);
}
349
/**
 * Drop the entry matching `key`: free the user payload, delete the entry and
 * detach/free all bookkeeping in both index structures.
 * @param cache     the cache object
 * @param key       key of the entry to drop
 * @param user_data forwarded to the user's free_cb
 */
static void drop_cb(lv_cache_t * cache, const void * key, void * user_data)
{
    lv_lru_rb_t_ * lru = (lv_lru_rb_t_ *)cache;

    LV_ASSERT_NULL(lru);
    LV_ASSERT_NULL(key);

    if(lru == NULL || key == NULL) {
        return;
    }

    lv_rb_node_t * rb_node = lv_rb_find(&lru->rb, key);
    if(rb_node == NULL) {
        return;  /*Nothing to drop*/
    }

    void * data = rb_node->data;

    /*Release the user payload first, then subtract this entry's weight.*/
    lru->cache.ops.free_cb(data, user_data);
    cache->size -= lru->get_data_size_cb(data);

    lv_cache_entry_t * entry = lv_cache_entry_get_entry(data, cache->node_size);
    void * ll_node = *get_lru_node(lru, rb_node);

    /*Detach the rb node (not freed by remove), then delete the entry itself.*/
    lv_rb_remove_node(&lru->rb, rb_node);
    lv_cache_entry_delete(entry);

    lv_ll_remove(&lru->ll, ll_node);
    lv_free(ll_node);
}
380
/**
 * Drop every entry: free the user payload of all unreferenced entries, warn
 * about still-referenced ones, then tear down both index structures.
 * @param cache     the cache object
 * @param user_data forwarded to the user's free_cb
 */
static void drop_all_cb(lv_cache_t * cache, void * user_data)
{
    lv_lru_rb_t_ * lru = (lv_lru_rb_t_ *)cache;

    LV_ASSERT_NULL(lru);

    if(lru == NULL) {
        return;
    }

    uint32_t still_referenced = 0;
    lv_rb_node_t ** curr;
    LV_LL_READ(&lru->ll, curr) {
        /*Each ll node stores a pointer to its rb node; the rb payload starts
         *with the entry's key/data.*/
        void * search_key = (*curr)->data;
        lv_cache_entry_t * entry = lv_cache_entry_get_entry(search_key, cache->node_size);
        if(lv_cache_entry_get_ref(entry) != 0) {
            LV_LOG_WARN("entry (%p) is still referenced (%" LV_PRId32 ")", (void *)entry, lv_cache_entry_get_ref(entry));
            still_referenced++;
        }
        else {
            lru->cache.ops.free_cb(search_key, user_data);
        }
    }
    if(still_referenced > 0) {
        LV_LOG_WARN("%" LV_PRId32 " entries are still referenced", still_referenced);
    }

    lv_rb_destroy(&lru->rb);
    lv_ll_clear(&lru->ll);

    cache->size = 0;
}
414
get_victim_cb(lv_cache_t * cache,void * user_data)415 static lv_cache_entry_t * get_victim_cb(lv_cache_t * cache, void * user_data)
416 {
417 LV_UNUSED(user_data);
418
419 lv_lru_rb_t_ * lru = (lv_lru_rb_t_ *)cache;
420
421 LV_ASSERT_NULL(lru);
422
423 lv_rb_node_t ** tail;
424 LV_LL_READ_BACK(&lru->ll, tail) {
425 lv_rb_node_t * tail_node = *tail;
426 lv_cache_entry_t * entry = lv_cache_entry_get_entry(tail_node->data, cache->node_size);
427 if(lv_cache_entry_get_ref(entry) == 0) {
428 return entry;
429 }
430 }
431
432 return NULL;
433 }
434
reserve_cond_cb(lv_cache_t * cache,const void * key,size_t reserved_size,void * user_data)435 static lv_cache_reserve_cond_res_t reserve_cond_cb(lv_cache_t * cache, const void * key, size_t reserved_size,
436 void * user_data)
437 {
438 LV_UNUSED(user_data);
439
440 lv_lru_rb_t_ * lru = (lv_lru_rb_t_ *)cache;
441
442 LV_ASSERT_NULL(lru);
443
444 if(lru == NULL) {
445 return LV_CACHE_RESERVE_COND_ERROR;
446 }
447
448 uint32_t data_size = key ? lru->get_data_size_cb(key) : 0;
449 if(data_size > lru->cache.max_size) {
450 LV_LOG_ERROR("data size (%" LV_PRIu32 ") is larger than max size (%" LV_PRIu32 ")", data_size, lru->cache.max_size);
451 return LV_CACHE_RESERVE_COND_TOO_LARGE;
452 }
453
454 return cache->size + reserved_size + data_size > lru->cache.max_size
455 ? LV_CACHE_RESERVE_COND_NEED_VICTIM
456 : LV_CACHE_RESERVE_COND_OK;
457 }
458
cnt_get_data_size_cb(const void * data)459 static uint32_t cnt_get_data_size_cb(const void * data)
460 {
461 LV_UNUSED(data);
462 return 1;
463 }
464
size_get_data_size_cb(const void * data)465 static uint32_t size_get_data_size_cb(const void * data)
466 {
467 lv_cache_slot_size_t * slot = (lv_cache_slot_size_t *)data;
468 return slot->size;
469 }
470
cache_iter_create_cb(lv_cache_t * cache)471 static lv_iter_t * cache_iter_create_cb(lv_cache_t * cache)
472 {
473 return lv_iter_create(cache, lv_cache_entry_get_size(cache->node_size), sizeof(void *), cache_iter_next_cb);
474 }
475
cache_iter_next_cb(void * instance,void * context,void * elem)476 static lv_result_t cache_iter_next_cb(void * instance, void * context, void * elem)
477 {
478 lv_lru_rb_t_ * lru = (lv_lru_rb_t_ *)instance;
479 lv_rb_node_t *** ll_node = context;
480
481 LV_ASSERT_NULL(ll_node);
482
483 if(*ll_node == NULL) *ll_node = lv_ll_get_head(&lru->ll);
484 else *ll_node = lv_ll_get_next(&lru->ll, *ll_node);
485
486 lv_rb_node_t ** node = *ll_node;
487
488 if(node == NULL) return LV_RESULT_INVALID;
489
490 uint32_t node_size = lru->cache.node_size;
491 void * search_key = (*node)->data;
492 lv_memcpy(elem, search_key, lv_cache_entry_get_size(node_size));
493
494 return LV_RESULT_OK;
495 }
496