/**
 * @file lv_draw.c
 *
 */

/**
 * Modified by NXP in 2024
 */

/*********************
 *      INCLUDES
 *********************/
#include "../misc/lv_area_private.h"
#include "../misc/lv_assert.h"
#include "lv_draw_private.h"
#include "sw/lv_draw_sw.h"
#include "../display/lv_display_private.h"
#include "../core/lv_global.h"
#include "../core/lv_refr_private.h"
#include "../stdlib/lv_string.h"

/*********************
 *      DEFINES
 *********************/
#define _draw_info LV_GLOBAL_DEFAULT()->draw_info

/**********************
 *      TYPEDEFS
 **********************/

/**********************
 *  STATIC PROTOTYPES
 **********************/
static bool is_independent(lv_layer_t * layer, lv_draw_task_t * t_check);
static void lv_cleanup_task(lv_draw_task_t * t, lv_display_t * disp);
#if LV_LOG_LEVEL <= LV_LOG_LEVEL_INFO
static inline uint32_t get_layer_size_kb(uint32_t size_byte)
{
    return (size_byte + 1023) >> 10;
}
#endif

/**********************
 *  STATIC VARIABLES
 **********************/

/**********************
 *      MACROS
 **********************/

/**********************
 *   GLOBAL FUNCTIONS
 **********************/

void lv_draw_init(void)
{
#if LV_USE_OS
    lv_thread_sync_init(&_draw_info.sync);
#endif
}

void lv_draw_deinit(void)
{
#if LV_USE_OS
    lv_thread_sync_delete(&_draw_info.sync);
#endif

    lv_draw_unit_t * u = _draw_info.unit_head;
    while(u) {
        lv_draw_unit_t * cur_unit = u;
        u = u->next;

        if(cur_unit->delete_cb) cur_unit->delete_cb(cur_unit);
        lv_free(cur_unit);
    }
    _draw_info.unit_head = NULL;
}

void * lv_draw_create_unit(size_t size)
{
    lv_draw_unit_t * new_unit = lv_malloc_zeroed(size);
    LV_ASSERT_MALLOC(new_unit);
    new_unit->next = _draw_info.unit_head;
    _draw_info.unit_head = new_unit;
    _draw_info.unit_cnt++;

    return new_unit;
}
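
/* Illustrative sketch (not part of this file): a custom draw unit is typically
 * allocated with `lv_draw_create_unit()` using a struct whose first member is
 * `lv_draw_unit_t`, then its callbacks are set. The struct and callback names
 * below are hypothetical; only the `lv_draw_unit_t` fields already used in this
 * file (`name`, `evaluate_cb`, `dispatch_cb`, `delete_cb`) are assumed to exist.
 *
 *     typedef struct {
 *         lv_draw_unit_t base_unit;                // must be the first member
 *         // unit-specific state...
 *     } my_draw_unit_t;
 *
 *     my_draw_unit_t * u = lv_draw_create_unit(sizeof(my_draw_unit_t));
 *     u->base_unit.name = "MY_UNIT";
 *     u->base_unit.evaluate_cb = my_evaluate_cb;   // score the draw tasks
 *     u->base_unit.dispatch_cb = my_dispatch_cb;   // take and execute tasks
 *     u->base_unit.delete_cb = my_delete_cb;       // called from lv_draw_deinit()
 */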

lv_draw_task_t * lv_draw_add_task(lv_layer_t * layer, const lv_area_t * coords)
{
    LV_PROFILER_DRAW_BEGIN;
    lv_draw_task_t * new_task = lv_malloc_zeroed(sizeof(lv_draw_task_t));
    LV_ASSERT_MALLOC(new_task);
    new_task->area = *coords;
    new_task->_real_area = *coords;
    new_task->clip_area = layer->_clip_area;
#if LV_DRAW_TRANSFORM_USE_MATRIX
    new_task->matrix = layer->matrix;
#endif
    new_task->state = LV_DRAW_TASK_STATE_QUEUED;

    /*Find the tail*/
    if(layer->draw_task_head == NULL) {
        layer->draw_task_head = new_task;
    }
    else {
        lv_draw_task_t * tail = layer->draw_task_head;
        while(tail->next) tail = tail->next;

        tail->next = new_task;
    }

    LV_PROFILER_DRAW_END;
    return new_task;
}
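
/* Illustrative sketch (not part of this file): callers such as the lv_draw_*()
 * helpers typically pair `lv_draw_add_task()` with a heap-allocated descriptor
 * and `lv_draw_finalize_task_creation()`. The FILL task type and descriptor are
 * examples; this file itself only requires that `t->type` and `t->draw_dsc`
 * are set before finalizing.
 *
 *     lv_draw_task_t * t = lv_draw_add_task(layer, &coords);
 *     t->type = LV_DRAW_TASK_TYPE_FILL;
 *     t->draw_dsc = lv_malloc(sizeof(lv_draw_fill_dsc_t));
 *     lv_memcpy(t->draw_dsc, dsc, sizeof(lv_draw_fill_dsc_t));
 *     lv_draw_finalize_task_creation(layer, t);
 */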

void lv_draw_finalize_task_creation(lv_layer_t * layer, lv_draw_task_t * t)
{
    LV_PROFILER_DRAW_BEGIN;
    lv_draw_dsc_base_t * base_dsc = t->draw_dsc;
    base_dsc->layer = layer;

    lv_draw_global_info_t * info = &_draw_info;

    /*Send LV_EVENT_DRAW_TASK_ADDED and dispatch only for the "main" draw task,
     *not for the draw tasks added in the event itself.
     *Sending LV_EVENT_DRAW_TASK_ADDED events there might cause recursive event sends,
     *and dispatching might remove the "main" draw task while it's still being used in the event.*/

    if(info->task_running == false) {
        if(base_dsc->obj && lv_obj_has_flag(base_dsc->obj, LV_OBJ_FLAG_SEND_DRAW_TASK_EVENTS)) {
            info->task_running = true;
            lv_obj_send_event(base_dsc->obj, LV_EVENT_DRAW_TASK_ADDED, t);
            info->task_running = false;
        }

        /*Let the draw units set their preference score*/
        t->preference_score = 100;
        t->preferred_draw_unit_id = 0;
        lv_draw_unit_t * u = info->unit_head;
        while(u) {
            if(u->evaluate_cb) {
                LV_PROFILER_DRAW_BEGIN_TAG("evaluate_cb");
                LV_PROFILER_DRAW_BEGIN_TAG(u->name);
                u->evaluate_cb(u, t);
                LV_PROFILER_DRAW_END_TAG(u->name);
                LV_PROFILER_DRAW_END_TAG("evaluate_cb");
            }
            u = u->next;
        }
        if(t->preferred_draw_unit_id == LV_DRAW_UNIT_NONE) {
            LV_LOG_WARN("the draw task was not taken by any units");
            t->state = LV_DRAW_TASK_STATE_READY;
        }
        else {
            lv_draw_dispatch();
        }
    }
    else {
        /*Let the draw units set their preference score*/
        t->preference_score = 100;
        t->preferred_draw_unit_id = 0;
        lv_draw_unit_t * u = info->unit_head;
        while(u) {
            if(u->evaluate_cb) {
                LV_PROFILER_DRAW_BEGIN_TAG("evaluate_cb");
                LV_PROFILER_DRAW_BEGIN_TAG(u->name);
                u->evaluate_cb(u, t);
                LV_PROFILER_DRAW_END_TAG(u->name);
                LV_PROFILER_DRAW_END_TAG("evaluate_cb");
            }
            u = u->next;
        }
    }
    LV_PROFILER_DRAW_END;
}
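
/* Illustrative sketch (not part of this file): the LV_EVENT_DRAW_TASK_ADDED
 * event sent above can be observed from application code roughly like this
 * (the callback name is hypothetical):
 *
 *     lv_obj_add_flag(obj, LV_OBJ_FLAG_SEND_DRAW_TASK_EVENTS);
 *     lv_obj_add_event_cb(obj, on_draw_task_added, LV_EVENT_DRAW_TASK_ADDED, NULL);
 *
 *     static void on_draw_task_added(lv_event_t * e)
 *     {
 *         lv_draw_task_t * t = lv_event_get_draw_task(e);
 *         // inspect or modify t->draw_dsc before the task is dispatched
 *     }
 */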

void lv_draw_wait_for_finish(void)
{
#if LV_USE_OS
    LV_PROFILER_DRAW_BEGIN;
    lv_draw_unit_t * u = _draw_info.unit_head;
    while(u) {
        if(u->wait_for_finish_cb) {
            LV_PROFILER_DRAW_BEGIN_TAG("wait_for_finish_cb");
            LV_PROFILER_DRAW_BEGIN_TAG(u->name);
            u->wait_for_finish_cb(u);
            LV_PROFILER_DRAW_END_TAG(u->name);
            LV_PROFILER_DRAW_END_TAG("wait_for_finish_cb");
        }
        u = u->next;
    }
    LV_PROFILER_DRAW_END;
#endif
}

void lv_draw_dispatch(void)
{
    LV_PROFILER_DRAW_BEGIN;
    bool task_dispatched = false;
    lv_display_t * disp = lv_display_get_next(NULL);
    while(disp) {
        lv_layer_t * layer = disp->layer_head;
        while(layer) {
            if(lv_draw_dispatch_layer(disp, layer))
                task_dispatched = true;
            layer = layer->next;
        }
        if(!task_dispatched) {
            lv_draw_wait_for_finish();
            lv_draw_dispatch_request();
        }
        disp = lv_display_get_next(disp);
    }
    LV_PROFILER_DRAW_END;
}

bool lv_draw_dispatch_layer(lv_display_t * disp, lv_layer_t * layer)
{
    LV_PROFILER_DRAW_BEGIN;
    /*Remove the finished tasks first*/
    lv_draw_task_t * t_prev = NULL;
    lv_draw_task_t * t = layer->draw_task_head;
    lv_draw_task_t * t_next;
    while(t) {
        t_next = t->next;
        if(t->state == LV_DRAW_TASK_STATE_READY) {
            lv_cleanup_task(t, disp);
            if(t_prev != NULL)
                t_prev->next = t_next;
            else
                layer->draw_task_head = t_next;
        }
        else {
            t_prev = t;
        }
        t = t_next;
    }

    bool task_dispatched = false;

    /*This layer is ready, enable blending its buffer*/
    if(layer->parent && layer->all_tasks_added && layer->draw_task_head == NULL) {
        /*Find a draw task with TYPE_LAYER in the parent layer whose src is this layer*/
        lv_draw_task_t * t_src = layer->parent->draw_task_head;
        while(t_src) {
            if(t_src->type == LV_DRAW_TASK_TYPE_LAYER && t_src->state == LV_DRAW_TASK_STATE_WAITING) {
                lv_draw_image_dsc_t * draw_dsc = t_src->draw_dsc;
                if(draw_dsc->src == layer) {
                    t_src->state = LV_DRAW_TASK_STATE_QUEUED;
                    lv_draw_dispatch_request();
                    break;
                }
            }
            t_src = t_src->next;
        }
    }
    /*Assign draw tasks to the draw_units*/
    else {
        /*Let all the draw units pick draw tasks*/
        lv_draw_unit_t * u = _draw_info.unit_head;
        while(u) {
            LV_PROFILER_DRAW_BEGIN_TAG("dispatch_cb");
            LV_PROFILER_DRAW_BEGIN_TAG(u->name);
            int32_t taken_cnt = u->dispatch_cb(u, layer);
            LV_PROFILER_DRAW_END_TAG(u->name);
            LV_PROFILER_DRAW_END_TAG("dispatch_cb");
            if(taken_cnt != LV_DRAW_UNIT_IDLE) task_dispatched = true;
            u = u->next;
        }
    }

    LV_PROFILER_DRAW_END;
    return task_dispatched;
}

void lv_draw_dispatch_wait_for_request(void)
{
    LV_PROFILER_DRAW_BEGIN;
#if LV_USE_OS
    lv_thread_sync_wait(&_draw_info.sync);
#else
    while(!_draw_info.dispatch_req);
    _draw_info.dispatch_req = 0;
#endif
    LV_PROFILER_DRAW_END;
}

void lv_draw_dispatch_request(void)
{
    LV_PROFILER_DRAW_BEGIN;
#if LV_USE_OS
    lv_thread_sync_signal(&_draw_info.sync);
#else
    _draw_info.dispatch_req = 1;
#endif
    LV_PROFILER_DRAW_END;
}
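
/* Illustrative sketch (not part of this file): the refresh logic typically
 * alternates dispatching and waiting until a layer has no more pending tasks.
 * Draw units call `lv_draw_dispatch_request()` when they finish a task, which
 * wakes up a loop along these lines:
 *
 *     while(layer->draw_task_head) {
 *         lv_draw_dispatch_wait_for_request();
 *         lv_draw_dispatch();
 *     }
 */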

uint32_t lv_draw_get_unit_count(void)
{
    return _draw_info.unit_cnt;
}

lv_draw_task_t * lv_draw_get_next_available_task(lv_layer_t * layer, lv_draw_task_t * t_prev, uint8_t draw_unit_id)
{
    LV_PROFILER_DRAW_BEGIN;

    /* If there is only one draw unit the tasks can be consumed linearly as
     * they are added in the correct order. However, it can happen that
     * there is a `LV_DRAW_TASK_TYPE_LAYER` task which can be blended only when
     * all its tasks are ready. As other areas might be on top of that
     * layer-to-blend, don't skip it. Instead stop there, so that the
     * draw tasks of that layer can be consumed and finished.
     * After that this layer-to-blend will have `LV_DRAW_TASK_STATE_QUEUED`
     * so it can be blended normally.*/
    if(_draw_info.unit_cnt <= 1) {
        lv_draw_task_t * t = layer->draw_task_head;
        /*If the head task is not queued yet, leave this layer until it is queued;
         *otherwise the head task is a queued task that can be processed*/
        if(t && t->state != LV_DRAW_TASK_STATE_QUEUED) t = NULL;
        LV_PROFILER_DRAW_END;
        return t;
    }

    /*Handle the case of multiple draw units*/

    /*If the first task is screen sized, there cannot be independent areas*/
    if(layer->draw_task_head) {
        int32_t hor_res = lv_display_get_horizontal_resolution(lv_refr_get_disp_refreshing());
        int32_t ver_res = lv_display_get_vertical_resolution(lv_refr_get_disp_refreshing());
        lv_draw_task_t * t = layer->draw_task_head;
        if(t->state != LV_DRAW_TASK_STATE_QUEUED &&
           t->area.x1 <= 0 && t->area.x2 >= hor_res - 1 &&
           t->area.y1 <= 0 && t->area.y2 >= ver_res - 1) {
            LV_PROFILER_DRAW_END;
            return NULL;
        }
    }

    lv_draw_task_t * t = t_prev ? t_prev->next : layer->draw_task_head;
    while(t) {
        /*Find a queued and independent task*/
        if(t->state == LV_DRAW_TASK_STATE_QUEUED &&
           (t->preferred_draw_unit_id == LV_DRAW_UNIT_NONE || t->preferred_draw_unit_id == draw_unit_id) &&
           is_independent(layer, t)) {
            LV_PROFILER_DRAW_END;
            return t;
        }
        t = t->next;
    }

    LV_PROFILER_DRAW_END;
    return NULL;
}
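
/* Illustrative sketch (not part of this file): a draw unit's dispatch_cb
 * typically uses the function above to claim work (the names with the MY_
 * and my_ prefixes are hypothetical):
 *
 *     static int32_t my_dispatch_cb(lv_draw_unit_t * u, lv_layer_t * layer)
 *     {
 *         lv_draw_task_t * t = lv_draw_get_next_available_task(layer, NULL, MY_UNIT_ID);
 *         if(t == NULL) return LV_DRAW_UNIT_IDLE;
 *
 *         t->state = LV_DRAW_TASK_STATE_IN_PROGRESS;
 *         // ...render the task...
 *         t->state = LV_DRAW_TASK_STATE_READY;
 *         lv_draw_dispatch_request();   // let the dispatcher clean up the task
 *         return 1;
 *     }
 */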

uint32_t lv_draw_get_dependent_count(lv_draw_task_t * t_check)
{
    if(t_check == NULL) return 0;
    if(t_check->next == NULL) return 0;

    LV_PROFILER_DRAW_BEGIN;
    uint32_t cnt = 0;

    lv_draw_task_t * t = t_check->next;
    while(t) {
        if((t->state == LV_DRAW_TASK_STATE_QUEUED || t->state == LV_DRAW_TASK_STATE_WAITING) &&
           lv_area_is_on(&t_check->area, &t->area)) {
            cnt++;
        }

        t = t->next;
    }
    LV_PROFILER_DRAW_END;
    return cnt;
}

void lv_layer_init(lv_layer_t * layer)
{
    LV_ASSERT_NULL(layer);
    lv_memzero(layer, sizeof(lv_layer_t));
    lv_layer_reset(layer);
}

void lv_layer_reset(lv_layer_t * layer)
{
    LV_ASSERT_NULL(layer);
#if LV_DRAW_TRANSFORM_USE_MATRIX
    lv_matrix_identity(&layer->matrix);
#endif
    layer->opa = LV_OPA_COVER;
}

lv_layer_t * lv_draw_layer_create(lv_layer_t * parent_layer, lv_color_format_t color_format, const lv_area_t * area)
{
    LV_PROFILER_DRAW_BEGIN;
    lv_layer_t * new_layer = lv_malloc_zeroed(sizeof(lv_layer_t));
    LV_ASSERT_MALLOC(new_layer);
    if(new_layer == NULL) {
        LV_PROFILER_DRAW_END;
        return NULL;
    }

    lv_draw_layer_init(new_layer, parent_layer, color_format, area);

    /*Inherits transparency from parent*/
    if(parent_layer) {
        new_layer->opa = parent_layer->opa;
    }

    LV_PROFILER_DRAW_END;
    return new_layer;
}
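
/* Illustrative sketch (not part of this file): a layer created here is later
 * blended into its parent through an image-type draw task whose `src` is the
 * layer itself (see the TYPE_LAYER handling in lv_draw_dispatch_layer()):
 *
 *     lv_layer_t * new_layer = lv_draw_layer_create(parent_layer, LV_COLOR_FORMAT_ARGB8888, &area);
 *     // ...add draw tasks to new_layer...
 *     lv_draw_image_dsc_t dsc;
 *     lv_draw_image_dsc_init(&dsc);
 *     dsc.src = new_layer;
 *     lv_draw_layer(parent_layer, &dsc, &area);
 */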

void lv_draw_layer_init(lv_layer_t * layer, lv_layer_t * parent_layer, lv_color_format_t color_format,
                        const lv_area_t * area)
{
    LV_PROFILER_DRAW_BEGIN;
    lv_layer_init(layer);
    lv_display_t * disp = lv_refr_get_disp_refreshing();

    layer->parent = parent_layer;
    layer->_clip_area = *area;
    layer->buf_area = *area;
    layer->phy_clip_area = *area;
    layer->color_format = color_format;

    if(disp->layer_init) disp->layer_init(disp, layer);

    if(disp->layer_head) {
        lv_layer_t * tail = disp->layer_head;
        while(tail->next) tail = tail->next;
        tail->next = layer;
    }
    else {
        disp->layer_head = layer;
    }

    LV_PROFILER_DRAW_END;
}

void * lv_draw_layer_alloc_buf(lv_layer_t * layer)
{
    LV_PROFILER_DRAW_BEGIN;
    /*If the buffer of the layer is already allocated return it*/
    if(layer->draw_buf != NULL) {
        LV_PROFILER_DRAW_END;
        return layer->draw_buf->data;
    }

    /*If the buffer of the layer is not allocated yet, allocate it now*/
    int32_t w = lv_area_get_width(&layer->buf_area);
    int32_t h = lv_area_get_height(&layer->buf_area);
    uint32_t layer_size_byte = h * lv_draw_buf_width_to_stride(w, layer->color_format);

#if LV_DRAW_LAYER_MAX_MEMORY > 0
    /* Do not allocate the layer if the sum of allocated layer sizes
     * would exceed `LV_DRAW_LAYER_MAX_MEMORY` */
    if((_draw_info.used_memory_for_layers + layer_size_byte) > LV_DRAW_LAYER_MAX_MEMORY) {
        LV_LOG_WARN("LV_DRAW_LAYER_MAX_MEMORY was reached when allocating the layer.");
        LV_PROFILER_DRAW_END;
        return NULL;
    }
#endif

    layer->draw_buf = lv_draw_buf_create(w, h, layer->color_format, 0);

    if(layer->draw_buf == NULL) {
        LV_LOG_WARN("Allocating layer buffer failed. Try later");
        LV_PROFILER_DRAW_END;
        return NULL;
    }

    _draw_info.used_memory_for_layers += layer_size_byte;
    LV_LOG_INFO("Layer memory used: %" LV_PRIu32 " kB", get_layer_size_kb(_draw_info.used_memory_for_layers));

    if(lv_color_format_has_alpha(layer->color_format)) {
        lv_draw_buf_clear(layer->draw_buf, NULL);
    }

    LV_PROFILER_DRAW_END;
    return layer->draw_buf->data;
}

void * lv_draw_layer_go_to_xy(lv_layer_t * layer, int32_t x, int32_t y)
{
    return lv_draw_buf_goto_xy(layer->draw_buf, x, y);
}

lv_draw_task_type_t lv_draw_task_get_type(const lv_draw_task_t * t)
{
    return t->type;
}

void * lv_draw_task_get_draw_dsc(const lv_draw_task_t * t)
{
    return t->draw_dsc;
}

void lv_draw_task_get_area(const lv_draw_task_t * t, lv_area_t * area)
{
    *area = t->area;
}

/**********************
 *   STATIC FUNCTIONS
 **********************/

/**
 * Check if there are any older draw tasks overlapping the area of `t_check`
 * @param layer     the layer to search in
 * @param t_check   check this task if it overlaps with the older ones
 * @return          true: `t_check` is not overlapping with older tasks so it's independent
 */
static bool is_independent(lv_layer_t * layer, lv_draw_task_t * t_check)
{
    LV_PROFILER_DRAW_BEGIN;
    lv_draw_task_t * t = layer->draw_task_head;

    /*If t_check is outside of the older tasks then it's independent*/
    while(t && t != t_check) {
        if(t->state != LV_DRAW_TASK_STATE_READY) {
            lv_area_t a;
            if(lv_area_intersect(&a, &t->_real_area, &t_check->_real_area)) {
                LV_PROFILER_DRAW_END;
                return false;
            }
        }
        t = t->next;
    }
    LV_PROFILER_DRAW_END;

    return true;
}

/**
 * Clean-up resources allocated by a finished task
 * @param t     pointer to a draw task
 * @param disp  pointer to a display on which the task was drawn
 */
static void lv_cleanup_task(lv_draw_task_t * t, lv_display_t * disp)
{
    LV_PROFILER_DRAW_BEGIN;
    /*If it was a layer drawing, free the layer too*/
    if(t->type == LV_DRAW_TASK_TYPE_LAYER) {
        lv_draw_image_dsc_t * draw_image_dsc = t->draw_dsc;
        lv_layer_t * layer_drawn = (lv_layer_t *)draw_image_dsc->src;

        if(layer_drawn->draw_buf) {
            int32_t h = lv_area_get_height(&layer_drawn->buf_area);
            uint32_t layer_size_byte = h * layer_drawn->draw_buf->header.stride;

            if(_draw_info.used_memory_for_layers >= layer_size_byte) {
                _draw_info.used_memory_for_layers -= layer_size_byte;
            }
            else {
                _draw_info.used_memory_for_layers = 0;
                LV_LOG_WARN("More layers were freed than allocated");
            }
            LV_LOG_INFO("Layer memory used: %" LV_PRIu32 " kB", get_layer_size_kb(_draw_info.used_memory_for_layers));
            lv_draw_buf_destroy(layer_drawn->draw_buf);
            layer_drawn->draw_buf = NULL;
        }

        /*Remove the layer from the display's layer list*/
        if(disp) {
            lv_layer_t * l2 = disp->layer_head;
            while(l2) {
                if(l2->next == layer_drawn) {
                    l2->next = layer_drawn->next;
                    break;
                }
                l2 = l2->next;
            }

            if(disp->layer_deinit) {
                LV_PROFILER_DRAW_BEGIN_TAG("layer_deinit");
                disp->layer_deinit(disp, layer_drawn);
                LV_PROFILER_DRAW_END_TAG("layer_deinit");
            }
            lv_free(layer_drawn);
        }
    }

    lv_draw_label_dsc_t * draw_label_dsc = lv_draw_task_get_label_dsc(t);
    if(draw_label_dsc && draw_label_dsc->text_local) {
        lv_free((void *)draw_label_dsc->text);
        draw_label_dsc->text = NULL;
    }

    lv_free(t->draw_dsc);
    lv_free(t);
    LV_PROFILER_DRAW_END;
}
606