1 /**
2 * @file lv_draw_sw_blend_al88.c
3 *
4 */
5
6 /*********************
7 * INCLUDES
8 *********************/
9 #include "lv_draw_sw_blend_to_al88.h"
10 #if LV_USE_DRAW_SW
11
12 #if LV_DRAW_SW_SUPPORT_AL88
13
14 #include "lv_draw_sw_blend_private.h"
15 #include "../../../misc/lv_math.h"
16 #include "../../../display/lv_display.h"
17 #include "../../../core/lv_refr.h"
18 #include "../../../misc/lv_color.h"
19 #include "../../../stdlib/lv_string.h"
20
21 #if LV_USE_DRAW_SW_ASM == LV_DRAW_SW_ASM_NEON
22 #include "neon/lv_blend_neon.h"
23 #elif LV_USE_DRAW_SW_ASM == LV_DRAW_SW_ASM_HELIUM
24 #include "helium/lv_blend_helium.h"
25 #elif LV_USE_DRAW_SW_ASM == LV_DRAW_SW_ASM_CUSTOM
26 #include LV_DRAW_SW_ASM_CUSTOM_INCLUDE
27 #endif
28
29 /*********************
30 * DEFINES
31 *********************/
32
33 /**********************
34 * TYPEDEFS
35 **********************/
36
/**
 * Memoization cache for the expensive "both colors have alpha" path of
 * lv_color_16a_16a_mix(): when the same fg/bg pair (or alpha pair) is blended
 * repeatedly, the previously computed ratio/alpha/result are reused.
 */
typedef struct {
    lv_color16a_t fg_saved;     /*Foreground color of the last expensive mix*/
    lv_color16a_t bg_saved;     /*Background color of the last expensive mix*/
    lv_color16a_t res_saved;    /*Cached mix result for fg_saved/bg_saved (presumably; assignment is below the visible chunk)*/
    lv_opa_t res_alpha_saved;   /*Cached composite alpha for the saved alpha pair*/
    lv_opa_t ratio_saved;       /*Cached fg-alpha / result-alpha ratio for the saved alpha pair*/
} lv_color_mix_alpha_cache_t;
44
45 /**********************
46 * STATIC PROTOTYPES
47 **********************/
48
49 #if LV_DRAW_SW_SUPPORT_L8
50 static void /* LV_ATTRIBUTE_FAST_MEM */ l8_image_blend(lv_draw_sw_blend_image_dsc_t * dsc);
51 #endif
52
53 #if LV_DRAW_SW_SUPPORT_I1
54 static void /* LV_ATTRIBUTE_FAST_MEM */ i1_image_blend(lv_draw_sw_blend_image_dsc_t * dsc);
55
56 static inline uint8_t /* LV_ATTRIBUTE_FAST_MEM */ get_bit(const uint8_t * buf, int32_t bit_idx);
57 #endif
58
59 static void /* LV_ATTRIBUTE_FAST_MEM */ al88_image_blend(lv_draw_sw_blend_image_dsc_t * dsc);
60
61 #if LV_DRAW_SW_SUPPORT_RGB565
62 static void /* LV_ATTRIBUTE_FAST_MEM */ rgb565_image_blend(lv_draw_sw_blend_image_dsc_t * dsc);
63 #endif
64
65 #if LV_DRAW_SW_SUPPORT_RGB888 || LV_DRAW_SW_SUPPORT_XRGB8888
66 static void /* LV_ATTRIBUTE_FAST_MEM */ rgb888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc,
67 const uint8_t src_px_size);
68 #endif
69
70 #if LV_DRAW_SW_SUPPORT_ARGB8888
71 static void /* LV_ATTRIBUTE_FAST_MEM */ argb8888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc);
72 #endif
73
74 static void lv_color_mix_with_alpha_cache_init(lv_color_mix_alpha_cache_t * cache);
75
76 static inline void /* LV_ATTRIBUTE_FAST_MEM */ blend_non_normal_pixel(lv_color16a_t * dest, lv_color16a_t src,
77 lv_blend_mode_t mode, lv_color_mix_alpha_cache_t * cache);
78
79 static inline void * /* LV_ATTRIBUTE_FAST_MEM */ drawbuf_next_row(const void * buf, uint32_t stride);
80
81 static inline bool lv_color16a_eq(lv_color16a_t c1, lv_color16a_t c2);
82
83 static inline lv_color16a_t /* LV_ATTRIBUTE_FAST_MEM */ lv_color_mix16a(lv_color16a_t fg, lv_color16a_t bg);
84
85 static inline void /* LV_ATTRIBUTE_FAST_MEM */ lv_color_16a_16a_mix(lv_color16a_t src, lv_color16a_t * dest,
86 lv_color_mix_alpha_cache_t * cache);
87
88 /**********************
89 * STATIC VARIABLES
90 **********************/
91
92 /**********************
93 * MACROS
94 **********************/
95
96 #ifndef LV_DRAW_SW_COLOR_BLEND_TO_AL88
97 #define LV_DRAW_SW_COLOR_BLEND_TO_AL88(...) LV_RESULT_INVALID
98 #endif
99
100 #ifndef LV_DRAW_SW_COLOR_BLEND_TO_AL88_WITH_OPA
101 #define LV_DRAW_SW_COLOR_BLEND_TO_AL88_WITH_OPA(...) LV_RESULT_INVALID
102 #endif
103
104 #ifndef LV_DRAW_SW_COLOR_BLEND_TO_AL88_WITH_MASK
105 #define LV_DRAW_SW_COLOR_BLEND_TO_AL88_WITH_MASK(...) LV_RESULT_INVALID
106 #endif
107
108 #ifndef LV_DRAW_SW_COLOR_BLEND_TO_AL88_MIX_MASK_OPA
109 #define LV_DRAW_SW_COLOR_BLEND_TO_AL88_MIX_MASK_OPA(...) LV_RESULT_INVALID
110 #endif
111
112 #ifndef LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_AL88
113 #define LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_AL88(...) LV_RESULT_INVALID
114 #endif
115
116 #ifndef LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_AL88_WITH_OPA
117 #define LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_AL88_WITH_OPA(...) LV_RESULT_INVALID
118 #endif
119
120 #ifndef LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_AL88_WITH_MASK
121 #define LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_AL88_WITH_MASK(...) LV_RESULT_INVALID
122 #endif
123
124 #ifndef LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_AL88_MIX_MASK_OPA
125 #define LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_AL88_MIX_MASK_OPA(...) LV_RESULT_INVALID
126 #endif
127
128 #ifndef LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_AL88
129 #define LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_AL88(...) LV_RESULT_INVALID
130 #endif
131
132 #ifndef LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_AL88_WITH_OPA
133 #define LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_AL88_WITH_OPA(...) LV_RESULT_INVALID
134 #endif
135
136 #ifndef LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_AL88_WITH_MASK
137 #define LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_AL88_WITH_MASK(...) LV_RESULT_INVALID
138 #endif
139
140 #ifndef LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_AL88_MIX_MASK_OPA
141 #define LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_AL88_MIX_MASK_OPA(...) LV_RESULT_INVALID
142 #endif
143
144 #ifndef LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_AL88
145 #define LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_AL88(...) LV_RESULT_INVALID
146 #endif
147
148 #ifndef LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_AL88_WITH_OPA
149 #define LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_AL88_WITH_OPA(...) LV_RESULT_INVALID
150 #endif
151
152 #ifndef LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_AL88_WITH_MASK
153 #define LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_AL88_WITH_MASK(...) LV_RESULT_INVALID
154 #endif
155
156 #ifndef LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_AL88_MIX_MASK_OPA
157 #define LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_AL88_MIX_MASK_OPA(...) LV_RESULT_INVALID
158 #endif
159
160
161 #ifndef LV_DRAW_SW_AL88_BLEND_NORMAL_TO_AL88
162 #define LV_DRAW_SW_AL88_BLEND_NORMAL_TO_AL88(...) LV_RESULT_INVALID
163 #endif
164
165 #ifndef LV_DRAW_SW_AL88_BLEND_NORMAL_TO_AL88_WITH_OPA
166 #define LV_DRAW_SW_AL88_BLEND_NORMAL_TO_AL88_WITH_OPA(...) LV_RESULT_INVALID
167 #endif
168
169 #ifndef LV_DRAW_SW_AL88_BLEND_NORMAL_TO_AL88_WITH_MASK
170 #define LV_DRAW_SW_AL88_BLEND_NORMAL_TO_AL88_WITH_MASK(...) LV_RESULT_INVALID
171 #endif
172
173 #ifndef LV_DRAW_SW_AL88_BLEND_NORMAL_TO_AL88_MIX_MASK_OPA
174 #define LV_DRAW_SW_AL88_BLEND_NORMAL_TO_AL88_MIX_MASK_OPA(...) LV_RESULT_INVALID
175 #endif
176
177 #ifndef LV_DRAW_SW_I1_BLEND_NORMAL_TO_AL88
178 #define LV_DRAW_SW_I1_BLEND_NORMAL_TO_AL88(...) LV_RESULT_INVALID
179 #endif
180
181 #ifndef LV_DRAW_SW_I1_BLEND_NORMAL_TO_AL88_WITH_OPA
182 #define LV_DRAW_SW_I1_BLEND_NORMAL_TO_AL88_WITH_OPA(...) LV_RESULT_INVALID
183 #endif
184
185 #ifndef LV_DRAW_SW_I1_BLEND_NORMAL_TO_AL88_WITH_MASK
186 #define LV_DRAW_SW_I1_BLEND_NORMAL_TO_AL88_WITH_MASK(...) LV_RESULT_INVALID
187 #endif
188
189 #ifndef LV_DRAW_SW_I1_BLEND_NORMAL_TO_AL88_MIX_MASK_OPA
190 #define LV_DRAW_SW_I1_BLEND_NORMAL_TO_AL88_MIX_MASK_OPA(...) LV_RESULT_INVALID
191 #endif
192
193 /**********************
194 * GLOBAL FUNCTIONS
195 **********************/
196
/**
 * Fill an AL88 (8-bit luminance + 8-bit alpha) destination with a single color.
 * The color is converted to luminance with lv_color_luminance(). Four variants
 * are handled: plain fill, fill with opacity, fill with an A8 mask, and fill
 * with mask and opacity combined. Each variant first offers the job to an
 * optional assembly implementation (the LV_DRAW_SW_COLOR_BLEND_TO_AL88*
 * macros); the C loop below runs only when that returns LV_RESULT_INVALID.
 * @param dsc  fill descriptor: destination buffer/stride/area, color, opa, mask
 */
void LV_ATTRIBUTE_FAST_MEM lv_draw_sw_blend_color_to_al88(lv_draw_sw_blend_fill_dsc_t * dsc)
{
    int32_t w = dsc->dest_w;
    int32_t h = dsc->dest_h;
    lv_opa_t opa = dsc->opa;
    const lv_opa_t * mask = dsc->mask_buf;
    int32_t mask_stride = dsc->mask_stride;
    int32_t dest_stride = dsc->dest_stride;

    lv_color_mix_alpha_cache_t cache;
    lv_color_mix_with_alpha_cache_init(&cache);

    int32_t x;
    int32_t y;

    /*Silence "unused" warnings: when an assembly macro handles every branch
     *the C fallback loops (and these locals) are compiled out.*/
    LV_UNUSED(w);
    LV_UNUSED(h);
    LV_UNUSED(x);
    LV_UNUSED(y);
    LV_UNUSED(opa);
    LV_UNUSED(mask);
    LV_UNUSED(mask_stride);
    LV_UNUSED(dest_stride);

    /*Simple fill*/
    if(mask == NULL && opa >= LV_OPA_MAX) {
        if(LV_RESULT_INVALID == LV_DRAW_SW_COLOR_BLEND_TO_AL88(dsc)) {
            lv_color16a_t color16a;
            color16a.lumi = lv_color_luminance(dsc->color);
            color16a.alpha = 255;
            lv_color16a_t * dest_buf = dsc->dest_buf;
            for(y = 0; y < h; y++) {
                /*Manually unrolled 16x store loop for the hot full-opacity path*/
                for(x = 0; x < w - 16; x += 16) {
                    dest_buf[x + 0] = color16a;
                    dest_buf[x + 1] = color16a;
                    dest_buf[x + 2] = color16a;
                    dest_buf[x + 3] = color16a;

                    dest_buf[x + 4] = color16a;
                    dest_buf[x + 5] = color16a;
                    dest_buf[x + 6] = color16a;
                    dest_buf[x + 7] = color16a;

                    dest_buf[x + 8] = color16a;
                    dest_buf[x + 9] = color16a;
                    dest_buf[x + 10] = color16a;
                    dest_buf[x + 11] = color16a;

                    dest_buf[x + 12] = color16a;
                    dest_buf[x + 13] = color16a;
                    dest_buf[x + 14] = color16a;
                    dest_buf[x + 15] = color16a;
                }
                /*Tail: remaining 1..16 pixels of the row*/
                for(; x < w; x ++) {
                    dest_buf[x] = color16a;
                }

                dest_buf = drawbuf_next_row(dest_buf, dest_stride);
            }
        }
    }
    /*Opacity only*/
    else if(mask == NULL && opa < LV_OPA_MAX) {
        if(LV_RESULT_INVALID == LV_DRAW_SW_COLOR_BLEND_TO_AL88_WITH_OPA(dsc)) {
            lv_color16a_t color16a;
            color16a.lumi = lv_color_luminance(dsc->color);
            color16a.alpha = opa;
            lv_color16a_t * dest_buf = dsc->dest_buf;
            for(y = 0; y < h; y++) {
                for(x = 0; x < w; x++) {
                    lv_color_16a_16a_mix(color16a, &dest_buf[x], &cache);
                }
                dest_buf = drawbuf_next_row(dest_buf, dest_stride);
            }
        }

    }
    /*Masked with full opacity*/
    else if(mask && opa >= LV_OPA_MAX) {
        if(LV_RESULT_INVALID == LV_DRAW_SW_COLOR_BLEND_TO_AL88_WITH_MASK(dsc)) {
            lv_color16a_t color16a;
            color16a.lumi = lv_color_luminance(dsc->color);
            lv_color16a_t * dest_buf = (lv_color16a_t *)dsc->dest_buf;
            for(y = 0; y < h; y++) {
                for(x = 0; x < w; x++) {
                    /*The mask value acts directly as the pixel's alpha*/
                    color16a.alpha = mask[x];
                    lv_color_16a_16a_mix(color16a, &dest_buf[x], &cache);
                }
                dest_buf = drawbuf_next_row(dest_buf, dest_stride);
                mask += mask_stride;
            }
        }

    }
    /*Masked with opacity*/
    else {
        if(LV_RESULT_INVALID == LV_DRAW_SW_COLOR_BLEND_TO_AL88_MIX_MASK_OPA(dsc)) {
            lv_color16a_t color16a;
            color16a.lumi = lv_color_luminance(dsc->color);
            lv_color16a_t * dest_buf = (lv_color16a_t *)dsc->dest_buf;
            for(y = 0; y < h; y++) {
                for(x = 0; x < w; x++) {
                    /*Combine mask coverage and the global opacity*/
                    color16a.alpha = LV_OPA_MIX2(mask[x], opa);
                    lv_color_16a_16a_mix(color16a, &dest_buf[x], &cache);
                }
                dest_buf = drawbuf_next_row(dest_buf, dest_stride);
                mask += mask_stride;
            }
        }
    }
}
308
/**
 * Blend an image of any supported source color format to an AL88 destination.
 * Dispatches on dsc->src_color_format to the format-specific static helper;
 * formats disabled at compile time fall through to the warning.
 * @param dsc  image blend descriptor (buffers, strides, area, opa, mask, mode)
 */
void LV_ATTRIBUTE_FAST_MEM lv_draw_sw_blend_image_to_al88(lv_draw_sw_blend_image_dsc_t * dsc)
{
    switch(dsc->src_color_format) {
#if LV_DRAW_SW_SUPPORT_RGB565
        case LV_COLOR_FORMAT_RGB565:
            rgb565_image_blend(dsc);
            break;
#endif
#if LV_DRAW_SW_SUPPORT_RGB888
        case LV_COLOR_FORMAT_RGB888:
            /*3 bytes per source pixel*/
            rgb888_image_blend(dsc, 3);
            break;
#endif
#if LV_DRAW_SW_SUPPORT_XRGB8888
        case LV_COLOR_FORMAT_XRGB8888:
            /*4 bytes per source pixel, X byte ignored*/
            rgb888_image_blend(dsc, 4);
            break;
#endif
#if LV_DRAW_SW_SUPPORT_ARGB8888
        case LV_COLOR_FORMAT_ARGB8888:
            argb8888_image_blend(dsc);
            break;
#endif
#if LV_DRAW_SW_SUPPORT_L8
        case LV_COLOR_FORMAT_L8:
            l8_image_blend(dsc);
            break;
#endif
        case LV_COLOR_FORMAT_AL88:
            al88_image_blend(dsc);
            break;
#if LV_DRAW_SW_SUPPORT_I1
        case LV_COLOR_FORMAT_I1:
            i1_image_blend(dsc);
            break;
#endif
        default:
            LV_LOG_WARN("Not supported source color format");
            break;
    }
}
350
351 /**********************
352 * STATIC FUNCTIONS
353 **********************/
354 #if LV_DRAW_SW_SUPPORT_I1
/**
 * Blend a 1 bit-per-pixel (I1) image to an AL88 destination.
 * Each source bit becomes luminance 0 or 255 (bit * 255); alpha comes from
 * opa and/or the mask. Normal blend mode tries the assembly hooks first and
 * falls back to the C loops; other blend modes go through
 * blend_non_normal_pixel() per pixel.
 * @param dsc  image blend descriptor
 */
static void LV_ATTRIBUTE_FAST_MEM i1_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)
{
    int32_t w = dsc->dest_w;
    int32_t h = dsc->dest_h;
    lv_opa_t opa = dsc->opa;
    lv_color16a_t * dest_buf_al88 = dsc->dest_buf;
    int32_t dest_stride = dsc->dest_stride;
    const uint8_t * src_buf_i1 = dsc->src_buf;
    int32_t src_stride = dsc->src_stride;
    const lv_opa_t * mask_buf = dsc->mask_buf;
    int32_t mask_stride = dsc->mask_stride;

    lv_color_mix_alpha_cache_t cache;
    lv_color_mix_with_alpha_cache_init(&cache);

    int32_t x, y;

    if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
        /*No mask, full opacity: direct store*/
        if(mask_buf == NULL && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_I1_BLEND_NORMAL_TO_AL88(dsc)) {
                for(y = 0; y < h; y++) {
                    for(x = 0; x < w; x++) {
                        /*Bit 0/1 -> luminance 0/255*/
                        dest_buf_al88[x].lumi = get_bit(src_buf_i1, x) * 255;
                        dest_buf_al88[x].alpha = 255;
                    }
                    dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
                    src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
                }
            }
        }
        /*No mask, partial opacity*/
        else if(mask_buf == NULL && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_I1_BLEND_NORMAL_TO_AL88_WITH_OPA(dsc)) {
                for(y = 0; y < h; y++) {
                    for(x = 0; x < w; x++) {
                        lv_color16a_t src_color;
                        src_color.lumi = get_bit(src_buf_i1, x) * 255;
                        src_color.alpha = opa;
                        lv_color_16a_16a_mix(src_color, &dest_buf_al88[x], &cache);
                    }
                    dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
                    src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
                }
            }
        }
        /*Masked, full opacity: mask supplies the alpha*/
        else if(mask_buf && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_I1_BLEND_NORMAL_TO_AL88_WITH_MASK(dsc)) {
                for(y = 0; y < h; y++) {
                    for(x = 0; x < w; x++) {
                        lv_color16a_t src_color;
                        src_color.lumi = get_bit(src_buf_i1, x) * 255;
                        src_color.alpha = mask_buf[x];
                        lv_color_16a_16a_mix(src_color, &dest_buf_al88[x], &cache);
                    }
                    dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
                    src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
        /*Masked and partial opacity: combine both into the alpha*/
        else if(mask_buf && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_I1_BLEND_NORMAL_TO_AL88_MIX_MASK_OPA(dsc)) {
                for(y = 0; y < h; y++) {
                    for(x = 0; x < w; x++) {
                        lv_color16a_t src_color;
                        src_color.lumi = get_bit(src_buf_i1, x) * 255;
                        src_color.alpha = LV_OPA_MIX2(mask_buf[x], opa);
                        lv_color_16a_16a_mix(src_color, &dest_buf_al88[x], &cache);
                    }
                    dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
                    src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
        /*Unreachable for normal mode (the four cases above are exhaustive)*/
        else {
            for(y = 0; y < h; y++) {
                for(x = 0; x < w; x++) {
                    lv_color16a_t src_color;
                    src_color.lumi = get_bit(src_buf_i1, x) * 255;
                    if(mask_buf == NULL) src_color.alpha = opa;
                    else src_color.alpha = LV_OPA_MIX2(mask_buf[x], opa);
                    blend_non_normal_pixel(&dest_buf_al88[x], src_color, dsc->blend_mode, &cache);
                }
                if(mask_buf) mask_buf += mask_stride;
                dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
                src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
            }
        }
    }
}
445 #endif
446
447 #if LV_DRAW_SW_SUPPORT_L8
/**
 * Blend an L8 (8-bit luminance) image to an AL88 destination.
 * The L8 value maps directly to the destination luminance; alpha comes from
 * opa and/or the mask. Non-normal blend modes go through
 * blend_non_normal_pixel() per pixel.
 *
 * NOTE(review): the normal-mode branches invoke the AL88 assembly hooks
 * (LV_DRAW_SW_AL88_BLEND_NORMAL_TO_AL88*) even though the source format here
 * is L8 — this looks like a copy-paste from al88_image_blend(). Harmless with
 * the default LV_RESULT_INVALID macros, but an AL88-specific assembly
 * implementation would misread the L8 source. Confirm whether dedicated
 * LV_DRAW_SW_L8_BLEND_NORMAL_TO_AL88* hooks were intended.
 * @param dsc  image blend descriptor
 */
static void LV_ATTRIBUTE_FAST_MEM l8_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)
{
    int32_t w = dsc->dest_w;
    int32_t h = dsc->dest_h;
    lv_opa_t opa = dsc->opa;
    lv_color16a_t * dest_buf_al88 = dsc->dest_buf;
    int32_t dest_stride = dsc->dest_stride;
    const uint8_t * src_buf_l8 = dsc->src_buf;
    int32_t src_stride = dsc->src_stride;
    const lv_opa_t * mask_buf = dsc->mask_buf;
    int32_t mask_stride = dsc->mask_stride;

    lv_color_mix_alpha_cache_t cache;
    lv_color_mix_with_alpha_cache_init(&cache);

    int32_t x, y;

    if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
        /*No mask, full opacity: direct store*/
        if(mask_buf == NULL && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_AL88_BLEND_NORMAL_TO_AL88(dsc)) {
                for(y = 0; y < h; y++) {
                    for(x = 0; x < w; x++) {
                        dest_buf_al88[x].lumi = src_buf_l8[x];
                        dest_buf_al88[x].alpha = 255;
                    }
                    dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
                    src_buf_l8 = drawbuf_next_row(src_buf_l8, src_stride);
                }
            }
        }
        /*No mask, partial opacity*/
        else if(mask_buf == NULL && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_AL88_BLEND_NORMAL_TO_AL88_WITH_OPA(dsc)) {
                for(y = 0; y < h; y++) {
                    for(x = 0; x < w; x++) {
                        lv_color16a_t src_color;
                        src_color.lumi = src_buf_l8[x];
                        src_color.alpha = opa;
                        lv_color_16a_16a_mix(src_color, &dest_buf_al88[x], &cache);
                    }
                    dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
                    src_buf_l8 = drawbuf_next_row(src_buf_l8, src_stride);
                }
            }
        }
        /*Masked, full opacity: mask supplies the alpha*/
        else if(mask_buf && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_AL88_BLEND_NORMAL_TO_AL88_WITH_MASK(dsc)) {
                for(y = 0; y < h; y++) {
                    for(x = 0; x < w; x++) {
                        lv_color16a_t src_color;
                        src_color.lumi = src_buf_l8[x];
                        src_color.alpha = mask_buf[x];
                        lv_color_16a_16a_mix(src_color, &dest_buf_al88[x], &cache);
                    }
                    dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
                    src_buf_l8 = drawbuf_next_row(src_buf_l8, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
        /*Masked and partial opacity: combine both into the alpha*/
        else if(mask_buf && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_AL88_BLEND_NORMAL_TO_AL88_MIX_MASK_OPA(dsc)) {
                for(y = 0; y < h; y++) {
                    for(x = 0; x < w; x++) {
                        lv_color16a_t src_color;
                        src_color.lumi = src_buf_l8[x];
                        src_color.alpha = LV_OPA_MIX2(mask_buf[x], opa);
                        lv_color_16a_16a_mix(src_color, &dest_buf_al88[x], &cache);
                    }
                    dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
                    src_buf_l8 = drawbuf_next_row(src_buf_l8, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
    }
    /*Non-normal blend modes: per-pixel helper*/
    else {
        for(y = 0; y < h; y++) {
            for(x = 0; x < w; x++) {
                lv_color16a_t src_color;
                src_color.lumi = src_buf_l8[x];
                if(mask_buf == NULL) src_color.alpha = opa;
                else src_color.alpha = LV_OPA_MIX2(mask_buf[x], opa);
                blend_non_normal_pixel(&dest_buf_al88[x], src_color, dsc->blend_mode, &cache);
            }
            if(mask_buf) mask_buf += mask_stride;
            dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
            src_buf_l8 = drawbuf_next_row(src_buf_l8, src_stride);
        }
    }
}
538
539 #endif
540
al88_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)541 static void LV_ATTRIBUTE_FAST_MEM al88_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)
542 {
543 int32_t w = dsc->dest_w;
544 int32_t h = dsc->dest_h;
545 lv_opa_t opa = dsc->opa;
546 lv_color16a_t * dest_buf_al88 = dsc->dest_buf;
547 int32_t dest_stride = dsc->dest_stride;
548 const lv_color16a_t * src_buf_al88 = dsc->src_buf;
549 int32_t src_stride = dsc->src_stride;
550 const lv_opa_t * mask_buf = dsc->mask_buf;
551 int32_t mask_stride = dsc->mask_stride;
552
553 lv_color_mix_alpha_cache_t cache;
554 lv_color_mix_with_alpha_cache_init(&cache);
555
556 int32_t x, y;
557
558 if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
559 if(mask_buf == NULL && opa >= LV_OPA_MAX) {
560 if(LV_RESULT_INVALID == LV_DRAW_SW_AL88_BLEND_NORMAL_TO_AL88(dsc)) {
561 for(y = 0; y < h; y++) {
562 for(x = 0; x < w; x++) {
563 lv_color_16a_16a_mix(src_buf_al88[x], &dest_buf_al88[x], &cache);
564 }
565 dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
566 src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
567 }
568 }
569 }
570 else if(mask_buf == NULL && opa < LV_OPA_MAX) {
571 if(LV_RESULT_INVALID == LV_DRAW_SW_AL88_BLEND_NORMAL_TO_AL88_WITH_OPA(dsc)) {
572 for(y = 0; y < h; y++) {
573 for(x = 0; x < w; x++) {
574 lv_color16a_t src_color = src_buf_al88[x];
575 src_color.alpha = LV_OPA_MIX2(src_color.alpha, opa);
576 lv_color_16a_16a_mix(src_color, &dest_buf_al88[x], &cache);
577 }
578 dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
579 src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
580 }
581 }
582 }
583 else if(mask_buf && opa >= LV_OPA_MAX) {
584 if(LV_RESULT_INVALID == LV_DRAW_SW_AL88_BLEND_NORMAL_TO_AL88_WITH_MASK(dsc)) {
585 for(y = 0; y < h; y++) {
586 for(x = 0; x < w; x++) {
587 lv_color16a_t src_color = src_buf_al88[x];
588 src_color.alpha = LV_OPA_MIX2(src_color.alpha, mask_buf[x]);
589 lv_color_16a_16a_mix(src_color, &dest_buf_al88[x], &cache);
590 }
591 dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
592 src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
593 mask_buf += mask_stride;
594 }
595 }
596 }
597 else if(mask_buf && opa < LV_OPA_MAX) {
598 if(LV_RESULT_INVALID == LV_DRAW_SW_AL88_BLEND_NORMAL_TO_AL88_MIX_MASK_OPA(dsc)) {
599 for(y = 0; y < h; y++) {
600 for(x = 0; x < w; x++) {
601 lv_color16a_t src_color = src_buf_al88[x];
602 src_color.alpha = LV_OPA_MIX3(src_color.alpha, mask_buf[x], opa);
603 lv_color_16a_16a_mix(src_color, &dest_buf_al88[x], &cache);
604 }
605 dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
606 src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
607 mask_buf += mask_stride;
608 }
609 }
610 }
611 }
612 else {
613 for(y = 0; y < h; y++) {
614 for(x = 0; x < w; x++) {
615 lv_color16a_t src_color = src_buf_al88[x];
616 if(mask_buf == NULL) src_color.alpha = LV_OPA_MIX2(src_color.alpha, opa);
617 else src_color.alpha = LV_OPA_MIX3(src_color.alpha, mask_buf[x], opa);
618 blend_non_normal_pixel(&dest_buf_al88[x], src_color, dsc->blend_mode, &cache);
619 }
620 if(mask_buf) mask_buf += mask_stride;
621 dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
622 src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
623 }
624 }
625 }
626
627 #if LV_DRAW_SW_SUPPORT_RGB565
628
rgb565_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)629 static void LV_ATTRIBUTE_FAST_MEM rgb565_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)
630 {
631 int32_t w = dsc->dest_w;
632 int32_t h = dsc->dest_h;
633 lv_opa_t opa = dsc->opa;
634 lv_color16a_t * dest_buf_al88 = dsc->dest_buf;
635 int32_t dest_stride = dsc->dest_stride;
636 const lv_color16_t * src_buf_c16 = (const lv_color16_t *)dsc->src_buf;
637 int32_t src_stride = dsc->src_stride;
638 const lv_opa_t * mask_buf = dsc->mask_buf;
639 int32_t mask_stride = dsc->mask_stride;
640
641 lv_color_mix_alpha_cache_t cache;
642 lv_color_mix_with_alpha_cache_init(&cache);
643
644 int32_t x, y;
645
646 if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
647 if(mask_buf == NULL && opa >= LV_OPA_MAX) {
648 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_AL88(dsc)) {
649 for(y = 0; y < h; y++) {
650 for(x = 0; x < w; x++) {
651 dest_buf_al88[x].lumi = lv_color16_luminance(src_buf_c16[x]);
652 dest_buf_al88[x].alpha = 255;
653 }
654 dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
655 src_buf_c16 = drawbuf_next_row(src_buf_c16, src_stride);
656 }
657 }
658 }
659 else if(mask_buf == NULL && opa < LV_OPA_MAX) {
660 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_AL88_WITH_OPA(dsc, dest_px_size)) {
661 for(y = 0; y < h; y++) {
662 for(x = 0; x < w; x++) {
663 lv_color16a_t src_color;
664 src_color.lumi = lv_color16_luminance(src_buf_c16[x]);
665 src_color.alpha = opa;
666 lv_color_16a_16a_mix(src_color, &dest_buf_al88[x], &cache);
667 }
668 dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
669 src_buf_c16 = drawbuf_next_row(src_buf_c16, src_stride);
670 }
671 }
672 }
673 else if(mask_buf && opa >= LV_OPA_MAX) {
674 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_AL88_WITH_MASK(dsc, dest_px_size)) {
675 for(y = 0; y < h; y++) {
676 for(x = 0; x < w; x++) {
677 lv_color16a_t src_color;
678 src_color.lumi = lv_color16_luminance(src_buf_c16[x]);
679 src_color.alpha = mask_buf[x];
680 lv_color_16a_16a_mix(src_color, &dest_buf_al88[x], &cache);
681 }
682 dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
683 src_buf_c16 = drawbuf_next_row(src_buf_c16, src_stride);
684 mask_buf += mask_stride;
685 }
686 }
687 }
688 else {
689 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_AL88_MIX_MASK_OPA(dsc, dest_px_size)) {
690 for(y = 0; y < h; y++) {
691 for(x = 0; x < w; x++) {
692 lv_color16a_t src_color;
693 src_color.lumi = lv_color16_luminance(src_buf_c16[x]);
694 src_color.alpha = LV_OPA_MIX2(mask_buf[x], opa);
695 lv_color_16a_16a_mix(src_color, &dest_buf_al88[x], &cache);
696 }
697 dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
698 src_buf_c16 = drawbuf_next_row(src_buf_c16, src_stride);
699 mask_buf += mask_stride;
700 }
701 }
702 }
703 }
704 else {
705 for(y = 0; y < h; y++) {
706 for(x = 0; x < w; x++) {
707 lv_color16a_t src_color;
708 src_color.lumi = lv_color16_luminance(src_buf_c16[x]);
709 if(mask_buf == NULL) src_color.alpha = opa;
710 else src_color.alpha = LV_OPA_MIX2(mask_buf[x], opa);
711 blend_non_normal_pixel(&dest_buf_al88[x], src_color, dsc->blend_mode, &cache);
712 }
713 if(mask_buf) mask_buf += mask_stride;
714 dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
715 src_buf_c16 = drawbuf_next_row(src_buf_c16, src_stride);
716 }
717 }
718 }
719
720 #endif
721
722 #if LV_DRAW_SW_SUPPORT_RGB888 || LV_DRAW_SW_SUPPORT_XRGB8888
723
rgb888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc,const uint8_t src_px_size)724 static void LV_ATTRIBUTE_FAST_MEM rgb888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc,
725 const uint8_t src_px_size)
726 {
727 int32_t w = dsc->dest_w;
728 int32_t h = dsc->dest_h;
729 lv_opa_t opa = dsc->opa;
730 lv_color16a_t * dest_buf_al88 = dsc->dest_buf;
731 int32_t dest_stride = dsc->dest_stride;
732 const uint8_t * src_buf_u8 = dsc->src_buf;
733 int32_t src_stride = dsc->src_stride;
734 const lv_opa_t * mask_buf = dsc->mask_buf;
735 int32_t mask_stride = dsc->mask_stride;
736
737 lv_color_mix_alpha_cache_t cache;
738 lv_color_mix_with_alpha_cache_init(&cache);
739
740 int32_t dest_x;
741 int32_t src_x;
742 int32_t y;
743
744 if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
745 /*Special case*/
746 if(mask_buf == NULL && opa >= LV_OPA_MAX) {
747 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_AL88(dsc, src_px_size)) {
748 for(y = 0; y < h; y++) {
749 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += src_px_size) {
750 dest_buf_al88[dest_x].lumi = lv_color24_luminance(&src_buf_u8[src_x]);
751 dest_buf_al88[dest_x].alpha = 255;
752 }
753 dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
754 src_buf_u8 += src_stride;
755 }
756 }
757 }
758 if(mask_buf == NULL && opa < LV_OPA_MAX) {
759 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_AL88_WITH_OPA(dsc, dest_px_size, src_px_size)) {
760 for(y = 0; y < h; y++) {
761 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += src_px_size) {
762 lv_color16a_t src_color;
763 src_color.lumi = lv_color24_luminance(&src_buf_u8[src_x]);
764 src_color.alpha = opa;
765 lv_color_16a_16a_mix(src_color, &dest_buf_al88[dest_x], &cache);
766 }
767 dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
768 src_buf_u8 += src_stride;
769 }
770 }
771 }
772 if(mask_buf && opa >= LV_OPA_MAX) {
773 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_AL88_WITH_MASK(dsc, dest_px_size, src_px_size)) {
774 uint32_t mask_x;
775 for(y = 0; y < h; y++) {
776 for(mask_x = 0, dest_x = 0, src_x = 0; dest_x < w; mask_x++, dest_x++, src_x += src_px_size) {
777 lv_color16a_t src_color;
778 src_color.lumi = lv_color24_luminance(&src_buf_u8[src_x]);
779 src_color.alpha = mask_buf[mask_x];
780 lv_color_16a_16a_mix(src_color, &dest_buf_al88[dest_x], &cache);
781 }
782 dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
783 src_buf_u8 += src_stride;
784 mask_buf += mask_stride;
785 }
786 }
787 }
788 if(mask_buf && opa < LV_OPA_MAX) {
789 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_AL88_MIX_MASK_OPA(dsc, dest_px_size, src_px_size)) {
790 uint32_t mask_x;
791 for(y = 0; y < h; y++) {
792 for(mask_x = 0, dest_x = 0, src_x = 0; dest_x < w; mask_x++, dest_x++, src_x += src_px_size) {
793 lv_color16a_t src_color;
794 src_color.lumi = lv_color24_luminance(&src_buf_u8[src_x]);
795 src_color.alpha = LV_OPA_MIX2(mask_buf[mask_x], opa);
796 lv_color_16a_16a_mix(src_color, &dest_buf_al88[dest_x], &cache);
797 }
798 dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
799 src_buf_u8 += src_stride;
800 mask_buf += mask_stride;
801 }
802 }
803 }
804 }
805 else {
806 for(y = 0; y < h; y++) {
807 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += src_px_size) {
808 lv_color16a_t src_color;
809 src_color.lumi = lv_color24_luminance(&src_buf_u8[src_x]);
810 if(mask_buf == NULL) src_color.alpha = opa;
811 else src_color.alpha = LV_OPA_MIX2(mask_buf[dest_x], opa);
812
813 blend_non_normal_pixel(&dest_buf_al88[dest_x], src_color, dsc->blend_mode, &cache);
814 }
815 if(mask_buf) mask_buf += mask_stride;
816 dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
817 src_buf_u8 += src_stride;
818 }
819 }
820 }
821
822 #endif
823
824 #if LV_DRAW_SW_SUPPORT_ARGB8888
825
/**
 * Blend an ARGB8888 image to an AL88 destination.
 * Luminance comes from lv_color32_luminance(); the source pixel's own alpha
 * is combined with opa and/or the mask via LV_OPA_MIX2/LV_OPA_MIX3. Normal
 * blend mode offers the job to the assembly hooks first; other modes go
 * through blend_non_normal_pixel() per pixel.
 * @param dsc  image blend descriptor
 */
static void LV_ATTRIBUTE_FAST_MEM argb8888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)
{
    int32_t w = dsc->dest_w;
    int32_t h = dsc->dest_h;
    lv_opa_t opa = dsc->opa;
    lv_color16a_t * dest_buf_al88 = dsc->dest_buf;
    int32_t dest_stride = dsc->dest_stride;
    const lv_color32_t * src_buf_c32 = dsc->src_buf;
    int32_t src_stride = dsc->src_stride;
    const lv_opa_t * mask_buf = dsc->mask_buf;
    int32_t mask_stride = dsc->mask_stride;

    lv_color_mix_alpha_cache_t cache;
    lv_color_mix_with_alpha_cache_init(&cache);

    int32_t x;
    int32_t y;

    if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
        /*No mask, full opacity: source alpha only*/
        if(mask_buf == NULL && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_AL88(dsc)) {
                for(y = 0; y < h; y++) {
                    for(x = 0; x < w; x++) {
                        lv_color16a_t src_color;
                        src_color.lumi = lv_color32_luminance(src_buf_c32[x]);
                        src_color.alpha = src_buf_c32[x].alpha;
                        lv_color_16a_16a_mix(src_color, &dest_buf_al88[x], &cache);
                    }
                    dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
                    src_buf_c32 = drawbuf_next_row(src_buf_c32, src_stride);
                }
            }
        }
        /*No mask, partial opacity: source alpha scaled by opa*/
        else if(mask_buf == NULL && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_AL88_WITH_OPA(dsc)) {
                for(y = 0; y < h; y++) {
                    for(x = 0; x < w; x++) {
                        lv_color16a_t src_color;
                        src_color.lumi = lv_color32_luminance(src_buf_c32[x]);
                        src_color.alpha = LV_OPA_MIX2(src_buf_c32[x].alpha, opa);
                        lv_color_16a_16a_mix(src_color, &dest_buf_al88[x], &cache);
                    }
                    dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
                    src_buf_c32 = drawbuf_next_row(src_buf_c32, src_stride);
                }
            }
        }
        /*Masked, full opacity: source alpha scaled by the mask*/
        else if(mask_buf && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_AL88_WITH_MASK(dsc)) {
                for(y = 0; y < h; y++) {
                    for(x = 0; x < w; x++) {
                        lv_color16a_t src_color;
                        src_color.lumi = lv_color32_luminance(src_buf_c32[x]);
                        src_color.alpha = LV_OPA_MIX2(src_buf_c32[x].alpha, mask_buf[x]);
                        lv_color_16a_16a_mix(src_color, &dest_buf_al88[x], &cache);
                    }
                    dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
                    src_buf_c32 = drawbuf_next_row(src_buf_c32, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
        /*Masked and partial opacity: source alpha scaled by mask and opa*/
        else if(mask_buf && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_AL88_MIX_MASK_OPA(dsc)) {
                for(y = 0; y < h; y++) {
                    for(x = 0; x < w; x++) {
                        lv_color16a_t src_color;
                        src_color.lumi = lv_color32_luminance(src_buf_c32[x]);
                        src_color.alpha = LV_OPA_MIX3(src_buf_c32[x].alpha, mask_buf[x], opa);
                        lv_color_16a_16a_mix(src_color, &dest_buf_al88[x], &cache);
                    }
                    dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
                    src_buf_c32 = drawbuf_next_row(src_buf_c32, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
    }
    /*Non-normal blend modes: per-pixel helper*/
    else {
        for(y = 0; y < h; y++) {
            for(x = 0; x < w; x++) {
                lv_color16a_t src_color;
                src_color.lumi = lv_color32_luminance(src_buf_c32[x]);
                src_color.alpha = src_buf_c32[x].alpha;
                if(mask_buf == NULL) src_color.alpha = LV_OPA_MIX2(src_color.alpha, opa);
                else src_color.alpha = LV_OPA_MIX3(src_color.alpha, mask_buf[x], opa);
                blend_non_normal_pixel(&dest_buf_al88[x], src_color, dsc->blend_mode, &cache);
            }
            if(mask_buf) mask_buf += mask_stride;
            dest_buf_al88 = drawbuf_next_row(dest_buf_al88, dest_stride);
            src_buf_c32 = drawbuf_next_row(src_buf_c32, src_stride);
        }
    }
}
920
921 #endif
922
923 /**
924 * Check if two AL88 colors are equal
925 * @param c1 the first color
926 * @param c2 the second color
927 * @return true: equal
928 */
/**
 * Check if two AL88 colors are equal
 * Compares the two fields directly instead of type-punning the structs
 * through `uint16_t *`: accessing a lv_color16a_t object via an unrelated
 * pointer type violates C strict-aliasing rules (C11 6.5p7) and is UB.
 * For this 2-byte struct the field-wise comparison is exactly equivalent.
 * @param c1 the first color
 * @param c2 the second color
 * @return true: equal
 */
static inline bool lv_color16a_eq(lv_color16a_t c1, lv_color16a_t c2)
{
    return c1.lumi == c2.lumi && c1.alpha == c2.alpha;
}
933
lv_color_mix16a(lv_color16a_t fg,lv_color16a_t bg)934 static inline lv_color16a_t LV_ATTRIBUTE_FAST_MEM lv_color_mix16a(lv_color16a_t fg, lv_color16a_t bg)
935 {
936 #if 0
937 if(fg.alpha >= LV_OPA_MAX) {
938 fg.alpha = bg.alpha;
939 return fg;
940 }
941 if(fg.alpha <= LV_OPA_MIN) {
942 return bg;
943 }
944 #endif
945 bg.lumi = (uint32_t)((uint32_t)fg.lumi * fg.alpha + (uint32_t)bg.lumi * (255 - fg.alpha)) >> 8;
946 return bg;
947 }
948
/**
 * Blend an AL88 foreground color onto an AL88 background color in place.
 *
 * The trivial cases (opaque foreground, transparent foreground/background,
 * opaque background) are handled with cheap shortcuts. Only when BOTH
 * colors are semi-transparent is the full "over" compositing applied, and
 * that expensive path is memoized in `cache`: blend runs typically repeat
 * the same fg/bg pairs along a row, so the previous inputs and result are
 * reused when they match.
 *
 * @param fg    foreground color to blend
 * @param bg    background color; overwritten with the blend result
 * @param cache caller-provided memo, initialized with
 *              lv_color_mix_with_alpha_cache_init()
 */
static inline void LV_ATTRIBUTE_FAST_MEM lv_color_16a_16a_mix(lv_color16a_t fg, lv_color16a_t * bg,
                                                              lv_color_mix_alpha_cache_t * cache)
{
    /*Pick the foreground if it's fully opaque or the Background is fully transparent*/
    if(fg.alpha >= LV_OPA_MAX || bg->alpha <= LV_OPA_MIN) {
        *bg = fg;
    }
    /*Transparent foreground: use the Background*/
    else if(fg.alpha <= LV_OPA_MIN) {
        /* no need to copy */
    }
    /*Opaque background: use simple mix*/
    else if(bg->alpha == 255) {
        *bg = lv_color_mix16a(fg, *bg);
    }
    /*Both colors have alpha. Expensive calculation needs to be applied*/
    else {
        /*Save the parameters and the result. If they will be asked again don't compute again*/

        /*Update the ratio and the result alpha value if the input alpha values change*/
        if(bg->alpha != cache->bg_saved.alpha || fg.alpha != cache->fg_saved.alpha) {
            /*Info:
             * https://en.wikipedia.org/wiki/Alpha_compositing#Analytical_derivation_of_the_over_operator*/
            cache->res_alpha_saved = 255 - LV_OPA_MIX2(255 - fg.alpha, 255 - bg->alpha);
            /*Both alphas are above LV_OPA_MIN in this branch, so the composite
             *alpha cannot be 0; the assert documents the divide-by-zero guard.*/
            LV_ASSERT(cache->res_alpha_saved != 0);
            /*ratio = fg's weight relative to the composite alpha, scaled to 0..255*/
            cache->ratio_saved = (uint32_t)((uint32_t)fg.alpha * 255) / cache->res_alpha_saved;
        }

        /*Recompute the mixed color only when either input color changed
         *(comparing the full lumi+alpha pair, not just the alphas above)*/
        if(!lv_color16a_eq(*bg, cache->bg_saved) || !lv_color16a_eq(fg, cache->fg_saved)) {
            cache->fg_saved = fg;
            cache->bg_saved = *bg;
            /*Mix with the precomputed ratio instead of fg's raw alpha*/
            fg.alpha = cache->ratio_saved;
            cache->res_saved = lv_color_mix16a(fg, *bg);
            cache->res_saved.alpha = cache->res_alpha_saved;
        }

        *bg = cache->res_saved;
    }
}
988
/**
 * Reset a color-mix memo so the first real blend is guaranteed to miss:
 * all saved colors become transparent black and the cached ratio/alpha
 * are set to their neutral value (fully opaque).
 * @param cache the cache to initialize
 */
void lv_color_mix_with_alpha_cache_init(lv_color_mix_alpha_cache_t * cache)
{
    /*Zero the whole struct in one go (fg/bg/result all transparent black)...*/
    lv_memzero(cache, sizeof(lv_color_mix_alpha_cache_t));
    /*...then raise the two scalar fields to their neutral 255 defaults*/
    cache->res_alpha_saved = 255;
    cache->ratio_saved = 255;
}
997
998 #if LV_DRAW_SW_SUPPORT_I1
999
get_bit(const uint8_t * buf,int32_t bit_idx)1000 static inline uint8_t LV_ATTRIBUTE_FAST_MEM get_bit(const uint8_t * buf, int32_t bit_idx)
1001 {
1002 return (buf[bit_idx / 8] >> (7 - (bit_idx % 8))) & 1;
1003 }
1004
1005 #endif
1006
blend_non_normal_pixel(lv_color16a_t * dest,lv_color16a_t src,lv_blend_mode_t mode,lv_color_mix_alpha_cache_t * cache)1007 static inline void LV_ATTRIBUTE_FAST_MEM blend_non_normal_pixel(lv_color16a_t * dest, lv_color16a_t src,
1008 lv_blend_mode_t mode, lv_color_mix_alpha_cache_t * cache)
1009 {
1010 lv_color16a_t res;
1011 switch(mode) {
1012 case LV_BLEND_MODE_ADDITIVE:
1013 res.lumi = LV_MIN(dest->lumi + src.lumi, 255);
1014 break;
1015 case LV_BLEND_MODE_SUBTRACTIVE:
1016 res.lumi = LV_MAX(dest->lumi - src.lumi, 0);
1017 break;
1018 case LV_BLEND_MODE_MULTIPLY:
1019 res.lumi = (dest->lumi * src.lumi) >> 8;
1020 break;
1021 default:
1022 LV_LOG_WARN("Not supported blend mode: %d", mode);
1023 return;
1024 }
1025 res.alpha = src.alpha;
1026 lv_color_16a_16a_mix(res, dest, cache);
1027 }
1028
drawbuf_next_row(const void * buf,uint32_t stride)1029 static inline void * LV_ATTRIBUTE_FAST_MEM drawbuf_next_row(const void * buf, uint32_t stride)
1030 {
1031 return (void *)((uint8_t *)buf + stride);
1032 }
1033
1034 #endif
1035
1036 #endif
1037