/**
 * @file lv_draw_sw_blend_to_argb8888.c
 *
 */
5
6 /*********************
7 * INCLUDES
8 *********************/
9 #include "lv_draw_sw_blend_to_argb8888.h"
10 #if LV_USE_DRAW_SW
11
12 #if LV_DRAW_SW_SUPPORT_ARGB8888
13
14 #include "lv_draw_sw_blend_private.h"
15 #include "../../../misc/lv_math.h"
16 #include "../../../display/lv_display.h"
17 #include "../../../core/lv_refr.h"
18 #include "../../../misc/lv_color.h"
19 #include "../../../stdlib/lv_string.h"
20
21 #if LV_USE_DRAW_SW_ASM == LV_DRAW_SW_ASM_NEON
22 #include "neon/lv_blend_neon.h"
23 #elif LV_USE_DRAW_SW_ASM == LV_DRAW_SW_ASM_HELIUM
24 #include "helium/lv_blend_helium.h"
25 #elif LV_USE_DRAW_SW_ASM == LV_DRAW_SW_ASM_CUSTOM
26 #include LV_DRAW_SW_ASM_CUSTOM_INCLUDE
27 #endif
28
29 /*********************
30 * DEFINES
31 *********************/
32
33 /**********************
34 * TYPEDEFS
35 **********************/
36
37 typedef struct {
38 lv_color32_t fg_saved;
39 lv_color32_t bg_saved;
40 lv_color32_t res_saved;
41 lv_opa_t res_alpha_saved;
42 lv_opa_t ratio_saved;
43 } lv_color_mix_alpha_cache_t;
44
45 /**********************
46 * STATIC PROTOTYPES
47 **********************/
48
49 #if LV_DRAW_SW_SUPPORT_AL88
50 static void /* LV_ATTRIBUTE_FAST_MEM */ al88_image_blend(lv_draw_sw_blend_image_dsc_t * dsc);
51 #endif
52
53 #if LV_DRAW_SW_SUPPORT_I1
54 static void /* LV_ATTRIBUTE_FAST_MEM */ i1_image_blend(lv_draw_sw_blend_image_dsc_t * dsc);
55
56 static inline uint8_t /* LV_ATTRIBUTE_FAST_MEM */ get_bit(const uint8_t * buf, int32_t bit_idx);
57 #endif
58
59 #if LV_DRAW_SW_SUPPORT_L8
60 static void /* LV_ATTRIBUTE_FAST_MEM */ l8_image_blend(lv_draw_sw_blend_image_dsc_t * dsc);
61 #endif
62
63 #if LV_DRAW_SW_SUPPORT_RGB565
64 static void /* LV_ATTRIBUTE_FAST_MEM */ rgb565_image_blend(lv_draw_sw_blend_image_dsc_t * dsc);
65 #endif
66
67 #if LV_DRAW_SW_SUPPORT_RGB888 || LV_DRAW_SW_SUPPORT_XRGB8888
68 static void /* LV_ATTRIBUTE_FAST_MEM */ rgb888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc,
69 const uint8_t src_px_size);
70 #endif
71
72 static void /* LV_ATTRIBUTE_FAST_MEM */ argb8888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc);
73
74 static inline void /* LV_ATTRIBUTE_FAST_MEM */ lv_color_8_32_mix(const uint8_t src, lv_color32_t * dest, uint8_t mix);
75
76 static inline lv_color32_t /* LV_ATTRIBUTE_FAST_MEM */ lv_color_32_32_mix(lv_color32_t fg, lv_color32_t bg,
77 lv_color_mix_alpha_cache_t * cache);
78
79 static void lv_color_mix_with_alpha_cache_init(lv_color_mix_alpha_cache_t * cache);
80
81 static inline void /* LV_ATTRIBUTE_FAST_MEM */ blend_non_normal_pixel(lv_color32_t * dest, lv_color32_t src,
82 lv_blend_mode_t mode, lv_color_mix_alpha_cache_t * cache);
83 static inline void * /* LV_ATTRIBUTE_FAST_MEM */ drawbuf_next_row(const void * buf, uint32_t stride);
84
85 /**********************
86 * STATIC VARIABLES
87 **********************/
88
89 /**********************
90 * MACROS
91 **********************/
92
93 #ifndef LV_DRAW_SW_COLOR_BLEND_TO_ARGB8888
94 #define LV_DRAW_SW_COLOR_BLEND_TO_ARGB8888(...) LV_RESULT_INVALID
95 #endif
96
97 #ifndef LV_DRAW_SW_COLOR_BLEND_TO_ARGB8888_WITH_OPA
98 #define LV_DRAW_SW_COLOR_BLEND_TO_ARGB8888_WITH_OPA(...) LV_RESULT_INVALID
99 #endif
100
101 #ifndef LV_DRAW_SW_COLOR_BLEND_TO_ARGB8888_WITH_MASK
102 #define LV_DRAW_SW_COLOR_BLEND_TO_ARGB8888_WITH_MASK(...) LV_RESULT_INVALID
103 #endif
104
105 #ifndef LV_DRAW_SW_COLOR_BLEND_TO_ARGB8888_MIX_MASK_OPA
106 #define LV_DRAW_SW_COLOR_BLEND_TO_ARGB8888_MIX_MASK_OPA(...) LV_RESULT_INVALID
107 #endif
108
109 #ifndef LV_DRAW_SW_L8_BLEND_NORMAL_TO_ARGB8888
110 #define LV_DRAW_SW_L8_BLEND_NORMAL_TO_ARGB8888(...) LV_RESULT_INVALID
111 #endif
112
113 #ifndef LV_DRAW_SW_L8_BLEND_NORMAL_TO_ARGB8888_WITH_OPA
114 #define LV_DRAW_SW_L8_BLEND_NORMAL_TO_ARGB8888_WITH_OPA(...) LV_RESULT_INVALID
115 #endif
116
117 #ifndef LV_DRAW_SW_L8_BLEND_NORMAL_TO_ARGB8888_WITH_MASK
118 #define LV_DRAW_SW_L8_BLEND_NORMAL_TO_ARGB8888_WITH_MASK(...) LV_RESULT_INVALID
119 #endif
120
121 #ifndef LV_DRAW_SW_L8_BLEND_NORMAL_TO_ARGB8888_MIX_MASK_OPA
122 #define LV_DRAW_SW_L8_BLEND_NORMAL_TO_ARGB8888_MIX_MASK_OPA(...) LV_RESULT_INVALID
123 #endif
124
125 #ifndef LV_DRAW_SW_AL88_BLEND_NORMAL_TO_ARGB8888
126 #define LV_DRAW_SW_AL88_BLEND_NORMAL_TO_ARGB8888(...) LV_RESULT_INVALID
127 #endif
128
129 #ifndef LV_DRAW_SW_AL88_BLEND_NORMAL_TO_ARGB8888_WITH_OPA
130 #define LV_DRAW_SW_AL88_BLEND_NORMAL_TO_ARGB8888_WITH_OPA(...) LV_RESULT_INVALID
131 #endif
132
133 #ifndef LV_DRAW_SW_AL88_BLEND_NORMAL_TO_ARGB8888_WITH_MASK
134 #define LV_DRAW_SW_AL88_BLEND_NORMAL_TO_ARGB8888_WITH_MASK(...) LV_RESULT_INVALID
135 #endif
136
137 #ifndef LV_DRAW_SW_AL88_BLEND_NORMAL_TO_ARGB8888_MIX_MASK_OPA
138 #define LV_DRAW_SW_AL88_BLEND_NORMAL_TO_ARGB8888_MIX_MASK_OPA(...) LV_RESULT_INVALID
139 #endif
140
141 #ifndef LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_ARGB8888
142 #define LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_ARGB8888(...) LV_RESULT_INVALID
143 #endif
144
145 #ifndef LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_ARGB8888_WITH_OPA
146 #define LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_ARGB8888_WITH_OPA(...) LV_RESULT_INVALID
147 #endif
148
149 #ifndef LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_ARGB8888_WITH_MASK
150 #define LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_ARGB8888_WITH_MASK(...) LV_RESULT_INVALID
151 #endif
152
153 #ifndef LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_ARGB8888_MIX_MASK_OPA
154 #define LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_ARGB8888_MIX_MASK_OPA(...) LV_RESULT_INVALID
155 #endif
156
157 #ifndef LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_ARGB8888
158 #define LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_ARGB8888(...) LV_RESULT_INVALID
159 #endif
160
161 #ifndef LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_ARGB8888_WITH_OPA
162 #define LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_ARGB8888_WITH_OPA(...) LV_RESULT_INVALID
163 #endif
164
165 #ifndef LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_ARGB8888_WITH_MASK
166 #define LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_ARGB8888_WITH_MASK(...) LV_RESULT_INVALID
167 #endif
168
169 #ifndef LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_ARGB8888_MIX_MASK_OPA
170 #define LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_ARGB8888_MIX_MASK_OPA(...) LV_RESULT_INVALID
171 #endif
172
173 #ifndef LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_ARGB8888
174 #define LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_ARGB8888(...) LV_RESULT_INVALID
175 #endif
176
177 #ifndef LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_ARGB8888_WITH_OPA
178 #define LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_ARGB8888_WITH_OPA(...) LV_RESULT_INVALID
179 #endif
180
181 #ifndef LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_ARGB8888_WITH_MASK
182 #define LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_ARGB8888_WITH_MASK(...) LV_RESULT_INVALID
183 #endif
184
185 #ifndef LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_ARGB8888_MIX_MASK_OPA
186 #define LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_ARGB8888_MIX_MASK_OPA(...) LV_RESULT_INVALID
187 #endif
188
189 #ifndef LV_DRAW_SW_I1_BLEND_NORMAL_TO_ARGB8888
190 #define LV_DRAW_SW_I1_BLEND_NORMAL_TO_ARGB8888(...) LV_RESULT_INVALID
191 #endif
192
193 #ifndef LV_DRAW_SW_I1_BLEND_NORMAL_TO_ARGB8888_WITH_OPA
194 #define LV_DRAW_SW_I1_BLEND_NORMAL_TO_ARGB8888_WITH_OPA(...) LV_RESULT_INVALID
195 #endif
196
197 #ifndef LV_DRAW_SW_I1_BLEND_NORMAL_TO_ARGB8888_WITH_MASK
198 #define LV_DRAW_SW_I1_BLEND_NORMAL_TO_ARGB8888_WITH_MASK(...) LV_RESULT_INVALID
199 #endif
200
201 #ifndef LV_DRAW_SW_I1_BLEND_NORMAL_TO_ARGB8888_MIX_MASK_OPA
202 #define LV_DRAW_SW_I1_BLEND_NORMAL_TO_ARGB8888_MIX_MASK_OPA(...) LV_RESULT_INVALID
203 #endif
204
205 /**********************
206 * GLOBAL FUNCTIONS
207 **********************/
208
lv_draw_sw_blend_color_to_argb8888(lv_draw_sw_blend_fill_dsc_t * dsc)209 void LV_ATTRIBUTE_FAST_MEM lv_draw_sw_blend_color_to_argb8888(lv_draw_sw_blend_fill_dsc_t * dsc)
210 {
211 int32_t w = dsc->dest_w;
212 int32_t h = dsc->dest_h;
213 lv_opa_t opa = dsc->opa;
214 const lv_opa_t * mask = dsc->mask_buf;
215 int32_t mask_stride = dsc->mask_stride;
216 int32_t dest_stride = dsc->dest_stride;
217
218 lv_color_mix_alpha_cache_t cache;
219 lv_color_mix_with_alpha_cache_init(&cache);
220
221 int32_t x;
222 int32_t y;
223
224 LV_UNUSED(w);
225 LV_UNUSED(h);
226 LV_UNUSED(x);
227 LV_UNUSED(y);
228 LV_UNUSED(opa);
229 LV_UNUSED(mask);
230 LV_UNUSED(mask_stride);
231 LV_UNUSED(dest_stride);
232
233 /*Simple fill*/
234 if(mask == NULL && opa >= LV_OPA_MAX) {
235 if(LV_RESULT_INVALID == LV_DRAW_SW_COLOR_BLEND_TO_ARGB8888(dsc)) {
236 uint32_t color32 = lv_color_to_u32(dsc->color);
237 uint32_t * dest_buf = dsc->dest_buf;
238 for(y = 0; y < h; y++) {
239 for(x = 0; x < w - 16; x += 16) {
240 dest_buf[x + 0] = color32;
241 dest_buf[x + 1] = color32;
242 dest_buf[x + 2] = color32;
243 dest_buf[x + 3] = color32;
244
245 dest_buf[x + 4] = color32;
246 dest_buf[x + 5] = color32;
247 dest_buf[x + 6] = color32;
248 dest_buf[x + 7] = color32;
249
250 dest_buf[x + 8] = color32;
251 dest_buf[x + 9] = color32;
252 dest_buf[x + 10] = color32;
253 dest_buf[x + 11] = color32;
254
255 dest_buf[x + 12] = color32;
256 dest_buf[x + 13] = color32;
257 dest_buf[x + 14] = color32;
258 dest_buf[x + 15] = color32;
259 }
260 for(; x < w; x ++) {
261 dest_buf[x] = color32;
262 }
263
264 dest_buf = drawbuf_next_row(dest_buf, dest_stride);
265 }
266 }
267
268 }
269 /*Opacity only*/
270 else if(mask == NULL && opa < LV_OPA_MAX) {
271 if(LV_RESULT_INVALID == LV_DRAW_SW_COLOR_BLEND_TO_ARGB8888_WITH_OPA(dsc)) {
272 lv_color32_t color_argb = lv_color_to_32(dsc->color, opa);
273 lv_color32_t * dest_buf = dsc->dest_buf;
274
275 for(y = 0; y < h; y++) {
276 for(x = 0; x < w; x++) {
277 dest_buf[x] = lv_color_32_32_mix(color_argb, dest_buf[x], &cache);
278 }
279 dest_buf = drawbuf_next_row(dest_buf, dest_stride);
280 }
281 }
282
283 }
284 /*Masked with full opacity*/
285 else if(mask && opa >= LV_OPA_MAX) {
286 if(LV_RESULT_INVALID == LV_DRAW_SW_COLOR_BLEND_TO_ARGB8888_WITH_MASK(dsc)) {
287 lv_color32_t color_argb = lv_color_to_32(dsc->color, 0xff);
288 lv_color32_t * dest_buf = dsc->dest_buf;
289 for(y = 0; y < h; y++) {
290 for(x = 0; x < w; x++) {
291 color_argb.alpha = mask[x];
292 dest_buf[x] = lv_color_32_32_mix(color_argb, dest_buf[x], &cache);
293 }
294
295 dest_buf = drawbuf_next_row(dest_buf, dest_stride);
296 mask += mask_stride;
297 }
298 }
299
300 }
301 /*Masked with opacity*/
302 else {
303 if(LV_RESULT_INVALID == LV_DRAW_SW_COLOR_BLEND_TO_ARGB8888_MIX_MASK_OPA(dsc)) {
304 lv_color32_t color_argb = lv_color_to_32(dsc->color, opa);
305 lv_color32_t * dest_buf = dsc->dest_buf;
306 for(y = 0; y < h; y++) {
307 for(x = 0; x < w; x++) {
308 color_argb.alpha = LV_OPA_MIX2(mask[x], opa);
309 dest_buf[x] = lv_color_32_32_mix(color_argb, dest_buf[x], &cache);
310 }
311 dest_buf = drawbuf_next_row(dest_buf, dest_stride);
312 mask += mask_stride;
313 }
314 }
315 }
316 }
317
lv_draw_sw_blend_image_to_argb8888(lv_draw_sw_blend_image_dsc_t * dsc)318 void LV_ATTRIBUTE_FAST_MEM lv_draw_sw_blend_image_to_argb8888(lv_draw_sw_blend_image_dsc_t * dsc)
319 {
320 switch(dsc->src_color_format) {
321 #if LV_DRAW_SW_SUPPORT_RGB565
322 case LV_COLOR_FORMAT_RGB565:
323 rgb565_image_blend(dsc);
324 break;
325 #endif
326 #if LV_DRAW_SW_SUPPORT_RGB888
327 case LV_COLOR_FORMAT_RGB888:
328 rgb888_image_blend(dsc, 3);
329 break;
330 #endif
331 #if LV_DRAW_SW_SUPPORT_XRGB8888
332 case LV_COLOR_FORMAT_XRGB8888:
333 rgb888_image_blend(dsc, 4);
334 break;
335 #endif
336 case LV_COLOR_FORMAT_ARGB8888:
337 argb8888_image_blend(dsc);
338 break;
339 #if LV_DRAW_SW_SUPPORT_L8
340 case LV_COLOR_FORMAT_L8:
341 l8_image_blend(dsc);
342 break;
343 #endif
344 #if LV_DRAW_SW_SUPPORT_AL88
345 case LV_COLOR_FORMAT_AL88:
346 al88_image_blend(dsc);
347 break;
348 #endif
349 #if LV_DRAW_SW_SUPPORT_I1
350 case LV_COLOR_FORMAT_I1:
351 i1_image_blend(dsc);
352 break;
353 #endif
354 default:
355 LV_LOG_WARN("Not supported source color format");
356 break;
357 }
358 }
359
360 /**********************
361 * STATIC FUNCTIONS
362 **********************/
363
364 #if LV_DRAW_SW_SUPPORT_I1
i1_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)365 static void LV_ATTRIBUTE_FAST_MEM i1_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)
366 {
367 int32_t w = dsc->dest_w;
368 int32_t h = dsc->dest_h;
369 lv_opa_t opa = dsc->opa;
370 lv_color32_t * dest_buf_c32 = dsc->dest_buf;
371 int32_t dest_stride = dsc->dest_stride;
372 const uint8_t * src_buf_i1 = dsc->src_buf;
373 int32_t src_stride = dsc->src_stride;
374 const lv_opa_t * mask_buf = dsc->mask_buf;
375 int32_t mask_stride = dsc->mask_stride;
376
377 int32_t dest_x;
378 int32_t src_x;
379 int32_t y;
380
381 if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
382 if(mask_buf == NULL && opa >= LV_OPA_MAX) {
383 if(LV_RESULT_INVALID == LV_DRAW_SW_I1_BLEND_NORMAL_TO_ARGB8888(dsc)) {
384 for(y = 0; y < h; y++) {
385 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
386 uint8_t chan_val = get_bit(src_buf_i1, src_x) * 255;
387 dest_buf_c32[dest_x].alpha = chan_val;
388 dest_buf_c32[dest_x].red = chan_val;
389 dest_buf_c32[dest_x].green = chan_val;
390 dest_buf_c32[dest_x].blue = chan_val;
391 }
392 dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
393 src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
394 }
395 }
396 }
397 else if(mask_buf == NULL && opa < LV_OPA_MAX) {
398 if(LV_RESULT_INVALID == LV_DRAW_SW_I1_BLEND_NORMAL_TO_ARGB8888_WITH_OPA(dsc)) {
399 for(y = 0; y < h; y++) {
400 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
401 uint8_t chan_val = get_bit(src_buf_i1, src_x) * 255;
402 lv_color_8_32_mix(chan_val, &dest_buf_c32[dest_x], opa);
403 }
404 dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
405 src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
406 }
407 }
408 }
409 else if(mask_buf && opa >= LV_OPA_MAX) {
410 if(LV_RESULT_INVALID == LV_DRAW_SW_I1_BLEND_NORMAL_TO_ARGB8888_WITH_MASK(dsc)) {
411 for(y = 0; y < h; y++) {
412 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
413 uint8_t chan_val = get_bit(src_buf_i1, src_x) * 255;
414 lv_color_8_32_mix(chan_val, &dest_buf_c32[dest_x], mask_buf[src_x]);
415 }
416 dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
417 src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
418 mask_buf += mask_stride;
419 }
420 }
421 }
422 else if(mask_buf && opa < LV_OPA_MAX) {
423 if(LV_RESULT_INVALID == LV_DRAW_SW_I1_BLEND_NORMAL_TO_ARGB8888_MIX_MASK_OPA(dsc)) {
424 for(y = 0; y < h; y++) {
425 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
426 uint8_t chan_val = get_bit(src_buf_i1, src_x) * 255;
427 lv_color_8_32_mix(chan_val, &dest_buf_c32[dest_x], LV_OPA_MIX2(mask_buf[src_x], opa));
428 }
429 dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
430 src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
431 mask_buf += mask_stride;
432 }
433 }
434 }
435 }
436 else {
437 lv_color32_t src_argb;
438 lv_color_mix_alpha_cache_t cache;
439 lv_color_mix_with_alpha_cache_init(&cache);
440 for(y = 0; y < h; y++) {
441 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
442 src_argb.red = get_bit(src_buf_i1, src_x) * 255;
443 src_argb.green = src_argb.red;
444 src_argb.blue = src_argb.red;
445 if(mask_buf == NULL) src_argb.alpha = opa;
446 else src_argb.alpha = LV_OPA_MIX2(mask_buf[dest_x], opa);
447 blend_non_normal_pixel(&dest_buf_c32[dest_x], src_argb, dsc->blend_mode, &cache);
448 }
449 if(mask_buf) mask_buf += mask_stride;
450 dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
451 src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
452 }
453 }
454 }
455 #endif
456
457 #if LV_DRAW_SW_SUPPORT_AL88
al88_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)458 static void LV_ATTRIBUTE_FAST_MEM al88_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)
459 {
460 int32_t w = dsc->dest_w;
461 int32_t h = dsc->dest_h;
462 lv_opa_t opa = dsc->opa;
463 lv_color32_t * dest_buf_c32 = dsc->dest_buf;
464 int32_t dest_stride = dsc->dest_stride;
465 const lv_color16a_t * src_buf_al88 = dsc->src_buf;
466 int32_t src_stride = dsc->src_stride;
467 const lv_opa_t * mask_buf = dsc->mask_buf;
468 int32_t mask_stride = dsc->mask_stride;
469
470 int32_t dest_x;
471 int32_t src_x;
472 int32_t y;
473
474 if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
475 if(mask_buf == NULL && opa >= LV_OPA_MAX) {
476 if(LV_RESULT_INVALID == LV_DRAW_SW_AL88_BLEND_NORMAL_TO_ARGB8888(dsc)) {
477 for(y = 0; y < h; y++) {
478 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
479 /*
480 dest_buf_c32[dest_x].alpha = src_buf_al88[src_x].alpha;
481 dest_buf_c32[dest_x].red = src_buf_al88[src_x].lumi;
482 dest_buf_c32[dest_x].green = src_buf_al88[src_x].lumi;
483 dest_buf_c32[dest_x].blue = src_buf_al88[src_x].lumi;
484 */
485 lv_color_8_32_mix(src_buf_al88[src_x].lumi, &dest_buf_c32[dest_x], src_buf_al88[src_x].alpha);
486 }
487 dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
488 src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
489 }
490 }
491 }
492 else if(mask_buf == NULL && opa < LV_OPA_MAX) {
493 if(LV_RESULT_INVALID == LV_DRAW_SW_AL88_BLEND_NORMAL_TO_ARGB8888_WITH_OPA(dsc)) {
494 for(y = 0; y < h; y++) {
495 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
496 lv_color_8_32_mix(src_buf_al88[src_x].lumi, &dest_buf_c32[dest_x], LV_OPA_MIX2(src_buf_al88[src_x].alpha, opa));
497 }
498 dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
499 src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
500 }
501 }
502 }
503 else if(mask_buf && opa >= LV_OPA_MAX) {
504 if(LV_RESULT_INVALID == LV_DRAW_SW_AL88_BLEND_NORMAL_TO_ARGB8888_WITH_MASK(dsc)) {
505 for(y = 0; y < h; y++) {
506 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
507 lv_color_8_32_mix(src_buf_al88[src_x].lumi, &dest_buf_c32[dest_x], LV_OPA_MIX2(src_buf_al88[src_x].alpha,
508 mask_buf[src_x]));
509 }
510 dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
511 src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
512 mask_buf += mask_stride;
513 }
514 }
515 }
516 else if(mask_buf && opa < LV_OPA_MAX) {
517 if(LV_RESULT_INVALID == LV_DRAW_SW_AL88_BLEND_NORMAL_TO_ARGB8888_MIX_MASK_OPA(dsc)) {
518 for(y = 0; y < h; y++) {
519 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
520 lv_color_8_32_mix(src_buf_al88[src_x].lumi, &dest_buf_c32[dest_x], LV_OPA_MIX3(src_buf_al88[src_x].alpha,
521 mask_buf[src_x], opa));
522 }
523 dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
524 src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
525 mask_buf += mask_stride;
526 }
527 }
528 }
529 }
530 else {
531 lv_color32_t src_argb;
532 lv_color_mix_alpha_cache_t cache;
533 lv_color_mix_with_alpha_cache_init(&cache);
534 for(y = 0; y < h; y++) {
535 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
536 src_argb.red = src_buf_al88[src_x].lumi;
537 src_argb.green = src_buf_al88[src_x].lumi;
538 src_argb.blue = src_buf_al88[src_x].lumi;
539 if(mask_buf == NULL) src_argb.alpha = LV_OPA_MIX2(src_buf_al88[src_x].alpha, opa);
540 else src_argb.alpha = LV_OPA_MIX3(src_buf_al88[src_x].alpha, mask_buf[dest_x], opa);
541 blend_non_normal_pixel(&dest_buf_c32[dest_x], src_argb, dsc->blend_mode, &cache);
542 }
543 if(mask_buf) mask_buf += mask_stride;
544 dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
545 src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
546 }
547 }
548 }
549
550 #endif
551
552 #if LV_DRAW_SW_SUPPORT_L8
553
l8_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)554 static void LV_ATTRIBUTE_FAST_MEM l8_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)
555 {
556 int32_t w = dsc->dest_w;
557 int32_t h = dsc->dest_h;
558 lv_opa_t opa = dsc->opa;
559 lv_color32_t * dest_buf_c32 = dsc->dest_buf;
560 int32_t dest_stride = dsc->dest_stride;
561 const uint8_t * src_buf_l8 = dsc->src_buf;
562 int32_t src_stride = dsc->src_stride;
563 const lv_opa_t * mask_buf = dsc->mask_buf;
564 int32_t mask_stride = dsc->mask_stride;
565
566 int32_t dest_x;
567 int32_t src_x;
568 int32_t y;
569
570 if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
571 if(mask_buf == NULL && opa >= LV_OPA_MAX) {
572 if(LV_RESULT_INVALID == LV_DRAW_SW_L8_BLEND_NORMAL_TO_ARGB8888(dsc)) {
573 for(y = 0; y < h; y++) {
574 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
575 dest_buf_c32[dest_x].alpha = src_buf_l8[src_x];
576 dest_buf_c32[dest_x].red = src_buf_l8[src_x];
577 dest_buf_c32[dest_x].green = src_buf_l8[src_x];
578 dest_buf_c32[dest_x].blue = src_buf_l8[src_x];
579 }
580 dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
581 src_buf_l8 = drawbuf_next_row(src_buf_l8, src_stride);
582 }
583 }
584 }
585 else if(mask_buf == NULL && opa < LV_OPA_MAX) {
586 if(LV_RESULT_INVALID == LV_DRAW_SW_L8_BLEND_NORMAL_TO_ARGB8888_WITH_OPA(dsc)) {
587 for(y = 0; y < h; y++) {
588 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
589 lv_color_8_32_mix(src_buf_l8[src_x], &dest_buf_c32[dest_x], opa);
590 }
591 dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
592 src_buf_l8 = drawbuf_next_row(src_buf_l8, src_stride);
593 }
594 }
595 }
596 else if(mask_buf && opa >= LV_OPA_MAX) {
597 if(LV_RESULT_INVALID == LV_DRAW_SW_L8_BLEND_NORMAL_TO_ARGB8888_WITH_MASK(dsc)) {
598 for(y = 0; y < h; y++) {
599 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
600 lv_color_8_32_mix(src_buf_l8[src_x], &dest_buf_c32[dest_x], mask_buf[src_x]);
601 }
602 dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
603 src_buf_l8 = drawbuf_next_row(src_buf_l8, src_stride);
604 mask_buf += mask_stride;
605 }
606 }
607 }
608 else if(mask_buf && opa < LV_OPA_MAX) {
609 if(LV_RESULT_INVALID == LV_DRAW_SW_L8_BLEND_NORMAL_TO_ARGB8888_MIX_MASK_OPA(dsc)) {
610 for(y = 0; y < h; y++) {
611 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
612 lv_color_8_32_mix(src_buf_l8[src_x], &dest_buf_c32[dest_x], LV_OPA_MIX2(mask_buf[src_x], opa));
613 }
614 dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
615 src_buf_l8 = drawbuf_next_row(src_buf_l8, src_stride);
616 mask_buf += mask_stride;
617 }
618 }
619 }
620 }
621 else {
622 lv_color32_t src_argb;
623 lv_color_mix_alpha_cache_t cache;
624 lv_color_mix_with_alpha_cache_init(&cache);
625 for(y = 0; y < h; y++) {
626 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
627 src_argb.red = src_buf_l8[src_x];
628 src_argb.green = src_buf_l8[src_x];
629 src_argb.blue = src_buf_l8[src_x];
630 if(mask_buf == NULL) src_argb.alpha = opa;
631 else src_argb.alpha = LV_OPA_MIX2(mask_buf[dest_x], opa);
632 blend_non_normal_pixel(&dest_buf_c32[dest_x], src_argb, dsc->blend_mode, &cache);
633 }
634 if(mask_buf) mask_buf += mask_stride;
635 dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
636 src_buf_l8 = drawbuf_next_row(src_buf_l8, src_stride);
637 }
638 }
639 }
640
641 #endif
642
643 #if LV_DRAW_SW_SUPPORT_RGB565
644
rgb565_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)645 static void LV_ATTRIBUTE_FAST_MEM rgb565_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)
646 {
647 int32_t w = dsc->dest_w;
648 int32_t h = dsc->dest_h;
649 lv_opa_t opa = dsc->opa;
650 lv_color32_t * dest_buf_c32 = dsc->dest_buf;
651 int32_t dest_stride = dsc->dest_stride;
652 const lv_color16_t * src_buf_c16 = (const lv_color16_t *) dsc->src_buf;
653 int32_t src_stride = dsc->src_stride;
654 const lv_opa_t * mask_buf = dsc->mask_buf;
655 int32_t mask_stride = dsc->mask_stride;
656
657 lv_color32_t color_argb;
658 lv_color_mix_alpha_cache_t cache;
659 lv_color_mix_with_alpha_cache_init(&cache);
660
661 int32_t x;
662 int32_t y;
663
664 LV_UNUSED(color_argb);
665
666 if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
667 if(mask_buf == NULL) {
668 lv_result_t accelerated;
669 if(opa >= LV_OPA_MAX) {
670 accelerated = LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_ARGB8888(dsc);
671 }
672 else {
673 accelerated = LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_ARGB8888_WITH_OPA(dsc);
674 }
675 if(LV_RESULT_INVALID == accelerated) {
676 color_argb.alpha = opa;
677 for(y = 0; y < h; y++) {
678 for(x = 0; x < w; x++) {
679 color_argb.red = (src_buf_c16[x].red * 2106) >> 8; /*To make it rounded*/
680 color_argb.green = (src_buf_c16[x].green * 1037) >> 8;
681 color_argb.blue = (src_buf_c16[x].blue * 2106) >> 8;
682 dest_buf_c32[x] = lv_color_32_32_mix(color_argb, dest_buf_c32[x], &cache);
683 }
684 dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
685 src_buf_c16 = drawbuf_next_row(src_buf_c16, src_stride);
686 }
687 }
688 }
689 else if(mask_buf && opa >= LV_OPA_MAX) {
690 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_ARGB8888_WITH_MASK(dsc)) {
691 for(y = 0; y < h; y++) {
692 for(x = 0; x < w; x++) {
693 color_argb.alpha = mask_buf[x];
694 color_argb.red = (src_buf_c16[x].red * 2106) >> 8; /*To make it rounded*/
695 color_argb.green = (src_buf_c16[x].green * 1037) >> 8;
696 color_argb.blue = (src_buf_c16[x].blue * 2106) >> 8;
697 dest_buf_c32[x] = lv_color_32_32_mix(color_argb, dest_buf_c32[x], &cache);
698 }
699 dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
700 src_buf_c16 = drawbuf_next_row(src_buf_c16, src_stride);
701 mask_buf += mask_stride;
702 }
703 }
704 }
705 else {
706 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_ARGB8888_MIX_MASK_OPA(dsc)) {
707 for(y = 0; y < h; y++) {
708 for(x = 0; x < w; x++) {
709 color_argb.alpha = LV_OPA_MIX2(mask_buf[x], opa);
710 color_argb.red = (src_buf_c16[x].red * 2106) >> 8; /*To make it rounded*/
711 color_argb.green = (src_buf_c16[x].green * 1037) >> 8;
712 color_argb.blue = (src_buf_c16[x].blue * 2106) >> 8;
713 dest_buf_c32[x] = lv_color_32_32_mix(color_argb, dest_buf_c32[x], &cache);
714 }
715 dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
716 src_buf_c16 = drawbuf_next_row(src_buf_c16, src_stride);
717 mask_buf += mask_stride;
718 }
719 }
720 }
721 }
722 else {
723 lv_color32_t src_argb;
724 for(y = 0; y < h; y++) {
725 for(x = 0; x < w; x++) {
726 src_argb.red = (src_buf_c16[x].red * 2106) >> 8;
727 src_argb.green = (src_buf_c16[x].green * 1037) >> 8;
728 src_argb.blue = (src_buf_c16[x].blue * 2106) >> 8;
729 if(mask_buf == NULL) src_argb.alpha = opa;
730 else src_argb.alpha = LV_OPA_MIX2(mask_buf[x], opa);
731 blend_non_normal_pixel(&dest_buf_c32[x], src_argb, dsc->blend_mode, &cache);
732 }
733 if(mask_buf) mask_buf += mask_stride;
734 dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
735 src_buf_c16 = drawbuf_next_row(src_buf_c16, src_stride);
736 }
737 }
738 }
739
740 #endif
741
742 #if LV_DRAW_SW_SUPPORT_RGB888 || LV_DRAW_SW_SUPPORT_XRGB8888
743
rgb888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc,const uint8_t src_px_size)744 static void LV_ATTRIBUTE_FAST_MEM rgb888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc, const uint8_t src_px_size)
745 {
746
747 int32_t w = dsc->dest_w;
748 int32_t h = dsc->dest_h;
749 lv_opa_t opa = dsc->opa;
750 lv_color32_t * dest_buf_c32 = dsc->dest_buf;
751 int32_t dest_stride = dsc->dest_stride;
752 const uint8_t * src_buf = dsc->src_buf;
753 int32_t src_stride = dsc->src_stride;
754 const lv_opa_t * mask_buf = dsc->mask_buf;
755 int32_t mask_stride = dsc->mask_stride;
756
757 lv_color32_t color_argb;
758 lv_color_mix_alpha_cache_t cache;
759 lv_color_mix_with_alpha_cache_init(&cache);
760
761 int32_t dest_x;
762 int32_t src_x;
763 int32_t y;
764
765 LV_UNUSED(color_argb);
766
767 if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
768 /*Special case*/
769 if(mask_buf == NULL && opa >= LV_OPA_MAX) {
770 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_ARGB8888(dsc, src_px_size)) {
771 if(src_px_size == 4) {
772 uint32_t line_in_bytes = w * 4;
773 for(y = 0; y < h; y++) {
774 lv_memcpy(dest_buf_c32, src_buf, line_in_bytes);
775 dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
776 src_buf = drawbuf_next_row(src_buf, src_stride);
777 }
778 }
779 else if(src_px_size == 3) {
780 for(y = 0; y < h; y++) {
781 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += 3) {
782 dest_buf_c32[dest_x].red = src_buf[src_x + 2];
783 dest_buf_c32[dest_x].green = src_buf[src_x + 1];
784 dest_buf_c32[dest_x].blue = src_buf[src_x + 0];
785 dest_buf_c32[dest_x].alpha = 0xff;
786 }
787 dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
788 src_buf = drawbuf_next_row(src_buf, src_stride);
789 }
790 }
791 }
792
793 }
794 if(mask_buf == NULL && opa < LV_OPA_MAX) {
795 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_ARGB8888_WITH_OPA(dsc, src_px_size)) {
796 color_argb.alpha = opa;
797 for(y = 0; y < h; y++) {
798 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += src_px_size) {
799 color_argb.red = src_buf[src_x + 2];
800 color_argb.green = src_buf[src_x + 1];
801 color_argb.blue = src_buf[src_x + 0];
802 dest_buf_c32[dest_x] = lv_color_32_32_mix(color_argb, dest_buf_c32[dest_x], &cache);
803 }
804 dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
805 src_buf = drawbuf_next_row(src_buf, src_stride);
806 }
807 }
808
809 }
810 if(mask_buf && opa >= LV_OPA_MAX) {
811 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_ARGB8888_WITH_MASK(dsc, src_px_size)) {
812 for(y = 0; y < h; y++) {
813 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += src_px_size) {
814 color_argb.alpha = mask_buf[dest_x];
815 color_argb.red = src_buf[src_x + 2];
816 color_argb.green = src_buf[src_x + 1];
817 color_argb.blue = src_buf[src_x + 0];
818 dest_buf_c32[dest_x] = lv_color_32_32_mix(color_argb, dest_buf_c32[dest_x], &cache);
819 }
820 dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
821 src_buf = drawbuf_next_row(src_buf, src_stride);
822 mask_buf += mask_stride;
823 }
824 }
825 }
826 if(mask_buf && opa < LV_OPA_MAX) {
827 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_ARGB8888_MIX_MASK_OPA(dsc, src_px_size)) {
828 for(y = 0; y < h; y++) {
829 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += src_px_size) {
830 color_argb.alpha = (opa * mask_buf[dest_x]) >> 8;
831 color_argb.red = src_buf[src_x + 2];
832 color_argb.green = src_buf[src_x + 1];
833 color_argb.blue = src_buf[src_x + 0];
834 dest_buf_c32[dest_x] = lv_color_32_32_mix(color_argb, dest_buf_c32[dest_x], &cache);
835 }
836 dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
837 src_buf = drawbuf_next_row(src_buf, src_stride);
838 mask_buf += mask_stride;
839 }
840 }
841 }
842 }
843 else {
844 lv_color32_t src_argb;
845 for(y = 0; y < h; y++) {
846 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += src_px_size) {
847 src_argb.red = src_buf[src_x + 2];
848 src_argb.green = src_buf[src_x + 1];
849 src_argb.blue = src_buf[src_x + 0];
850 if(mask_buf == NULL) src_argb.alpha = opa;
851 else src_argb.alpha = LV_OPA_MIX2(mask_buf[dest_x], opa);
852
853 blend_non_normal_pixel(&dest_buf_c32[dest_x], src_argb, dsc->blend_mode, &cache);
854 }
855 if(mask_buf) mask_buf += mask_stride;
856 dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
857 src_buf = drawbuf_next_row(src_buf, src_stride);
858 }
859 }
860 }
861
862 #endif
863
/**
 * Blend an ARGB8888 source image onto an ARGB8888 destination buffer.
 *
 * For LV_BLEND_MODE_NORMAL, each opa/mask combination first tries the matching
 * LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_ARGB8888* hook (NEON/Helium/custom assembly,
 * see the includes at the top of the file); the plain C loop runs only when the
 * hook returns LV_RESULT_INVALID. Other blend modes go through
 * blend_non_normal_pixel() per pixel.
 *
 * @param dsc  blend descriptor: dest/src buffers and strides (strides are in bytes,
 *             applied via drawbuf_next_row), area size, opa, optional A8 mask
 *             (mask_stride is in mask elements) and blend mode
 */
static void LV_ATTRIBUTE_FAST_MEM argb8888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)
{
    int32_t w = dsc->dest_w;
    int32_t h = dsc->dest_h;
    lv_opa_t opa = dsc->opa;
    lv_color32_t * dest_buf_c32 = dsc->dest_buf;
    int32_t dest_stride = dsc->dest_stride;
    const lv_color32_t * src_buf_c32 = dsc->src_buf;
    int32_t src_stride = dsc->src_stride;
    const lv_opa_t * mask_buf = dsc->mask_buf;
    int32_t mask_stride = dsc->mask_stride;

    lv_color32_t color_argb;
    lv_color_mix_alpha_cache_t cache;
    lv_color_mix_with_alpha_cache_init(&cache);

    int32_t x;
    int32_t y;

    if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
        /*No mask, full opacity: only the per-pixel source alpha matters*/
        if(mask_buf == NULL && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_ARGB8888(dsc)) {
                for(y = 0; y < h; y++) {
                    for(x = 0; x < w; x++) {
                        dest_buf_c32[x] = lv_color_32_32_mix(src_buf_c32[x], dest_buf_c32[x], &cache);
                    }
                    dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
                    src_buf_c32 = drawbuf_next_row(src_buf_c32, src_stride);
                }
            }
        }
        /*No mask, partial opacity: scale the source alpha by opa*/
        else if(mask_buf == NULL && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_ARGB8888_WITH_OPA(dsc)) {
                for(y = 0; y < h; y++) {
                    for(x = 0; x < w; x++) {
                        color_argb = src_buf_c32[x];
                        color_argb.alpha = LV_OPA_MIX2(color_argb.alpha, opa);
                        dest_buf_c32[x] = lv_color_32_32_mix(color_argb, dest_buf_c32[x], &cache);
                    }
                    dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
                    src_buf_c32 = drawbuf_next_row(src_buf_c32, src_stride);
                }
            }
        }
        /*Mask only: scale the source alpha by the per-pixel mask value*/
        else if(mask_buf && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_ARGB8888_WITH_MASK(dsc)) {
                for(y = 0; y < h; y++) {
                    for(x = 0; x < w; x++) {
                        color_argb = src_buf_c32[x];
                        color_argb.alpha = LV_OPA_MIX2(color_argb.alpha, mask_buf[x]);
                        dest_buf_c32[x] = lv_color_32_32_mix(color_argb, dest_buf_c32[x], &cache);
                    }
                    dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
                    src_buf_c32 = drawbuf_next_row(src_buf_c32, src_stride);
                    mask_buf += mask_stride;    /*Mask rows advance in elements, not via drawbuf_next_row*/
                }
            }
        }
        /*Mask and partial opacity: combine source alpha, opa and mask*/
        else if(mask_buf && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_ARGB8888_MIX_MASK_OPA(dsc)) {
                for(y = 0; y < h; y++) {
                    for(x = 0; x < w; x++) {
                        color_argb = src_buf_c32[x];
                        color_argb.alpha = LV_OPA_MIX3(color_argb.alpha, opa, mask_buf[x]);
                        dest_buf_c32[x] = lv_color_32_32_mix(color_argb, dest_buf_c32[x], &cache);
                    }
                    dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
                    src_buf_c32 = drawbuf_next_row(src_buf_c32, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
    }
    else {
        /*Non-normal blend modes: no assembly hook, always the generic per-pixel path*/
        for(y = 0; y < h; y++) {
            for(x = 0; x < w; x++) {
                color_argb = src_buf_c32[x];
                if(mask_buf == NULL) color_argb.alpha = LV_OPA_MIX2(color_argb.alpha, opa);
                else color_argb.alpha = LV_OPA_MIX3(color_argb.alpha, mask_buf[x], opa);
                blend_non_normal_pixel(&dest_buf_c32[x], color_argb, dsc->blend_mode, &cache);
            }
            if(mask_buf) mask_buf += mask_stride;
            dest_buf_c32 = drawbuf_next_row(dest_buf_c32, dest_stride);
            src_buf_c32 = drawbuf_next_row(src_buf_c32, src_stride);
        }
    }
}
951
lv_color_8_32_mix(const uint8_t src,lv_color32_t * dest,uint8_t mix)952 static inline void LV_ATTRIBUTE_FAST_MEM lv_color_8_32_mix(const uint8_t src, lv_color32_t * dest, uint8_t mix)
953 {
954
955 if(mix == 0) return;
956
957 dest->alpha = 255;
958 if(mix >= LV_OPA_MAX) {
959 dest->red = src;
960 dest->green = src;
961 dest->blue = src;
962 }
963 else {
964 lv_opa_t mix_inv = 255 - mix;
965 dest->red = (uint32_t)((uint32_t)src * mix + dest->red * mix_inv) >> 8;
966 dest->green = (uint32_t)((uint32_t)src * mix + dest->green * mix_inv) >> 8;
967 dest->blue = (uint32_t)((uint32_t)src * mix + dest->blue * mix_inv) >> 8;
968 }
969 }
970
/**
 * Composite fg over bg (standard "over" operator) for ARGB8888 colors.
 * Trivial alpha combinations are resolved with cheap early returns; the general
 * both-translucent case is computed once and cached, so repeated calls with the
 * same fg/bg pair (common when filling areas) reuse the stored result.
 * @param fg     foreground color
 * @param bg     background color
 * @param cache  cache initialized with lv_color_mix_with_alpha_cache_init()
 * @return the blended color
 */
static inline lv_color32_t LV_ATTRIBUTE_FAST_MEM lv_color_32_32_mix(lv_color32_t fg, lv_color32_t bg,
                                                                    lv_color_mix_alpha_cache_t * cache)
{
    /*Pick the foreground if it's fully opaque or the Background is fully transparent*/
    if(fg.alpha >= LV_OPA_MAX || bg.alpha <= LV_OPA_MIN) {
        return fg;
    }
    /*Transparent foreground: use the Background*/
    else if(fg.alpha <= LV_OPA_MIN) {
        return bg;
    }
    /*Opaque background: use simple mix*/
    else if(bg.alpha == 255) {
        return lv_color_mix32(fg, bg);
    }
    /*Both colors have alpha. Expensive calculation need to be applied*/
    else {
        /*Save the parameters and the result. If they will be asked again don't compute again*/

        /*Update the ratio and the result alpha value if the input alpha values change*/
        if(bg.alpha != cache->bg_saved.alpha || fg.alpha != cache->fg_saved.alpha) {
            /*Info:
             * https://en.wikipedia.org/wiki/Alpha_compositing#Analytical_derivation_of_the_over_operator*/
            cache->res_alpha_saved = 255 - LV_OPA_MIX2(255 - fg.alpha, 255 - bg.alpha);
            /*fg.alpha > LV_OPA_MIN here, so the combined alpha can't reach zero*/
            LV_ASSERT(cache->res_alpha_saved != 0);
            cache->ratio_saved = (uint32_t)((uint32_t)fg.alpha * 255) / cache->res_alpha_saved;
        }

        /*Recompute the color part only when either input differs from the cached pair
         *(a change in the alphas above implies a difference here too)*/
        if(!lv_color32_eq(bg, cache->bg_saved) || !lv_color32_eq(fg, cache->fg_saved)) {
            cache->fg_saved = fg;
            cache->bg_saved = bg;
            fg.alpha = cache->ratio_saved;
            cache->res_saved = lv_color_mix32(fg, bg);
            cache->res_saved.alpha = cache->res_alpha_saved;
        }

        return cache->res_saved;
    }
}
1010
/**
 * Reset a color-mix cache so the next lv_color_32_32_mix() call recomputes.
 * Saved colors are zeroed; the alpha/ratio fields start at 255.
 * @param cache  cache instance to initialize
 */
void lv_color_mix_with_alpha_cache_init(lv_color_mix_alpha_cache_t * cache)
{
    lv_memzero(cache, sizeof(lv_color_mix_alpha_cache_t));
    cache->res_alpha_saved = 255;
    cache->ratio_saved = 255;
}
1019
1020 #if LV_DRAW_SW_SUPPORT_I1
1021
get_bit(const uint8_t * buf,int32_t bit_idx)1022 static inline uint8_t LV_ATTRIBUTE_FAST_MEM get_bit(const uint8_t * buf, int32_t bit_idx)
1023 {
1024 return (buf[bit_idx / 8] >> (7 - (bit_idx % 8))) & 1;
1025 }
1026
1027 #endif
1028
blend_non_normal_pixel(lv_color32_t * dest,lv_color32_t src,lv_blend_mode_t mode,lv_color_mix_alpha_cache_t * cache)1029 static inline void LV_ATTRIBUTE_FAST_MEM blend_non_normal_pixel(lv_color32_t * dest, lv_color32_t src,
1030 lv_blend_mode_t mode, lv_color_mix_alpha_cache_t * cache)
1031 {
1032 lv_color32_t res;
1033 switch(mode) {
1034 case LV_BLEND_MODE_ADDITIVE:
1035 res.red = LV_MIN(dest->red + src.red, 255);
1036 res.green = LV_MIN(dest->green + src.green, 255);
1037 res.blue = LV_MIN(dest->blue + src.blue, 255);
1038 break;
1039 case LV_BLEND_MODE_SUBTRACTIVE:
1040 res.red = LV_MAX(dest->red - src.red, 0);
1041 res.green = LV_MAX(dest->green - src.green, 0);
1042 res.blue = LV_MAX(dest->blue - src.blue, 0);
1043 break;
1044 case LV_BLEND_MODE_MULTIPLY:
1045 res.red = (dest->red * src.red) >> 8;
1046 res.green = (dest->green * src.green) >> 8;
1047 res.blue = (dest->blue * src.blue) >> 8;
1048 break;
1049 default:
1050 LV_LOG_WARN("Not supported blend mode: %d", mode);
1051 return;
1052 }
1053 res.alpha = src.alpha;
1054 *dest = lv_color_32_32_mix(res, *dest, cache);
1055 }
1056
drawbuf_next_row(const void * buf,uint32_t stride)1057 static inline void * LV_ATTRIBUTE_FAST_MEM drawbuf_next_row(const void * buf, uint32_t stride)
1058 {
1059 return (void *)((uint8_t *)buf + stride);
1060 }
1061
1062 #endif
1063
1064 #endif
1065