1 /**
2 * @file lv_draw_sw_blend_l8.c
3 *
4 */
5
6 /*********************
7 * INCLUDES
8 *********************/
9 #include "lv_draw_sw_blend_to_l8.h"
10 #if LV_USE_DRAW_SW
11
12 #if LV_DRAW_SW_SUPPORT_L8
13
14 #include "lv_draw_sw_blend_private.h"
15 #include "../../../misc/lv_math.h"
16 #include "../../../display/lv_display.h"
17 #include "../../../core/lv_refr.h"
18 #include "../../../misc/lv_color.h"
19 #include "../../../stdlib/lv_string.h"
20
21 #if LV_USE_DRAW_SW_ASM == LV_DRAW_SW_ASM_NEON
22 #include "neon/lv_blend_neon.h"
23 #elif LV_USE_DRAW_SW_ASM == LV_DRAW_SW_ASM_HELIUM
24 #include "helium/lv_blend_helium.h"
25 #elif LV_USE_DRAW_SW_ASM == LV_DRAW_SW_ASM_CUSTOM
26 #include LV_DRAW_SW_ASM_CUSTOM_INCLUDE
27 #endif
28
29 /*********************
30 * DEFINES
31 *********************/
32
33 /**********************
34 * TYPEDEFS
35 **********************/
36
37 /**********************
38 * STATIC PROTOTYPES
39 **********************/
40
41 #if LV_DRAW_SW_SUPPORT_I1
42 static void /* LV_ATTRIBUTE_FAST_MEM */ i1_image_blend(lv_draw_sw_blend_image_dsc_t * dsc);
43
44 static inline uint8_t /* LV_ATTRIBUTE_FAST_MEM */ get_bit(const uint8_t * buf, int32_t bit_idx);
45 #endif
46
47 static void /* LV_ATTRIBUTE_FAST_MEM */ l8_image_blend(lv_draw_sw_blend_image_dsc_t * dsc);
48
49 #if LV_DRAW_SW_SUPPORT_AL88
50 static void /* LV_ATTRIBUTE_FAST_MEM */ al88_image_blend(lv_draw_sw_blend_image_dsc_t * dsc);
51 #endif
52
53 #if LV_DRAW_SW_SUPPORT_RGB565
54 static void /* LV_ATTRIBUTE_FAST_MEM */ rgb565_image_blend(lv_draw_sw_blend_image_dsc_t * dsc);
55 #endif
56
57 #if LV_DRAW_SW_SUPPORT_RGB888 || LV_DRAW_SW_SUPPORT_XRGB8888
58 static void /* LV_ATTRIBUTE_FAST_MEM */ rgb888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc,
59 const uint8_t src_px_size);
60 #endif
61
62 #if LV_DRAW_SW_SUPPORT_ARGB8888
63 static void /* LV_ATTRIBUTE_FAST_MEM */ argb8888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc);
64 #endif
65
66 static inline void /* LV_ATTRIBUTE_FAST_MEM */ lv_color_8_8_mix(const uint8_t src, uint8_t * dest, uint8_t mix);
67
68 static inline void /* LV_ATTRIBUTE_FAST_MEM */ blend_non_normal_pixel(uint8_t * dest, lv_color32_t src,
69 lv_blend_mode_t mode);
70
71 static inline void * /* LV_ATTRIBUTE_FAST_MEM */ drawbuf_next_row(const void * buf, uint32_t stride);
72
73 /**********************
74 * STATIC VARIABLES
75 **********************/
76
77 /**********************
78 * MACROS
79 **********************/
80
81 #ifndef LV_DRAW_SW_COLOR_BLEND_TO_L8
82 #define LV_DRAW_SW_COLOR_BLEND_TO_L8(...) LV_RESULT_INVALID
83 #endif
84
85 #ifndef LV_DRAW_SW_COLOR_BLEND_TO_L8_WITH_OPA
86 #define LV_DRAW_SW_COLOR_BLEND_TO_L8_WITH_OPA(...) LV_RESULT_INVALID
87 #endif
88
89 #ifndef LV_DRAW_SW_COLOR_BLEND_TO_L8_WITH_MASK
90 #define LV_DRAW_SW_COLOR_BLEND_TO_L8_WITH_MASK(...) LV_RESULT_INVALID
91 #endif
92
93 #ifndef LV_DRAW_SW_COLOR_BLEND_TO_L8_MIX_MASK_OPA
94 #define LV_DRAW_SW_COLOR_BLEND_TO_L8_MIX_MASK_OPA(...) LV_RESULT_INVALID
95 #endif
96
97 #ifndef LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_L8
98 #define LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_L8(...) LV_RESULT_INVALID
99 #endif
100
101 #ifndef LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_L8_WITH_OPA
102 #define LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_L8_WITH_OPA(...) LV_RESULT_INVALID
103 #endif
104
105 #ifndef LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_L8_WITH_MASK
106 #define LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_L8_WITH_MASK(...) LV_RESULT_INVALID
107 #endif
108
109 #ifndef LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_L8_MIX_MASK_OPA
110 #define LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_L8_MIX_MASK_OPA(...) LV_RESULT_INVALID
111 #endif
112
113 #ifndef LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_L8
114 #define LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_L8(...) LV_RESULT_INVALID
115 #endif
116
117 #ifndef LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_L8_WITH_OPA
118 #define LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_L8_WITH_OPA(...) LV_RESULT_INVALID
119 #endif
120
121 #ifndef LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_L8_WITH_MASK
122 #define LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_L8_WITH_MASK(...) LV_RESULT_INVALID
123 #endif
124
125 #ifndef LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_L8_MIX_MASK_OPA
126 #define LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_L8_MIX_MASK_OPA(...) LV_RESULT_INVALID
127 #endif
128
129 #ifndef LV_DRAW_SW_L8_BLEND_NORMAL_TO_L8
130 #define LV_DRAW_SW_L8_BLEND_NORMAL_TO_L8(...) LV_RESULT_INVALID
131 #endif
132
133 #ifndef LV_DRAW_SW_L8_BLEND_NORMAL_TO_L8_WITH_OPA
134 #define LV_DRAW_SW_L8_BLEND_NORMAL_TO_L8_WITH_OPA(...) LV_RESULT_INVALID
135 #endif
136
137 #ifndef LV_DRAW_SW_L8_BLEND_NORMAL_TO_L8_WITH_MASK
138 #define LV_DRAW_SW_L8_BLEND_NORMAL_TO_L8_WITH_MASK(...) LV_RESULT_INVALID
139 #endif
140
141 #ifndef LV_DRAW_SW_L8_BLEND_NORMAL_TO_L8_MIX_MASK_OPA
142 #define LV_DRAW_SW_L8_BLEND_NORMAL_TO_L8_MIX_MASK_OPA(...) LV_RESULT_INVALID
143 #endif
144
145 #ifndef LV_DRAW_SW_AL88_BLEND_NORMAL_TO_L8
146 #define LV_DRAW_SW_AL88_BLEND_NORMAL_TO_L8(...) LV_RESULT_INVALID
147 #endif
148
149 #ifndef LV_DRAW_SW_AL88_BLEND_NORMAL_TO_L8_WITH_OPA
150 #define LV_DRAW_SW_AL88_BLEND_NORMAL_TO_L8_WITH_OPA(...) LV_RESULT_INVALID
151 #endif
152
153 #ifndef LV_DRAW_SW_AL88_BLEND_NORMAL_TO_L8_WITH_MASK
154 #define LV_DRAW_SW_AL88_BLEND_NORMAL_TO_L8_WITH_MASK(...) LV_RESULT_INVALID
155 #endif
156
157 #ifndef LV_DRAW_SW_AL88_BLEND_NORMAL_TO_L8_MIX_MASK_OPA
158 #define LV_DRAW_SW_AL88_BLEND_NORMAL_TO_L8_MIX_MASK_OPA(...) LV_RESULT_INVALID
159 #endif
160
161 #ifndef LV_DRAW_SW_I1_BLEND_NORMAL_TO_L8
162 #define LV_DRAW_SW_I1_BLEND_NORMAL_TO_L8(...) LV_RESULT_INVALID
163 #endif
164
165 #ifndef LV_DRAW_SW_I1_BLEND_NORMAL_TO_L8_WITH_OPA
166 #define LV_DRAW_SW_I1_BLEND_NORMAL_TO_L8_WITH_OPA(...) LV_RESULT_INVALID
167 #endif
168
169 #ifndef LV_DRAW_SW_I1_BLEND_NORMAL_TO_L8_WITH_MASK
170 #define LV_DRAW_SW_I1_BLEND_NORMAL_TO_L8_WITH_MASK(...) LV_RESULT_INVALID
171 #endif
172
173 #ifndef LV_DRAW_SW_I1_BLEND_NORMAL_TO_L8_MIX_MASK_OPA
174 #define LV_DRAW_SW_I1_BLEND_NORMAL_TO_L8_MIX_MASK_OPA(...) LV_RESULT_INVALID
175 #endif
176
177 /**********************
178 * GLOBAL FUNCTIONS
179 **********************/
180
lv_draw_sw_blend_color_to_l8(lv_draw_sw_blend_fill_dsc_t * dsc)181 void LV_ATTRIBUTE_FAST_MEM lv_draw_sw_blend_color_to_l8(lv_draw_sw_blend_fill_dsc_t * dsc)
182 {
183 int32_t w = dsc->dest_w;
184 int32_t h = dsc->dest_h;
185 lv_opa_t opa = dsc->opa;
186 const lv_opa_t * mask = dsc->mask_buf;
187 int32_t mask_stride = dsc->mask_stride;
188 int32_t dest_stride = dsc->dest_stride;
189
190 int32_t x;
191 int32_t y;
192
193 LV_UNUSED(w);
194 LV_UNUSED(h);
195 LV_UNUSED(x);
196 LV_UNUSED(y);
197 LV_UNUSED(opa);
198 LV_UNUSED(mask);
199 LV_UNUSED(mask_stride);
200 LV_UNUSED(dest_stride);
201
202 /*Simple fill*/
203 if(mask == NULL && opa >= LV_OPA_MAX) {
204 if(LV_RESULT_INVALID == LV_DRAW_SW_COLOR_BLEND_TO_L8(dsc)) {
205 uint8_t color8 = lv_color_luminance(dsc->color);
206 uint8_t * dest_buf = dsc->dest_buf;
207 for(y = 0; y < h; y++) {
208 for(x = 0; x < w - 16; x += 16) {
209 dest_buf[x + 0] = color8;
210 dest_buf[x + 1] = color8;
211 dest_buf[x + 2] = color8;
212 dest_buf[x + 3] = color8;
213
214 dest_buf[x + 4] = color8;
215 dest_buf[x + 5] = color8;
216 dest_buf[x + 6] = color8;
217 dest_buf[x + 7] = color8;
218
219 dest_buf[x + 8] = color8;
220 dest_buf[x + 9] = color8;
221 dest_buf[x + 10] = color8;
222 dest_buf[x + 11] = color8;
223
224 dest_buf[x + 12] = color8;
225 dest_buf[x + 13] = color8;
226 dest_buf[x + 14] = color8;
227 dest_buf[x + 15] = color8;
228 }
229 for(; x < w; x ++) {
230 dest_buf[x] = color8;
231 }
232
233 dest_buf = drawbuf_next_row(dest_buf, dest_stride);
234 }
235 }
236 }
237 /*Opacity only*/
238 else if(mask == NULL && opa < LV_OPA_MAX) {
239 if(LV_RESULT_INVALID == LV_DRAW_SW_COLOR_BLEND_TO_L8_WITH_OPA(dsc)) {
240 uint8_t color8 = lv_color_luminance(dsc->color);
241 uint8_t * dest_buf = dsc->dest_buf;
242
243 for(y = 0; y < h; y++) {
244 for(x = 0; x < w; x++) {
245 lv_color_8_8_mix(color8, &dest_buf[x], opa);
246 }
247 dest_buf = drawbuf_next_row(dest_buf, dest_stride);
248 }
249 }
250
251 }
252 /*Masked with full opacity*/
253 else if(mask && opa >= LV_OPA_MAX) {
254 if(LV_RESULT_INVALID == LV_DRAW_SW_COLOR_BLEND_TO_L8_WITH_MASK(dsc)) {
255 uint8_t color8 = lv_color_luminance(dsc->color);
256 uint8_t * dest_buf = dsc->dest_buf;
257 for(y = 0; y < h; y++) {
258 for(x = 0; x < w; x++) {
259 lv_color_8_8_mix(color8, &dest_buf[x], mask[x]);
260 }
261 dest_buf = drawbuf_next_row(dest_buf, dest_stride);
262 mask += mask_stride;
263 }
264 }
265
266 }
267 /*Masked with opacity*/
268 else {
269 if(LV_RESULT_INVALID == LV_DRAW_SW_COLOR_BLEND_TO_L8_MIX_MASK_OPA(dsc)) {
270 uint8_t color8 = lv_color_luminance(dsc->color);
271 uint8_t * dest_buf = dsc->dest_buf;
272 for(y = 0; y < h; y++) {
273 for(x = 0; x < w; x++) {
274 lv_color_8_8_mix(color8, &dest_buf[x], LV_OPA_MIX2(mask[x], opa));
275 }
276 dest_buf = drawbuf_next_row(dest_buf, dest_stride);
277 mask += mask_stride;
278 }
279 }
280 }
281 }
282
lv_draw_sw_blend_image_to_l8(lv_draw_sw_blend_image_dsc_t * dsc)283 void LV_ATTRIBUTE_FAST_MEM lv_draw_sw_blend_image_to_l8(lv_draw_sw_blend_image_dsc_t * dsc)
284 {
285 switch(dsc->src_color_format) {
286 #if LV_DRAW_SW_SUPPORT_RGB565
287 case LV_COLOR_FORMAT_RGB565:
288 rgb565_image_blend(dsc);
289 break;
290 #endif
291 #if LV_DRAW_SW_SUPPORT_RGB888
292 case LV_COLOR_FORMAT_RGB888:
293 rgb888_image_blend(dsc, 3);
294 break;
295 #endif
296 #if LV_DRAW_SW_SUPPORT_XRGB8888
297 case LV_COLOR_FORMAT_XRGB8888:
298 rgb888_image_blend(dsc, 4);
299 break;
300 #endif
301 #if LV_DRAW_SW_SUPPORT_ARGB8888
302 case LV_COLOR_FORMAT_ARGB8888:
303 argb8888_image_blend(dsc);
304 break;
305 #endif
306 case LV_COLOR_FORMAT_L8:
307 l8_image_blend(dsc);
308 break;
309 #if LV_DRAW_SW_SUPPORT_AL88
310 case LV_COLOR_FORMAT_AL88:
311 al88_image_blend(dsc);
312 break;
313 #endif
314 #if LV_DRAW_SW_SUPPORT_I1
315 case LV_COLOR_FORMAT_I1:
316 i1_image_blend(dsc);
317 break;
318 #endif
319 default:
320 LV_LOG_WARN("Not supported source color format");
321 break;
322 }
323 }
324
325 /**********************
326 * STATIC FUNCTIONS
327 **********************/
328
329 #if LV_DRAW_SW_SUPPORT_I1
i1_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)330 static void LV_ATTRIBUTE_FAST_MEM i1_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)
331 {
332 int32_t w = dsc->dest_w;
333 int32_t h = dsc->dest_h;
334 lv_opa_t opa = dsc->opa;
335 uint8_t * dest_buf_l8 = dsc->dest_buf;
336 int32_t dest_stride = dsc->dest_stride;
337 const uint8_t * src_buf_i1 = dsc->src_buf;
338 int32_t src_stride = dsc->src_stride;
339 const lv_opa_t * mask_buf = dsc->mask_buf;
340 int32_t mask_stride = dsc->mask_stride;
341
342 int32_t dest_x;
343 int32_t src_x;
344 int32_t y;
345
346 if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
347 if(mask_buf == NULL && opa >= LV_OPA_MAX) {
348 if(LV_RESULT_INVALID == LV_DRAW_SW_I1_BLEND_NORMAL_TO_L8(dsc)) {
349 for(y = 0; y < h; y++) {
350 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
351 uint8_t chan_val = get_bit(src_buf_i1, src_x) * 255;
352 lv_color_8_8_mix(chan_val, &dest_buf_l8[dest_x], opa);
353 }
354 dest_buf_l8 = drawbuf_next_row(dest_buf_l8, dest_stride);
355 src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
356 }
357 }
358 }
359 else if(mask_buf == NULL && opa < LV_OPA_MAX) {
360 if(LV_RESULT_INVALID == LV_DRAW_SW_I1_BLEND_NORMAL_TO_L8_WITH_OPA(dsc)) {
361 for(y = 0; y < h; y++) {
362 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
363 uint8_t chan_val = get_bit(src_buf_i1, src_x) * 255;
364 lv_color_8_8_mix(chan_val, &dest_buf_l8[dest_x], opa);
365 }
366 dest_buf_l8 = drawbuf_next_row(dest_buf_l8, dest_stride);
367 src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
368 }
369 }
370 }
371 else if(mask_buf && opa >= LV_OPA_MAX) {
372 if(LV_RESULT_INVALID == LV_DRAW_SW_I1_BLEND_NORMAL_TO_L8_WITH_MASK(dsc)) {
373 for(y = 0; y < h; y++) {
374 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
375 uint8_t chan_val = get_bit(src_buf_i1, src_x) * 255;
376 lv_color_8_8_mix(chan_val, &dest_buf_l8[dest_x], mask_buf[src_x]);
377 }
378 dest_buf_l8 = drawbuf_next_row(dest_buf_l8, dest_stride);
379 src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
380 mask_buf += mask_stride;
381 }
382 }
383 }
384 else if(mask_buf && opa < LV_OPA_MAX) {
385 if(LV_RESULT_INVALID == LV_DRAW_SW_I1_BLEND_NORMAL_TO_L8_MIX_MASK_OPA(dsc)) {
386 for(y = 0; y < h; y++) {
387 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
388 uint8_t chan_val = get_bit(src_buf_i1, src_x) * 255;
389 lv_color_8_8_mix(chan_val, &dest_buf_l8[dest_x], LV_OPA_MIX2(mask_buf[src_x], opa));
390 }
391 dest_buf_l8 = drawbuf_next_row(dest_buf_l8, dest_stride);
392 src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
393 mask_buf += mask_stride;
394 }
395 }
396 }
397 }
398 else {
399 lv_color32_t src_argb;
400 for(y = 0; y < h; y++) {
401 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
402 src_argb.red = get_bit(src_buf_i1, src_x) * 255;
403 src_argb.green = src_argb.red;
404 src_argb.blue = src_argb.red;
405 if(mask_buf == NULL) src_argb.alpha = opa;
406 else src_argb.alpha = LV_OPA_MIX2(mask_buf[dest_x], opa);
407 blend_non_normal_pixel(&dest_buf_l8[dest_x], src_argb, dsc->blend_mode);
408 }
409 if(mask_buf) mask_buf += mask_stride;
410 dest_buf_l8 = drawbuf_next_row(dest_buf_l8, dest_stride);
411 src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
412 }
413 }
414 }
415 #endif
416
l8_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)417 static void LV_ATTRIBUTE_FAST_MEM l8_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)
418 {
419 int32_t w = dsc->dest_w;
420 int32_t h = dsc->dest_h;
421 lv_opa_t opa = dsc->opa;
422 uint8_t * dest_buf_l8 = dsc->dest_buf;
423 int32_t dest_stride = dsc->dest_stride;
424 const uint8_t * src_buf_l8 = dsc->src_buf;
425 int32_t src_stride = dsc->src_stride;
426 const lv_opa_t * mask_buf = dsc->mask_buf;
427 int32_t mask_stride = dsc->mask_stride;
428
429 int32_t dest_x;
430 int32_t src_x;
431 int32_t y;
432
433 if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
434 if(mask_buf == NULL && opa >= LV_OPA_MAX) {
435 if(LV_RESULT_INVALID == LV_DRAW_SW_L8_BLEND_NORMAL_TO_L8(dsc)) {
436 for(y = 0; y < h; y++) {
437 lv_memcpy(dest_buf_l8, src_buf_l8, w);
438 dest_buf_l8 = drawbuf_next_row(dest_buf_l8, dest_stride);
439 src_buf_l8 = drawbuf_next_row(src_buf_l8, src_stride);
440 }
441 }
442 }
443 else if(mask_buf == NULL && opa < LV_OPA_MAX) {
444 if(LV_RESULT_INVALID == LV_DRAW_SW_L8_BLEND_NORMAL_TO_L8_WITH_OPA(dsc)) {
445 for(y = 0; y < h; y++) {
446 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
447 lv_color_8_8_mix(src_buf_l8[src_x], &dest_buf_l8[dest_x], opa);
448 }
449 dest_buf_l8 = drawbuf_next_row(dest_buf_l8, dest_stride);
450 src_buf_l8 = drawbuf_next_row(src_buf_l8, src_stride);
451 }
452 }
453 }
454 else if(mask_buf && opa >= LV_OPA_MAX) {
455 if(LV_RESULT_INVALID == LV_DRAW_SW_L8_BLEND_NORMAL_TO_L8_WITH_MASK(dsc)) {
456 for(y = 0; y < h; y++) {
457 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
458 lv_color_8_8_mix(src_buf_l8[src_x], &dest_buf_l8[dest_x], mask_buf[src_x]);
459 }
460 dest_buf_l8 = drawbuf_next_row(dest_buf_l8, dest_stride);
461 src_buf_l8 = drawbuf_next_row(src_buf_l8, src_stride);
462 mask_buf += mask_stride;
463 }
464 }
465 }
466 else if(mask_buf && opa < LV_OPA_MAX) {
467 if(LV_RESULT_INVALID == LV_DRAW_SW_L8_BLEND_NORMAL_TO_L8_MIX_MASK_OPA(dsc)) {
468 for(y = 0; y < h; y++) {
469 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
470 lv_color_8_8_mix(src_buf_l8[src_x], &dest_buf_l8[dest_x], LV_OPA_MIX2(mask_buf[src_x], opa));
471 }
472 dest_buf_l8 = drawbuf_next_row(dest_buf_l8, dest_stride);
473 src_buf_l8 = drawbuf_next_row(src_buf_l8, src_stride);
474 mask_buf += mask_stride;
475 }
476 }
477 }
478 }
479 else {
480 lv_color32_t src_argb;
481 for(y = 0; y < h; y++) {
482 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
483 src_argb.red = src_buf_l8[src_x];
484 src_argb.green = src_buf_l8[src_x];
485 src_argb.blue = src_buf_l8[src_x];
486 if(mask_buf == NULL) src_argb.alpha = opa;
487 else src_argb.alpha = LV_OPA_MIX2(mask_buf[dest_x], opa);
488 blend_non_normal_pixel(&dest_buf_l8[dest_x], src_argb, dsc->blend_mode);
489 }
490 if(mask_buf) mask_buf += mask_stride;
491 dest_buf_l8 = drawbuf_next_row(dest_buf_l8, dest_stride);
492 src_buf_l8 = drawbuf_next_row(src_buf_l8, src_stride);
493 }
494 }
495 }
496
497 #if LV_DRAW_SW_SUPPORT_AL88
498
al88_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)499 static void LV_ATTRIBUTE_FAST_MEM al88_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)
500 {
501 int32_t w = dsc->dest_w;
502 int32_t h = dsc->dest_h;
503 lv_opa_t opa = dsc->opa;
504 uint8_t * dest_buf_l8 = dsc->dest_buf;
505 int32_t dest_stride = dsc->dest_stride;
506 const lv_color16a_t * src_buf_al88 = dsc->src_buf;
507 int32_t src_stride = dsc->src_stride;
508 const lv_opa_t * mask_buf = dsc->mask_buf;
509 int32_t mask_stride = dsc->mask_stride;
510
511 int32_t dest_x;
512 int32_t src_x;
513 int32_t y;
514
515 if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
516 if(mask_buf == NULL && opa >= LV_OPA_MAX) {
517 if(LV_RESULT_INVALID == LV_DRAW_SW_AL88_BLEND_NORMAL_TO_L8(dsc)) {
518 for(y = 0; y < h; y++) {
519 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
520 lv_color_8_8_mix(src_buf_al88[src_x].lumi, &dest_buf_l8[dest_x], src_buf_al88[src_x].alpha);
521 }
522 dest_buf_l8 = drawbuf_next_row(dest_buf_l8, dest_stride);
523 src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
524 }
525 }
526 }
527 else if(mask_buf == NULL && opa < LV_OPA_MAX) {
528 if(LV_RESULT_INVALID == LV_DRAW_SW_AL88_BLEND_NORMAL_TO_L8_WITH_OPA(dsc)) {
529 for(y = 0; y < h; y++) {
530 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
531 lv_color_8_8_mix(src_buf_al88[src_x].lumi, &dest_buf_l8[dest_x], LV_OPA_MIX2(src_buf_al88[src_x].alpha, opa));
532 }
533 dest_buf_l8 = drawbuf_next_row(dest_buf_l8, dest_stride);
534 src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
535 }
536 }
537 }
538 else if(mask_buf && opa >= LV_OPA_MAX) {
539 if(LV_RESULT_INVALID == LV_DRAW_SW_AL88_BLEND_NORMAL_TO_L8_WITH_MASK(dsc)) {
540 for(y = 0; y < h; y++) {
541 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
542 lv_color_8_8_mix(src_buf_al88[src_x].lumi, &dest_buf_l8[dest_x], LV_OPA_MIX2(src_buf_al88[src_x].alpha,
543 mask_buf[src_x]));
544 }
545 dest_buf_l8 = drawbuf_next_row(dest_buf_l8, dest_stride);
546 src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
547 mask_buf += mask_stride;
548 }
549 }
550 }
551 else if(mask_buf && opa < LV_OPA_MAX) {
552 if(LV_RESULT_INVALID == LV_DRAW_SW_AL88_BLEND_NORMAL_TO_L8_MIX_MASK_OPA(dsc)) {
553 for(y = 0; y < h; y++) {
554 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
555 lv_color_8_8_mix(src_buf_al88[src_x].lumi, &dest_buf_l8[dest_x], LV_OPA_MIX3(src_buf_al88[src_x].alpha, mask_buf[src_x],
556 opa));
557 }
558 dest_buf_l8 = drawbuf_next_row(dest_buf_l8, dest_stride);
559 src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
560 mask_buf += mask_stride;
561 }
562 }
563 }
564 }
565 else {
566 lv_color32_t src_argb;
567 for(y = 0; y < h; y++) {
568 for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
569 src_argb.red = src_buf_al88[src_x].lumi;
570 src_argb.green = src_buf_al88[src_x].lumi;
571 src_argb.blue = src_buf_al88[src_x].lumi;
572 if(mask_buf == NULL) src_argb.alpha = opa;
573 else src_argb.alpha = LV_OPA_MIX2(mask_buf[dest_x], opa);
574 blend_non_normal_pixel(&dest_buf_l8[dest_x], src_argb, dsc->blend_mode);
575 }
576 if(mask_buf) mask_buf += mask_stride;
577 dest_buf_l8 = drawbuf_next_row(dest_buf_l8, dest_stride);
578 src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
579 }
580 }
581 }
582
583 #endif
584
585 #if LV_DRAW_SW_SUPPORT_RGB565
586
rgb565_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)587 static void LV_ATTRIBUTE_FAST_MEM rgb565_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)
588 {
589 int32_t w = dsc->dest_w;
590 int32_t h = dsc->dest_h;
591 lv_opa_t opa = dsc->opa;
592 uint8_t * dest_buf_u8 = dsc->dest_buf;
593 int32_t dest_stride = dsc->dest_stride;
594 const lv_color16_t * src_buf_c16 = (const lv_color16_t *)dsc->src_buf;
595 int32_t src_stride = dsc->src_stride;
596 const lv_opa_t * mask_buf = dsc->mask_buf;
597 int32_t mask_stride = dsc->mask_stride;
598
599 int32_t src_x;
600 int32_t dest_x;
601 int32_t y;
602
603 if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
604 if(mask_buf == NULL && opa >= LV_OPA_MAX) {
605 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_L8(dsc)) {
606 for(y = 0; y < h; y++) {
607 for(src_x = 0, dest_x = 0; src_x < w; dest_x++, src_x++) {
608 dest_buf_u8[dest_x] = lv_color16_luminance(src_buf_c16[src_x]);
609 }
610 dest_buf_u8 += dest_stride;
611 src_buf_c16 = drawbuf_next_row(src_buf_c16, src_stride);
612 }
613 }
614 }
615 else if(mask_buf == NULL && opa < LV_OPA_MAX) {
616 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_L8_WITH_OPA(dsc, dest_px_size)) {
617 for(y = 0; y < h; y++) {
618 for(src_x = 0, dest_x = 0; src_x < w; dest_x++, src_x++) {
619 lv_color_8_8_mix(lv_color16_luminance(src_buf_c16[src_x]), &dest_buf_u8[dest_x], opa);
620 }
621 dest_buf_u8 += dest_stride;
622 src_buf_c16 = drawbuf_next_row(src_buf_c16, src_stride);
623 }
624 }
625 }
626 else if(mask_buf && opa >= LV_OPA_MAX) {
627 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_L8_WITH_MASK(dsc, dest_px_size)) {
628 for(y = 0; y < h; y++) {
629 for(src_x = 0, dest_x = 0; src_x < w; dest_x++, src_x++) {
630 lv_color_8_8_mix(lv_color16_luminance(src_buf_c16[src_x]), &dest_buf_u8[dest_x], mask_buf[src_x]);
631 }
632 dest_buf_u8 += dest_stride;
633 src_buf_c16 = drawbuf_next_row(src_buf_c16, src_stride);
634 mask_buf += mask_stride;
635 }
636 }
637 }
638 else {
639 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_L8_MIX_MASK_OPA(dsc, dest_px_size)) {
640 for(y = 0; y < h; y++) {
641 for(src_x = 0, dest_x = 0; src_x < w; dest_x++, src_x++) {
642 lv_color_8_8_mix(lv_color16_luminance(src_buf_c16[src_x]), &dest_buf_u8[dest_x], LV_OPA_MIX2(opa, mask_buf[src_x]));
643 }
644 dest_buf_u8 += dest_stride;
645 src_buf_c16 = drawbuf_next_row(src_buf_c16, src_stride);
646 mask_buf += mask_stride;
647 }
648 }
649 }
650 }
651 else {
652 lv_color32_t src_argb;
653 for(y = 0; y < h; y++) {
654 for(src_x = 0, dest_x = 0; src_x < w; src_x++, dest_x++) {
655 src_argb.red = (src_buf_c16[src_x].red * 2106) >> 8;
656 src_argb.green = (src_buf_c16[src_x].green * 1037) >> 8;
657 src_argb.blue = (src_buf_c16[src_x].blue * 2106) >> 8;
658 if(mask_buf == NULL) src_argb.alpha = opa;
659 else src_argb.alpha = LV_OPA_MIX2(mask_buf[src_x], opa);
660 blend_non_normal_pixel(&dest_buf_u8[dest_x], src_argb, dsc->blend_mode);
661 }
662 if(mask_buf) mask_buf += mask_stride;
663 dest_buf_u8 += dest_stride;
664 src_buf_c16 = drawbuf_next_row(src_buf_c16, src_stride);
665 }
666 }
667 }
668
669 #endif
670
671 #if LV_DRAW_SW_SUPPORT_RGB888 || LV_DRAW_SW_SUPPORT_XRGB8888
672
rgb888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc,const uint8_t src_px_size)673 static void LV_ATTRIBUTE_FAST_MEM rgb888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc,
674 const uint8_t src_px_size)
675 {
676 int32_t w = dsc->dest_w;
677 int32_t h = dsc->dest_h;
678 lv_opa_t opa = dsc->opa;
679 uint8_t * dest_buf_l8 = dsc->dest_buf;
680 int32_t dest_stride = dsc->dest_stride;
681 const uint8_t * src_buf_u8 = dsc->src_buf;
682 int32_t src_stride = dsc->src_stride;
683 const lv_opa_t * mask_buf = dsc->mask_buf;
684 int32_t mask_stride = dsc->mask_stride;
685
686 int32_t dest_x;
687 int32_t src_x;
688 int32_t y;
689
690 if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
691 /*Special case*/
692 if(mask_buf == NULL && opa >= LV_OPA_MAX) {
693 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_L8(dsc, src_px_size)) {
694 for(y = 0; y < h; y++) {
695 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += src_px_size) {
696 dest_buf_l8[dest_x] = lv_color24_luminance(&src_buf_u8[src_x]);
697 }
698 dest_buf_l8 += dest_stride;
699 src_buf_u8 += src_stride;
700 }
701 }
702 }
703 if(mask_buf == NULL && opa < LV_OPA_MAX) {
704 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_L8_WITH_OPA(dsc, dest_px_size, src_px_size)) {
705 for(y = 0; y < h; y++) {
706 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += src_px_size) {
707 lv_color_8_8_mix(lv_color24_luminance(&src_buf_u8[src_x]), &dest_buf_l8[dest_x], opa);
708 }
709 dest_buf_l8 += dest_stride;
710 src_buf_u8 += src_stride;
711 }
712 }
713 }
714 if(mask_buf && opa >= LV_OPA_MAX) {
715 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_L8_WITH_MASK(dsc, dest_px_size, src_px_size)) {
716 uint32_t mask_x;
717 for(y = 0; y < h; y++) {
718 for(mask_x = 0, dest_x = 0, src_x = 0; dest_x < w; mask_x++, dest_x++, src_x += src_px_size) {
719 lv_color_8_8_mix(lv_color24_luminance(&src_buf_u8[src_x]), &dest_buf_l8[dest_x], mask_buf[mask_x]);
720 }
721 dest_buf_l8 += dest_stride;
722 src_buf_u8 += src_stride;
723 mask_buf += mask_stride;
724 }
725 }
726 }
727 if(mask_buf && opa < LV_OPA_MAX) {
728 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_L8_MIX_MASK_OPA(dsc, dest_px_size, src_px_size)) {
729 uint32_t mask_x;
730 for(y = 0; y < h; y++) {
731 for(mask_x = 0, dest_x = 0, src_x = 0; dest_x < w; mask_x++, dest_x++, src_x += src_px_size) {
732 lv_color_8_8_mix(lv_color24_luminance(&src_buf_u8[src_x]), &dest_buf_l8[dest_x], LV_OPA_MIX2(opa, mask_buf[mask_x]));
733 }
734 dest_buf_l8 += dest_stride;
735 src_buf_u8 += src_stride;
736 mask_buf += mask_stride;
737 }
738 }
739 }
740 }
741 else {
742 lv_color32_t src_argb;
743 for(y = 0; y < h; y++) {
744 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += src_px_size) {
745 src_argb.red = src_buf_u8[src_x + 2];
746 src_argb.green = src_buf_u8[src_x + 1];
747 src_argb.blue = src_buf_u8[src_x + 0];
748 if(mask_buf == NULL) src_argb.alpha = opa;
749 else src_argb.alpha = LV_OPA_MIX2(mask_buf[dest_x], opa);
750
751 blend_non_normal_pixel(&dest_buf_l8[dest_x], src_argb, dsc->blend_mode);
752 }
753 if(mask_buf) mask_buf += mask_stride;
754 dest_buf_l8 += dest_stride;
755 src_buf_u8 += src_stride;
756 }
757 }
758 }
759
760 #endif
761
#if LV_DRAW_SW_SUPPORT_ARGB8888

/*Fix: this code path previously invoked the LV_DRAW_SW_L8_BLEND_NORMAL_TO_L8* hooks,
 *so a custom L8->L8 assembly implementation would have been fed ARGB8888 source data.
 *Give the ARGB8888 path its own hook macros with the usual fallback definitions.*/
#ifndef LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_L8
    #define LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_L8(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_L8_WITH_OPA
    #define LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_L8_WITH_OPA(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_L8_WITH_MASK
    #define LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_L8_WITH_MASK(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_L8_MIX_MASK_OPA
    #define LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_L8_MIX_MASK_OPA(...) LV_RESULT_INVALID
#endif

/**
 * Blend an ARGB8888 image onto an L8 destination.
 * The source pixel's alpha always participates in the mix.
 * @param dsc  image blend descriptor
 */
static void LV_ATTRIBUTE_FAST_MEM argb8888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)
{
    int32_t w = dsc->dest_w;
    int32_t h = dsc->dest_h;
    lv_opa_t opa = dsc->opa;
    uint8_t * dest_buf_l8 = dsc->dest_buf;
    int32_t dest_stride = dsc->dest_stride;
    const lv_color32_t * src_buf_c32 = dsc->src_buf;
    int32_t src_stride = dsc->src_stride;
    const lv_opa_t * mask_buf = dsc->mask_buf;
    int32_t mask_stride = dsc->mask_stride;

    int32_t x;
    int32_t y;

    if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
        if(mask_buf == NULL && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_L8(dsc)) {
                for(y = 0; y < h; y++) {
                    for(x = 0; x < w; x++) {
                        lv_color_8_8_mix(lv_color32_luminance(src_buf_c32[x]), &dest_buf_l8[x], src_buf_c32[x].alpha);
                    }
                    dest_buf_l8 = drawbuf_next_row(dest_buf_l8, dest_stride);
                    src_buf_c32 = drawbuf_next_row(src_buf_c32, src_stride);
                }
            }
        }
        else if(mask_buf == NULL && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_L8_WITH_OPA(dsc)) {
                for(y = 0; y < h; y++) {
                    for(x = 0; x < w; x++) {
                        lv_color_8_8_mix(lv_color32_luminance(src_buf_c32[x]), &dest_buf_l8[x], LV_OPA_MIX2(src_buf_c32[x].alpha, opa));
                    }
                    dest_buf_l8 = drawbuf_next_row(dest_buf_l8, dest_stride);
                    src_buf_c32 = drawbuf_next_row(src_buf_c32, src_stride);
                }
            }
        }
        else if(mask_buf && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_L8_WITH_MASK(dsc)) {
                for(y = 0; y < h; y++) {
                    for(x = 0; x < w; x++) {
                        lv_color_8_8_mix(lv_color32_luminance(src_buf_c32[x]), &dest_buf_l8[x], LV_OPA_MIX2(src_buf_c32[x].alpha, mask_buf[x]));
                    }
                    dest_buf_l8 = drawbuf_next_row(dest_buf_l8, dest_stride);
                    src_buf_c32 = drawbuf_next_row(src_buf_c32, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
        else if(mask_buf && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_L8_MIX_MASK_OPA(dsc)) {
                for(y = 0; y < h; y++) {
                    for(x = 0; x < w; x++) {
                        lv_color_8_8_mix(lv_color32_luminance(src_buf_c32[x]), &dest_buf_l8[x], LV_OPA_MIX3(src_buf_c32[x].alpha, opa,
                                                                                                            mask_buf[x]));
                    }
                    dest_buf_l8 = drawbuf_next_row(dest_buf_l8, dest_stride);
                    src_buf_c32 = drawbuf_next_row(src_buf_c32, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
    }
    else {
        for(y = 0; y < h; y++) {
            for(x = 0; x < w; x++) {
                lv_color32_t color_argb = src_buf_c32[x];
                if(mask_buf == NULL) color_argb.alpha = LV_OPA_MIX2(color_argb.alpha, opa);
                else color_argb.alpha = LV_OPA_MIX3(color_argb.alpha, mask_buf[x], opa);
                blend_non_normal_pixel(&dest_buf_l8[x], color_argb, dsc->blend_mode);
            }
            if(mask_buf) mask_buf += mask_stride;
            dest_buf_l8 = drawbuf_next_row(dest_buf_l8, dest_stride);
            src_buf_c32 = drawbuf_next_row(src_buf_c32, src_stride);
        }
    }
}

#endif
844
lv_color_8_8_mix(const uint8_t src,uint8_t * dest,uint8_t mix)845 static inline void LV_ATTRIBUTE_FAST_MEM lv_color_8_8_mix(const uint8_t src, uint8_t * dest, uint8_t mix)
846 {
847
848 if(mix == 0) return;
849
850 if(mix >= LV_OPA_MAX) {
851 *dest = src;
852 }
853 else {
854 lv_opa_t mix_inv = 255 - mix;
855 *dest = (uint32_t)((uint32_t)src * mix + dest[0] * mix_inv) >> 8;
856 }
857 }
858
859
860 #if LV_DRAW_SW_SUPPORT_I1
861
get_bit(const uint8_t * buf,int32_t bit_idx)862 static inline uint8_t LV_ATTRIBUTE_FAST_MEM get_bit(const uint8_t * buf, int32_t bit_idx)
863 {
864 return (buf[bit_idx / 8] >> (7 - (bit_idx % 8))) & 1;
865 }
866
867 #endif
868
blend_non_normal_pixel(uint8_t * dest,lv_color32_t src,lv_blend_mode_t mode)869 static inline void LV_ATTRIBUTE_FAST_MEM blend_non_normal_pixel(uint8_t * dest, lv_color32_t src, lv_blend_mode_t mode)
870 {
871 uint8_t res;
872 int32_t src_lumi = lv_color32_luminance(src);
873 switch(mode) {
874 case LV_BLEND_MODE_ADDITIVE:
875 res = LV_MIN(*dest + src_lumi, 255);
876 break;
877 case LV_BLEND_MODE_SUBTRACTIVE:
878 res = LV_MAX(*dest - src_lumi, 0);
879 break;
880 case LV_BLEND_MODE_MULTIPLY:
881 res = (*dest * src_lumi) >> 8;
882 break;
883 default:
884 LV_LOG_WARN("Not supported blend mode: %d", mode);
885 return;
886 }
887 lv_color_8_8_mix(res, dest, src.alpha);
888 }
889
drawbuf_next_row(const void * buf,uint32_t stride)890 static inline void * LV_ATTRIBUTE_FAST_MEM drawbuf_next_row(const void * buf, uint32_t stride)
891 {
892 return (void *)((uint8_t *)buf + stride);
893 }
894
895 #endif
896
897 #endif
898