1 /**
2 * @file lv_draw_sw_blend_to_rgb565.c
3 *
4 */
5
6 /*********************
7 * INCLUDES
8 *********************/
9 #include "lv_draw_sw_blend_to_rgb565.h"
10 #if LV_USE_DRAW_SW
11
12 #if LV_DRAW_SW_SUPPORT_RGB565
13
14 #include "lv_draw_sw_blend_private.h"
15 #include "../../../misc/lv_math.h"
16 #include "../../../display/lv_display.h"
17 #include "../../../core/lv_refr.h"
18 #include "../../../misc/lv_color.h"
19 #include "../../../stdlib/lv_string.h"
20
21 #if LV_USE_DRAW_SW_ASM == LV_DRAW_SW_ASM_NEON
22 #include "neon/lv_blend_neon.h"
23 #elif LV_USE_DRAW_SW_ASM == LV_DRAW_SW_ASM_HELIUM
24 #include "helium/lv_blend_helium.h"
25 #elif LV_USE_DRAW_SW_ASM == LV_DRAW_SW_ASM_CUSTOM
26 #include LV_DRAW_SW_ASM_CUSTOM_INCLUDE
27 #endif
28
29 /*********************
30 * DEFINES
31 *********************/
32
33 /**********************
34 * TYPEDEFS
35 **********************/
36
37 /**********************
38 * STATIC PROTOTYPES
39 **********************/
40
41 #if LV_DRAW_SW_SUPPORT_AL88
42 static void /* LV_ATTRIBUTE_FAST_MEM */ al88_image_blend(lv_draw_sw_blend_image_dsc_t * dsc);
43 #endif
44
45 #if LV_DRAW_SW_SUPPORT_I1
46 static void /* LV_ATTRIBUTE_FAST_MEM */ i1_image_blend(lv_draw_sw_blend_image_dsc_t * dsc);
47
48 static inline uint8_t /* LV_ATTRIBUTE_FAST_MEM */ get_bit(const uint8_t * buf, int32_t bit_idx);
49 #endif
50
51 #if LV_DRAW_SW_SUPPORT_L8
52 static void /* LV_ATTRIBUTE_FAST_MEM */ l8_image_blend(lv_draw_sw_blend_image_dsc_t * dsc);
53 #endif
54
55 static void /* LV_ATTRIBUTE_FAST_MEM */ rgb565_image_blend(lv_draw_sw_blend_image_dsc_t * dsc);
56
57 #if LV_DRAW_SW_SUPPORT_RGB888 || LV_DRAW_SW_SUPPORT_XRGB8888
58 static void /* LV_ATTRIBUTE_FAST_MEM */ rgb888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc,
59 const uint8_t src_px_size);
60 #endif
61
62 #if LV_DRAW_SW_SUPPORT_ARGB8888
63 static void /* LV_ATTRIBUTE_FAST_MEM */ argb8888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc);
64 #endif
65
66 static inline uint16_t /* LV_ATTRIBUTE_FAST_MEM */ l8_to_rgb565(const uint8_t c1);
67
68 static inline uint16_t /* LV_ATTRIBUTE_FAST_MEM */ lv_color_8_16_mix(const uint8_t c1, uint16_t c2, uint8_t mix);
69
70 static inline uint16_t /* LV_ATTRIBUTE_FAST_MEM */ lv_color_24_16_mix(const uint8_t * c1, uint16_t c2, uint8_t mix);
71
72 static inline void * /* LV_ATTRIBUTE_FAST_MEM */ drawbuf_next_row(const void * buf, uint32_t stride);
73
74 /**********************
75 * STATIC VARIABLES
76 **********************/
77
78 /**********************
79 * MACROS
80 **********************/
81
82 #ifndef LV_DRAW_SW_COLOR_BLEND_TO_RGB565
83 #define LV_DRAW_SW_COLOR_BLEND_TO_RGB565(...) LV_RESULT_INVALID
84 #endif
85
86 #ifndef LV_DRAW_SW_COLOR_BLEND_TO_RGB565_WITH_OPA
87 #define LV_DRAW_SW_COLOR_BLEND_TO_RGB565_WITH_OPA(...) LV_RESULT_INVALID
88 #endif
89
90 #ifndef LV_DRAW_SW_COLOR_BLEND_TO_RGB565_WITH_MASK
91 #define LV_DRAW_SW_COLOR_BLEND_TO_RGB565_WITH_MASK(...) LV_RESULT_INVALID
92 #endif
93
94 #ifndef LV_DRAW_SW_COLOR_BLEND_TO_RGB565_MIX_MASK_OPA
95 #define LV_DRAW_SW_COLOR_BLEND_TO_RGB565_MIX_MASK_OPA(...) LV_RESULT_INVALID
96 #endif
97
98 #ifndef LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB565
99 #define LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB565(...) LV_RESULT_INVALID
100 #endif
101
102 #ifndef LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB565_WITH_OPA
103 #define LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB565_WITH_OPA(...) LV_RESULT_INVALID
104 #endif
105
106 #ifndef LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB565_WITH_MASK
107 #define LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB565_WITH_MASK(...) LV_RESULT_INVALID
108 #endif
109
110 #ifndef LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB565_MIX_MASK_OPA
111 #define LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB565_MIX_MASK_OPA(...) LV_RESULT_INVALID
112 #endif
113
114 #ifndef LV_DRAW_SW_AL88_BLEND_NORMAL_TO_RGB565
115 #define LV_DRAW_SW_AL88_BLEND_NORMAL_TO_RGB565(...) LV_RESULT_INVALID
116 #endif
117
118 #ifndef LV_DRAW_SW_AL88_BLEND_NORMAL_TO_RGB565_WITH_OPA
119 #define LV_DRAW_SW_AL88_BLEND_NORMAL_TO_RGB565_WITH_OPA(...) LV_RESULT_INVALID
120 #endif
121
122 #ifndef LV_DRAW_SW_AL88_BLEND_NORMAL_TO_RGB565_WITH_MASK
123 #define LV_DRAW_SW_AL88_BLEND_NORMAL_TO_RGB565_WITH_MASK(...) LV_RESULT_INVALID
124 #endif
125
126 #ifndef LV_DRAW_SW_AL88_BLEND_NORMAL_TO_RGB565_MIX_MASK_OPA
127 #define LV_DRAW_SW_AL88_BLEND_NORMAL_TO_RGB565_MIX_MASK_OPA(...) LV_RESULT_INVALID
128 #endif
129
130 #ifndef LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_RGB565
131 #define LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_RGB565(...) LV_RESULT_INVALID
132 #endif
133
134 #ifndef LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_RGB565_WITH_OPA
135 #define LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_RGB565_WITH_OPA(...) LV_RESULT_INVALID
136 #endif
137
138 #ifndef LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_RGB565_WITH_MASK
139 #define LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_RGB565_WITH_MASK(...) LV_RESULT_INVALID
140 #endif
141
142 #ifndef LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_RGB565_MIX_MASK_OPA
143 #define LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_RGB565_MIX_MASK_OPA(...) LV_RESULT_INVALID
144 #endif
145
146 #ifndef LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_RGB565
147 #define LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_RGB565(...) LV_RESULT_INVALID
148 #endif
149
150 #ifndef LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_RGB565_WITH_OPA
151 #define LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_RGB565_WITH_OPA(...) LV_RESULT_INVALID
152 #endif
153
154 #ifndef LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_RGB565_WITH_MASK
155 #define LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_RGB565_WITH_MASK(...) LV_RESULT_INVALID
156 #endif
157
158 #ifndef LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_RGB565_MIX_MASK_OPA
159 #define LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_RGB565_MIX_MASK_OPA(...) LV_RESULT_INVALID
160 #endif
161
162 #ifndef LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_RGB565
163 #define LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_RGB565(...) LV_RESULT_INVALID
164 #endif
165
166 #ifndef LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_RGB565_WITH_OPA
167 #define LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_RGB565_WITH_OPA(...) LV_RESULT_INVALID
168 #endif
169
170 #ifndef LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_RGB565_WITH_MASK
171 #define LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_RGB565_WITH_MASK(...) LV_RESULT_INVALID
172 #endif
173
174 #ifndef LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_RGB565_MIX_MASK_OPA
175 #define LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_RGB565_MIX_MASK_OPA(...) LV_RESULT_INVALID
176 #endif
177
178 #ifndef LV_DRAW_SW_I1_BLEND_NORMAL_TO_RGB565
179 #define LV_DRAW_SW_I1_BLEND_NORMAL_TO_RGB565(...) LV_RESULT_INVALID
180 #endif
181
182 #ifndef LV_DRAW_SW_I1_BLEND_NORMAL_TO_RGB565_WITH_OPA
183 #define LV_DRAW_SW_I1_BLEND_NORMAL_TO_RGB565_WITH_OPA(...) LV_RESULT_INVALID
184 #endif
185
186 #ifndef LV_DRAW_SW_I1_BLEND_NORMAL_TO_RGB565_WITH_MASK
187 #define LV_DRAW_SW_I1_BLEND_NORMAL_TO_RGB565_WITH_MASK(...) LV_RESULT_INVALID
188 #endif
189
190 #ifndef LV_DRAW_SW_I1_BLEND_NORMAL_TO_RGB565_MIX_MASK_OPA
191 #define LV_DRAW_SW_I1_BLEND_NORMAL_TO_RGB565_MIX_MASK_OPA(...) LV_RESULT_INVALID
192 #endif
193
194 /**********************
195 * GLOBAL FUNCTIONS
196 **********************/
197
198 /**
199 * Fill an area with a color.
200 * Supports normal fill, fill with opacity, fill with mask, and fill with mask and opacity.
201 * dest_buf and color have native color depth. (RGB565, RGB888, XRGB8888)
202 * The background (dest_buf) cannot have alpha channel
203 * @param dest_buf
204 * @param dest_area
205 * @param dest_stride
206 * @param color
207 * @param opa
208 * @param mask
209 * @param mask_stride
210 */
void LV_ATTRIBUTE_FAST_MEM lv_draw_sw_blend_color_to_rgb565(lv_draw_sw_blend_fill_dsc_t * dsc)
{
    /*Fill an RGB565 destination area with a single color.
     *Four cases are handled separately: simple fill, fill with opacity,
     *fill with mask, and fill with mask + opacity. Each case first tries the
     *optional ASM/custom macro and falls back to the C loop when it returns
     *LV_RESULT_INVALID.*/
    int32_t w = dsc->dest_w;
    int32_t h = dsc->dest_h;
    uint16_t color16 = lv_color_to_u16(dsc->color);  /*Fill color converted to RGB565*/
    lv_opa_t opa = dsc->opa;
    const lv_opa_t * mask = dsc->mask_buf;
    int32_t mask_stride = dsc->mask_stride;
    uint16_t * dest_buf_u16 = dsc->dest_buf;
    int32_t dest_stride = dsc->dest_stride;          /*Destination stride in bytes*/

    int32_t x;
    int32_t y;

    /*Silence unused warnings: when an ASM macro handles a case, the
     *corresponding locals of the C fallback are never read*/
    LV_UNUSED(w);
    LV_UNUSED(h);
    LV_UNUSED(x);
    LV_UNUSED(y);
    LV_UNUSED(opa);
    LV_UNUSED(mask);
    LV_UNUSED(color16);
    LV_UNUSED(mask_stride);
    LV_UNUSED(dest_stride);
    LV_UNUSED(dest_buf_u16);

    /*Simple fill*/
    if(mask == NULL && opa >= LV_OPA_MAX) {
        if(LV_RESULT_INVALID == LV_DRAW_SW_COLOR_BLEND_TO_RGB565(dsc)) {
            for(y = 0; y < h; y++) {
                /*End of the whole row, and end of the 16-pixel-aligned middle
                 *section (computed from the row start, before alignment)*/
                uint16_t * dest_end_final = dest_buf_u16 + w;
                uint32_t * dest_end_mid = (uint32_t *)((uint16_t *) dest_buf_u16 + ((w - 1) & ~(0xF)));
                /*Write one pixel if the row start is not 4-byte aligned so the
                 *32-bit wide loop below works on an aligned address*/
                if((lv_uintptr_t)&dest_buf_u16[0] & 0x3) {
                    dest_buf_u16[0] = color16;
                    dest_buf_u16++;
                }

                /*Two RGB565 pixels packed into one 32-bit word; the unrolled
                 *loop stores 16 pixels (8 words) per iteration*/
                uint32_t c32 = (uint32_t)color16 + ((uint32_t)color16 << 16);
                uint32_t * dest32 = (uint32_t *)dest_buf_u16;
                while(dest32 < dest_end_mid) {
                    dest32[0] = c32;
                    dest32[1] = c32;
                    dest32[2] = c32;
                    dest32[3] = c32;
                    dest32[4] = c32;
                    dest32[5] = c32;
                    dest32[6] = c32;
                    dest32[7] = c32;
                    dest32 += 8;
                }

                dest_buf_u16 = (uint16_t *)dest32;

                /*Finish the remaining (up to 15) tail pixels one by one*/
                while(dest_buf_u16 < dest_end_final) {
                    *dest_buf_u16 = color16;
                    dest_buf_u16++;
                }

                /*dest_buf_u16 is now at the end of the row: step one stride
                 *forward, then rewind w pixels to reach the next row start*/
                dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
                dest_buf_u16 -= w;
            }
        }

    }
    /*Opacity only*/
    else if(mask == NULL && opa < LV_OPA_MAX) {
        if(LV_RESULT_INVALID == LV_DRAW_SW_COLOR_BLEND_TO_RGB565_WITH_OPA(dsc)) {
            /*Cache of the last blended aligned pixel pair: if the same 32-bit
             *background value comes up again, reuse the previous result
             *instead of mixing twice*/
            uint32_t last_dest32_color = dest_buf_u16[0] + 1; /*Set to value which is not equal to the first pixel*/
            uint32_t last_res32_color = 0;

            for(y = 0; y < h; y++) {
                x = 0;
                /*Blend one pixel if the row start is not 4-byte aligned so the
                 *pair loop below reads aligned 32-bit words*/
                if((lv_uintptr_t)&dest_buf_u16[0] & 0x3) {
                    dest_buf_u16[0] = lv_color_16_16_mix(color16, dest_buf_u16[0], opa);
                    x = 1;
                }

                for(; x < w - 2; x += 2) {
                    if(dest_buf_u16[x] != dest_buf_u16[x + 1]) {
                        /*The two pixels differ: mix each individually*/
                        dest_buf_u16[x + 0] = lv_color_16_16_mix(color16, dest_buf_u16[x + 0], opa);
                        dest_buf_u16[x + 1] = lv_color_16_16_mix(color16, dest_buf_u16[x + 1], opa);
                    }
                    else {
                        /*Equal pixel pair: handle as one 32-bit word.
                         *NOTE(review): volatile appears intended to force a real
                         *single 32-bit access despite the aliasing cast — confirm*/
                        volatile uint32_t * dest32 = (uint32_t *)&dest_buf_u16[x];
                        if(last_dest32_color == *dest32) {
                            /*Same background as last time: reuse cached result*/
                            *dest32 = last_res32_color;
                        }
                        else {
                            last_dest32_color = *dest32;

                            /*Mix once and duplicate to both pixels*/
                            dest_buf_u16[x] = lv_color_16_16_mix(color16, dest_buf_u16[x + 0], opa);
                            dest_buf_u16[x + 1] = dest_buf_u16[x];

                            last_res32_color = *dest32;
                        }
                    }
                }

                /*Tail pixels not covered by the pair loop*/
                for(; x < w ; x++) {
                    dest_buf_u16[x] = lv_color_16_16_mix(color16, dest_buf_u16[x], opa);
                }
                dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
            }
        }

    }

    /*Masked with full opacity*/
    else if(mask && opa >= LV_OPA_MAX) {
        if(LV_RESULT_INVALID == LV_DRAW_SW_COLOR_BLEND_TO_RGB565_WITH_MASK(dsc)) {
            for(y = 0; y < h; y++) {
                x = 0;
                /*Consume one mask byte if needed so the 16-bit mask reads
                 *below are 2-byte aligned*/
                if((lv_uintptr_t)(mask) & 0x1) {
                    dest_buf_u16[x] = lv_color_16_16_mix(color16, dest_buf_u16[x], mask[x]);
                    x++;
                }

                /*Process mask bytes in pairs with fast paths for fully opaque
                 *(0xFFFF: plain store) and fully transparent (0: skip) pairs*/
                for(; x <= w - 2; x += 2) {
                    uint16_t mask16 = *((uint16_t *)&mask[x]);
                    if(mask16 == 0xFFFF) {
                        dest_buf_u16[x + 0] = color16;
                        dest_buf_u16[x + 1] = color16;
                    }
                    else if(mask16 != 0) {
                        dest_buf_u16[x + 0] = lv_color_16_16_mix(color16, dest_buf_u16[x + 0], mask[x + 0]);
                        dest_buf_u16[x + 1] = lv_color_16_16_mix(color16, dest_buf_u16[x + 1], mask[x + 1]);
                    }
                }

                /*Tail pixel (when w is odd relative to the alignment above)*/
                for(; x < w ; x++) {
                    dest_buf_u16[x] = lv_color_16_16_mix(color16, dest_buf_u16[x], mask[x]);
                }
                dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
                mask += mask_stride;
            }
        }

    }
    /*Masked with opacity*/
    else if(mask && opa < LV_OPA_MAX) {
        if(LV_RESULT_INVALID == LV_DRAW_SW_COLOR_BLEND_TO_RGB565_MIX_MASK_OPA(dsc)) {
            for(y = 0; y < h; y++) {
                for(x = 0; x < w; x++) {
                    /*Effective opacity is the product of the mask and opa*/
                    dest_buf_u16[x] = lv_color_16_16_mix(color16, dest_buf_u16[x], LV_OPA_MIX2(mask[x], opa));
                }
                dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
                mask += mask_stride;
            }
        }
    }
}
361
lv_draw_sw_blend_image_to_rgb565(lv_draw_sw_blend_image_dsc_t * dsc)362 void LV_ATTRIBUTE_FAST_MEM lv_draw_sw_blend_image_to_rgb565(lv_draw_sw_blend_image_dsc_t * dsc)
363 {
364 switch(dsc->src_color_format) {
365 case LV_COLOR_FORMAT_RGB565:
366 rgb565_image_blend(dsc);
367 break;
368 #if LV_DRAW_SW_SUPPORT_RGB888
369 case LV_COLOR_FORMAT_RGB888:
370 rgb888_image_blend(dsc, 3);
371 break;
372 #endif
373 #if LV_DRAW_SW_SUPPORT_XRGB8888
374 case LV_COLOR_FORMAT_XRGB8888:
375 rgb888_image_blend(dsc, 4);
376 break;
377 #endif
378 #if LV_DRAW_SW_SUPPORT_ARGB8888
379 case LV_COLOR_FORMAT_ARGB8888:
380 argb8888_image_blend(dsc);
381 break;
382 #endif
383 #if LV_DRAW_SW_SUPPORT_L8
384 case LV_COLOR_FORMAT_L8:
385 l8_image_blend(dsc);
386 break;
387 #endif
388 #if LV_DRAW_SW_SUPPORT_AL88
389 case LV_COLOR_FORMAT_AL88:
390 al88_image_blend(dsc);
391 break;
392 #endif
393 #if LV_DRAW_SW_SUPPORT_I1
394 case LV_COLOR_FORMAT_I1:
395 i1_image_blend(dsc);
396 break;
397 #endif
398 default:
399 LV_LOG_WARN("Not supported source color format");
400 break;
401 }
402 }
403
404 /**********************
405 * STATIC FUNCTIONS
406 **********************/
407
408 #if LV_DRAW_SW_SUPPORT_I1
i1_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)409 static void LV_ATTRIBUTE_FAST_MEM i1_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)
410 {
411 int32_t w = dsc->dest_w;
412 int32_t h = dsc->dest_h;
413 lv_opa_t opa = dsc->opa;
414 uint16_t * dest_buf_u16 = dsc->dest_buf;
415 int32_t dest_stride = dsc->dest_stride;
416 const uint8_t * src_buf_i1 = dsc->src_buf;
417 int32_t src_stride = dsc->src_stride;
418 const lv_opa_t * mask_buf = dsc->mask_buf;
419 int32_t mask_stride = dsc->mask_stride;
420
421 int32_t dest_x;
422 int32_t src_x;
423 int32_t y;
424
425 if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
426 if(mask_buf == NULL && opa >= LV_OPA_MAX) {
427 if(LV_RESULT_INVALID == LV_DRAW_SW_I1_BLEND_NORMAL_TO_RGB565(dsc)) {
428 for(y = 0; y < h; y++) {
429 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x++) {
430 uint8_t chan_val = get_bit(src_buf_i1, src_x) * 255;
431 dest_buf_u16[dest_x] = l8_to_rgb565(chan_val);
432 }
433 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
434 src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
435 }
436 }
437 }
438 else if(mask_buf == NULL && opa < LV_OPA_MAX) {
439 if(LV_RESULT_INVALID == LV_DRAW_SW_I1_BLEND_NORMAL_TO_RGB565_WITH_OPA(dsc)) {
440 for(y = 0; y < h; y++) {
441 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x++) {
442 uint8_t chan_val = get_bit(src_buf_i1, src_x) * 255;
443 dest_buf_u16[dest_x] = lv_color_8_16_mix(chan_val, dest_buf_u16[dest_x], opa);
444 }
445 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
446 src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
447 }
448 }
449 }
450 else if(mask_buf && opa >= LV_OPA_MAX) {
451 if(LV_RESULT_INVALID == LV_DRAW_SW_I1_BLEND_NORMAL_TO_RGB565_WITH_MASK(dsc)) {
452
453 for(y = 0; y < h; y++) {
454 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x++) {
455 uint8_t chan_val = get_bit(src_buf_i1, src_x) * 255;
456 dest_buf_u16[dest_x] = lv_color_8_16_mix(chan_val, dest_buf_u16[dest_x], mask_buf[dest_x]);
457 }
458 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
459 src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
460 mask_buf += mask_stride;
461 }
462 }
463 }
464 else if(mask_buf && opa < LV_OPA_MAX) {
465 if(LV_RESULT_INVALID == LV_DRAW_SW_I1_BLEND_NORMAL_TO_RGB565_MIX_MASK_OPA(dsc)) {
466 for(y = 0; y < h; y++) {
467 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x++) {
468 uint8_t chan_val = get_bit(src_buf_i1, src_x) * 255;
469 dest_buf_u16[dest_x] = lv_color_8_16_mix(chan_val, dest_buf_u16[dest_x], LV_OPA_MIX2(mask_buf[dest_x], opa));
470 }
471 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
472 src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
473 mask_buf += mask_stride;
474 }
475 }
476 }
477 }
478 else {
479 for(y = 0; y < h; y++) {
480 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += 4) {
481 uint16_t res = 0;
482 uint8_t chan_val = get_bit(src_buf_i1, src_x) * 255;
483 switch(dsc->blend_mode) {
484 case LV_BLEND_MODE_ADDITIVE:
485 // Additive blending mode
486 res = (LV_MIN(dest_buf_u16[dest_x] + l8_to_rgb565(chan_val), 0xFFFF));
487 break;
488 case LV_BLEND_MODE_SUBTRACTIVE:
489 // Subtractive blending mode
490 res = (LV_MAX(dest_buf_u16[dest_x] - l8_to_rgb565(chan_val), 0));
491 break;
492 case LV_BLEND_MODE_MULTIPLY:
493 // Multiply blending mode
494 res = ((((dest_buf_u16[dest_x] >> 11) * (l8_to_rgb565(chan_val) >> 3)) & 0x1F) << 11) |
495 ((((dest_buf_u16[dest_x] >> 5) & 0x3F) * ((l8_to_rgb565(chan_val) >> 2) & 0x3F) >> 6) << 5) |
496 (((dest_buf_u16[dest_x] & 0x1F) * (l8_to_rgb565(chan_val) & 0x1F)) >> 5);
497 break;
498 default:
499 LV_LOG_WARN("Not supported blend mode: %d", dsc->blend_mode);
500 return;
501 }
502
503 if(mask_buf == NULL && opa >= LV_OPA_MAX) {
504 dest_buf_u16[dest_x] = res;
505 }
506 else if(mask_buf == NULL && opa < LV_OPA_MAX) {
507 dest_buf_u16[dest_x] = lv_color_16_16_mix(res, dest_buf_u16[dest_x], opa);
508 }
509 else {
510 if(opa >= LV_OPA_MAX)
511 dest_buf_u16[dest_x] = lv_color_16_16_mix(res, dest_buf_u16[dest_x], mask_buf[dest_x]);
512 else
513 dest_buf_u16[dest_x] = lv_color_16_16_mix(res, dest_buf_u16[dest_x], LV_OPA_MIX2(mask_buf[dest_x], opa));
514 }
515 }
516
517 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
518 src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
519 if(mask_buf) mask_buf += mask_stride;
520 }
521 }
522 }
523 #endif
524
525 #if LV_DRAW_SW_SUPPORT_AL88
al88_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)526 static void LV_ATTRIBUTE_FAST_MEM al88_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)
527 {
528 int32_t w = dsc->dest_w;
529 int32_t h = dsc->dest_h;
530 lv_opa_t opa = dsc->opa;
531 uint16_t * dest_buf_u16 = dsc->dest_buf;
532 int32_t dest_stride = dsc->dest_stride;
533 const lv_color16a_t * src_buf_al88 = dsc->src_buf;
534 int32_t src_stride = dsc->src_stride;
535 const lv_opa_t * mask_buf = dsc->mask_buf;
536 int32_t mask_stride = dsc->mask_stride;
537
538 int32_t dest_x;
539 int32_t src_x;
540 int32_t y;
541
542 if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
543 if(mask_buf == NULL && opa >= LV_OPA_MAX) {
544 if(LV_RESULT_INVALID == LV_DRAW_SW_AL88_BLEND_NORMAL_TO_RGB565(dsc)) {
545 for(y = 0; y < h; y++) {
546 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x++) {
547 dest_buf_u16[dest_x] = lv_color_8_16_mix(src_buf_al88[src_x].lumi, dest_buf_u16[dest_x], src_buf_al88[src_x].alpha);
548 }
549 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
550 src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
551 }
552 }
553 }
554 else if(mask_buf == NULL && opa < LV_OPA_MAX) {
555 if(LV_RESULT_INVALID == LV_DRAW_SW_AL88_BLEND_NORMAL_TO_RGB565_WITH_OPA(dsc)) {
556 for(y = 0; y < h; y++) {
557 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x++) {
558 dest_buf_u16[dest_x] = lv_color_8_16_mix(src_buf_al88[src_x].lumi, dest_buf_u16[dest_x],
559 LV_OPA_MIX2(src_buf_al88[src_x].alpha, opa));
560 }
561 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
562 src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
563 }
564 }
565 }
566 else if(mask_buf && opa >= LV_OPA_MAX) {
567 if(LV_RESULT_INVALID == LV_DRAW_SW_AL88_BLEND_NORMAL_TO_RGB565_WITH_MASK(dsc)) {
568 for(y = 0; y < h; y++) {
569 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x++) {
570 dest_buf_u16[dest_x] = lv_color_8_16_mix(src_buf_al88[src_x].lumi, dest_buf_u16[dest_x],
571 LV_OPA_MIX2(src_buf_al88[src_x].alpha, mask_buf[dest_x]));
572 }
573 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
574 src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
575 mask_buf += mask_stride;
576 }
577 }
578 }
579 else if(mask_buf && opa < LV_OPA_MAX) {
580 if(LV_RESULT_INVALID == LV_DRAW_SW_AL88_BLEND_NORMAL_TO_RGB565_MIX_MASK_OPA(dsc)) {
581 for(y = 0; y < h; y++) {
582 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x++) {
583 dest_buf_u16[dest_x] = lv_color_8_16_mix(src_buf_al88[src_x].lumi, dest_buf_u16[dest_x],
584 LV_OPA_MIX3(src_buf_al88[src_x].alpha, mask_buf[dest_x], opa));
585 }
586 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
587 src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
588 mask_buf += mask_stride;
589 }
590 }
591 }
592 }
593 else {
594 uint16_t res = 0;
595 for(y = 0; y < h; y++) {
596 lv_color16_t * dest_buf_c16 = (lv_color16_t *)dest_buf_u16;
597 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += 4) {
598 uint8_t rb = src_buf_al88[src_x].lumi >> 3;
599 uint8_t g = src_buf_al88[src_x].lumi >> 2;
600 switch(dsc->blend_mode) {
601 case LV_BLEND_MODE_ADDITIVE:
602 res = (LV_MIN(dest_buf_c16[dest_x].red + rb, 31)) << 11;
603 res += (LV_MIN(dest_buf_c16[dest_x].green + g, 63)) << 5;
604 res += LV_MIN(dest_buf_c16[dest_x].blue + rb, 31);
605 break;
606 case LV_BLEND_MODE_SUBTRACTIVE:
607 res = (LV_MAX(dest_buf_c16[dest_x].red - rb, 0)) << 11;
608 res += (LV_MAX(dest_buf_c16[dest_x].green - g, 0)) << 5;
609 res += LV_MAX(dest_buf_c16[dest_x].blue - rb, 0);
610 break;
611 case LV_BLEND_MODE_MULTIPLY:
612 res = ((dest_buf_c16[dest_x].red * rb) >> 5) << 11;
613 res += ((dest_buf_c16[dest_x].green * g) >> 6) << 5;
614 res += (dest_buf_c16[dest_x].blue * rb) >> 5;
615 break;
616 default:
617 LV_LOG_WARN("Not supported blend mode: %d", dsc->blend_mode);
618 return;
619 }
620 if(mask_buf == NULL && opa >= LV_OPA_MAX) {
621 dest_buf_u16[dest_x] = lv_color_16_16_mix(res, dest_buf_u16[dest_x], src_buf_al88[src_x].alpha);
622 }
623 else if(mask_buf == NULL && opa < LV_OPA_MAX) {
624 dest_buf_u16[dest_x] = lv_color_16_16_mix(res, dest_buf_u16[dest_x], LV_OPA_MIX2(opa, src_buf_al88[src_x].alpha));
625 }
626 else {
627 if(opa >= LV_OPA_MAX) dest_buf_u16[dest_x] = lv_color_16_16_mix(res, dest_buf_u16[dest_x], mask_buf[dest_x]);
628 else dest_buf_u16[dest_x] = lv_color_16_16_mix(res, dest_buf_u16[dest_x], LV_OPA_MIX3(mask_buf[dest_x], opa,
629 src_buf_al88[src_x].alpha));
630 }
631 }
632
633 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
634 src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
635 if(mask_buf) mask_buf += mask_stride;
636 }
637 }
638 }
639
640 #endif
641
642 #if LV_DRAW_SW_SUPPORT_L8
643
l8_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)644 static void LV_ATTRIBUTE_FAST_MEM l8_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)
645 {
646 int32_t w = dsc->dest_w;
647 int32_t h = dsc->dest_h;
648 lv_opa_t opa = dsc->opa;
649 uint16_t * dest_buf_u16 = dsc->dest_buf;
650 int32_t dest_stride = dsc->dest_stride;
651 const uint8_t * src_buf_l8 = dsc->src_buf;
652 int32_t src_stride = dsc->src_stride;
653 const lv_opa_t * mask_buf = dsc->mask_buf;
654 int32_t mask_stride = dsc->mask_stride;
655
656 int32_t dest_x;
657 int32_t src_x;
658 int32_t y;
659
660 if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
661 if(mask_buf == NULL && opa >= LV_OPA_MAX) {
662 if(LV_RESULT_INVALID == LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB565(dsc)) {
663 for(y = 0; y < h; y++) {
664 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x++) {
665 dest_buf_u16[dest_x] = l8_to_rgb565(src_buf_l8[src_x]);
666 }
667 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
668 src_buf_l8 += src_stride;
669 }
670 }
671 }
672 else if(mask_buf == NULL && opa < LV_OPA_MAX) {
673 if(LV_RESULT_INVALID == LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB565_WITH_OPA(dsc)) {
674 for(y = 0; y < h; y++) {
675 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x++) {
676 dest_buf_u16[dest_x] = lv_color_8_16_mix(src_buf_l8[src_x], dest_buf_u16[dest_x], opa);
677 }
678 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
679 src_buf_l8 += src_stride;
680 }
681 }
682 }
683 else if(mask_buf && opa >= LV_OPA_MAX) {
684 if(LV_RESULT_INVALID == LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB565_WITH_MASK(dsc)) {
685 for(y = 0; y < h; y++) {
686 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x++) {
687 dest_buf_u16[dest_x] = lv_color_8_16_mix(src_buf_l8[src_x], dest_buf_u16[dest_x], mask_buf[dest_x]);
688 }
689 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
690 src_buf_l8 += src_stride;
691 mask_buf += mask_stride;
692 }
693 }
694 }
695 else if(mask_buf && opa < LV_OPA_MAX) {
696 if(LV_RESULT_INVALID == LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB565_MIX_MASK_OPA(dsc)) {
697 for(y = 0; y < h; y++) {
698 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x++) {
699 dest_buf_u16[dest_x] = lv_color_8_16_mix(src_buf_l8[src_x], dest_buf_u16[dest_x], LV_OPA_MIX2(mask_buf[dest_x], opa));
700 }
701 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
702 src_buf_l8 += src_stride;
703 mask_buf += mask_stride;
704 }
705 }
706 }
707 }
708 else {
709 uint16_t res = 0;
710 for(y = 0; y < h; y++) {
711 lv_color16_t * dest_buf_c16 = (lv_color16_t *)dest_buf_u16;
712 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += 4) {
713 uint8_t rb = src_buf_l8[src_x] >> 3;
714 uint8_t g = src_buf_l8[src_x] >> 2;
715 switch(dsc->blend_mode) {
716 case LV_BLEND_MODE_ADDITIVE:
717 res = (LV_MIN(dest_buf_c16[dest_x].red + rb, 31)) << 11;
718 res += (LV_MIN(dest_buf_c16[dest_x].green + g, 63)) << 5;
719 res += LV_MIN(dest_buf_c16[dest_x].blue + rb, 31);
720 break;
721 case LV_BLEND_MODE_SUBTRACTIVE:
722 res = (LV_MAX(dest_buf_c16[dest_x].red - rb, 0)) << 11;
723 res += (LV_MAX(dest_buf_c16[dest_x].green - g, 0)) << 5;
724 res += LV_MAX(dest_buf_c16[dest_x].blue - rb, 0);
725 break;
726 case LV_BLEND_MODE_MULTIPLY:
727 res = ((dest_buf_c16[dest_x].red * rb) >> 5) << 11;
728 res += ((dest_buf_c16[dest_x].green * g) >> 6) << 5;
729 res += (dest_buf_c16[dest_x].blue * rb) >> 5;
730 break;
731 default:
732 LV_LOG_WARN("Not supported blend mode: %d", dsc->blend_mode);
733 return;
734 }
735
736 if(mask_buf == NULL && opa >= LV_OPA_MAX) {
737 dest_buf_u16[dest_x] = res;
738 }
739 else if(mask_buf == NULL && opa < LV_OPA_MAX) {
740 dest_buf_u16[dest_x] = lv_color_16_16_mix(res, dest_buf_u16[dest_x], opa);
741 }
742 else {
743 if(opa >= LV_OPA_MAX) dest_buf_u16[dest_x] = lv_color_16_16_mix(res, dest_buf_u16[dest_x], mask_buf[dest_x]);
744 else dest_buf_u16[dest_x] = lv_color_16_16_mix(res, dest_buf_u16[dest_x], LV_OPA_MIX2(mask_buf[dest_x], opa));
745 }
746 }
747
748 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
749 src_buf_l8 += src_stride;
750 if(mask_buf) mask_buf += mask_stride;
751 }
752 }
753 }
754
755 #endif
756
rgb565_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)757 static void LV_ATTRIBUTE_FAST_MEM rgb565_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)
758 {
759 int32_t w = dsc->dest_w;
760 int32_t h = dsc->dest_h;
761 lv_opa_t opa = dsc->opa;
762 uint16_t * dest_buf_u16 = dsc->dest_buf;
763 int32_t dest_stride = dsc->dest_stride;
764 const uint16_t * src_buf_u16 = dsc->src_buf;
765 int32_t src_stride = dsc->src_stride;
766 const lv_opa_t * mask_buf = dsc->mask_buf;
767 int32_t mask_stride = dsc->mask_stride;
768
769 int32_t x;
770 int32_t y;
771
772 if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
773 if(mask_buf == NULL && opa >= LV_OPA_MAX) {
774 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_RGB565(dsc)) {
775 uint32_t line_in_bytes = w * 2;
776 for(y = 0; y < h; y++) {
777 lv_memcpy(dest_buf_u16, src_buf_u16, line_in_bytes);
778 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
779 src_buf_u16 = drawbuf_next_row(src_buf_u16, src_stride);
780 }
781 }
782 }
783 else if(mask_buf == NULL && opa < LV_OPA_MAX) {
784 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_RGB565_WITH_OPA(dsc)) {
785 for(y = 0; y < h; y++) {
786 for(x = 0; x < w; x++) {
787 dest_buf_u16[x] = lv_color_16_16_mix(src_buf_u16[x], dest_buf_u16[x], opa);
788 }
789 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
790 src_buf_u16 = drawbuf_next_row(src_buf_u16, src_stride);
791 }
792 }
793 }
794 else if(mask_buf && opa >= LV_OPA_MAX) {
795 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_RGB565_WITH_MASK(dsc)) {
796 for(y = 0; y < h; y++) {
797 for(x = 0; x < w; x++) {
798 dest_buf_u16[x] = lv_color_16_16_mix(src_buf_u16[x], dest_buf_u16[x], mask_buf[x]);
799 }
800 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
801 src_buf_u16 = drawbuf_next_row(src_buf_u16, src_stride);
802 mask_buf += mask_stride;
803 }
804 }
805 }
806 else {
807 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_RGB565_MIX_MASK_OPA(dsc)) {
808 for(y = 0; y < h; y++) {
809 for(x = 0; x < w; x++) {
810 dest_buf_u16[x] = lv_color_16_16_mix(src_buf_u16[x], dest_buf_u16[x], LV_OPA_MIX2(mask_buf[x], opa));
811 }
812 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
813 src_buf_u16 = drawbuf_next_row(src_buf_u16, src_stride);
814 mask_buf += mask_stride;
815 }
816 }
817 }
818 }
819 else {
820 uint16_t res = 0;
821 for(y = 0; y < h; y++) {
822 lv_color16_t * dest_buf_c16 = (lv_color16_t *) dest_buf_u16;
823 lv_color16_t * src_buf_c16 = (lv_color16_t *) src_buf_u16;
824 for(x = 0; x < w; x++) {
825 switch(dsc->blend_mode) {
826 case LV_BLEND_MODE_ADDITIVE:
827 if(src_buf_u16[x] == 0x0000) continue; /*Do not add pure black*/
828 res = (LV_MIN(dest_buf_c16[x].red + src_buf_c16[x].red, 31)) << 11;
829 res += (LV_MIN(dest_buf_c16[x].green + src_buf_c16[x].green, 63)) << 5;
830 res += LV_MIN(dest_buf_c16[x].blue + src_buf_c16[x].blue, 31);
831 break;
832 case LV_BLEND_MODE_SUBTRACTIVE:
833 if(src_buf_u16[x] == 0x0000) continue; /*Do not subtract pure black*/
834 res = (LV_MAX(dest_buf_c16[x].red - src_buf_c16[x].red, 0)) << 11;
835 res += (LV_MAX(dest_buf_c16[x].green - src_buf_c16[x].green, 0)) << 5;
836 res += LV_MAX(dest_buf_c16[x].blue - src_buf_c16[x].blue, 0);
837 break;
838 case LV_BLEND_MODE_MULTIPLY:
839 if(src_buf_u16[x] == 0xffff) continue; /*Do not multiply with pure white (considered as 1)*/
840 res = ((dest_buf_c16[x].red * src_buf_c16[x].red) >> 5) << 11;
841 res += ((dest_buf_c16[x].green * src_buf_c16[x].green) >> 6) << 5;
842 res += (dest_buf_c16[x].blue * src_buf_c16[x].blue) >> 5;
843 break;
844 default:
845 LV_LOG_WARN("Not supported blend mode: %d", dsc->blend_mode);
846 return;
847 }
848
849 if(mask_buf == NULL) {
850 dest_buf_u16[x] = lv_color_16_16_mix(res, dest_buf_u16[x], opa);
851 }
852 else {
853 if(opa >= LV_OPA_MAX) dest_buf_u16[x] = lv_color_16_16_mix(res, dest_buf_u16[x], mask_buf[x]);
854 else dest_buf_u16[x] = lv_color_16_16_mix(res, dest_buf_u16[x], LV_OPA_MIX2(mask_buf[x], opa));
855 }
856 }
857
858 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
859 src_buf_u16 = drawbuf_next_row(src_buf_u16, src_stride);
860 if(mask_buf) mask_buf += mask_stride;
861 }
862 }
863 }
864
865 #if LV_DRAW_SW_SUPPORT_RGB888 || LV_DRAW_SW_SUPPORT_XRGB8888
866
rgb888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc,const uint8_t src_px_size)867 static void LV_ATTRIBUTE_FAST_MEM rgb888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc, const uint8_t src_px_size)
868 {
869 int32_t w = dsc->dest_w;
870 int32_t h = dsc->dest_h;
871 lv_opa_t opa = dsc->opa;
872 uint16_t * dest_buf_u16 = dsc->dest_buf;
873 int32_t dest_stride = dsc->dest_stride;
874 const uint8_t * src_buf_u8 = dsc->src_buf;
875 int32_t src_stride = dsc->src_stride;
876 const lv_opa_t * mask_buf = dsc->mask_buf;
877 int32_t mask_stride = dsc->mask_stride;
878
879 int32_t dest_x;
880 int32_t src_x;
881 int32_t y;
882
883 if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
884 if(mask_buf == NULL && opa >= LV_OPA_MAX) {
885 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_RGB565(dsc, src_px_size)) {
886 for(y = 0; y < h; y++) {
887 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += src_px_size) {
888 dest_buf_u16[dest_x] = ((src_buf_u8[src_x + 2] & 0xF8) << 8) +
889 ((src_buf_u8[src_x + 1] & 0xFC) << 3) +
890 ((src_buf_u8[src_x + 0] & 0xF8) >> 3);
891 }
892 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
893 src_buf_u8 += src_stride;
894 }
895 }
896 }
897 else if(mask_buf == NULL && opa < LV_OPA_MAX) {
898 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_RGB565_WITH_OPA(dsc, src_px_size)) {
899 for(y = 0; y < h; y++) {
900 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += src_px_size) {
901 dest_buf_u16[dest_x] = lv_color_24_16_mix(&src_buf_u8[src_x], dest_buf_u16[dest_x], opa);
902 }
903 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
904 src_buf_u8 += src_stride;
905 }
906 }
907 }
908 if(mask_buf && opa >= LV_OPA_MAX) {
909 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_RGB565_WITH_MASK(dsc, src_px_size)) {
910 for(y = 0; y < h; y++) {
911 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += src_px_size) {
912 dest_buf_u16[dest_x] = lv_color_24_16_mix(&src_buf_u8[src_x], dest_buf_u16[dest_x], mask_buf[dest_x]);
913 }
914 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
915 src_buf_u8 += src_stride;
916 mask_buf += mask_stride;
917 }
918 }
919 }
920 if(mask_buf && opa < LV_OPA_MAX) {
921 if(LV_RESULT_INVALID == LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_RGB565_MIX_MASK_OPA(dsc, src_px_size)) {
922 for(y = 0; y < h; y++) {
923 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += src_px_size) {
924 dest_buf_u16[dest_x] = lv_color_24_16_mix(&src_buf_u8[src_x], dest_buf_u16[dest_x], LV_OPA_MIX2(mask_buf[dest_x], opa));
925 }
926 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
927 src_buf_u8 += src_stride;
928 mask_buf += mask_stride;
929 }
930 }
931 }
932 }
933 else {
934 uint16_t res = 0;
935 for(y = 0; y < h; y++) {
936 lv_color16_t * dest_buf_c16 = (lv_color16_t *) dest_buf_u16;
937 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += src_px_size) {
938 switch(dsc->blend_mode) {
939 case LV_BLEND_MODE_ADDITIVE:
940 res = (LV_MIN(dest_buf_c16[dest_x].red + (src_buf_u8[src_x + 2] >> 3), 31)) << 11;
941 res += (LV_MIN(dest_buf_c16[dest_x].green + (src_buf_u8[src_x + 1] >> 2), 63)) << 5;
942 res += LV_MIN(dest_buf_c16[dest_x].blue + (src_buf_u8[src_x + 0] >> 3), 31);
943 break;
944 case LV_BLEND_MODE_SUBTRACTIVE:
945 res = (LV_MAX(dest_buf_c16[dest_x].red - (src_buf_u8[src_x + 2] >> 3), 0)) << 11;
946 res += (LV_MAX(dest_buf_c16[dest_x].green - (src_buf_u8[src_x + 1] >> 2), 0)) << 5;
947 res += LV_MAX(dest_buf_c16[dest_x].blue - (src_buf_u8[src_x + 0] >> 3), 0);
948 break;
949 case LV_BLEND_MODE_MULTIPLY:
950 res = ((dest_buf_c16[dest_x].red * (src_buf_u8[src_x + 2] >> 3)) >> 5) << 11;
951 res += ((dest_buf_c16[dest_x].green * (src_buf_u8[src_x + 1] >> 2)) >> 6) << 5;
952 res += (dest_buf_c16[dest_x].blue * (src_buf_u8[src_x + 0] >> 3)) >> 5;
953 break;
954 default:
955 LV_LOG_WARN("Not supported blend mode: %d", dsc->blend_mode);
956 return;
957 }
958
959 if(mask_buf == NULL) {
960 dest_buf_u16[dest_x] = lv_color_16_16_mix(res, dest_buf_u16[dest_x], opa);
961 }
962 else {
963 if(opa >= LV_OPA_MAX) dest_buf_u16[dest_x] = lv_color_16_16_mix(res, dest_buf_u16[dest_x], mask_buf[dest_x]);
964 else dest_buf_u16[dest_x] = lv_color_16_16_mix(res, dest_buf_u16[dest_x], LV_OPA_MIX2(mask_buf[dest_x], opa));
965 }
966 }
967 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
968 src_buf_u8 += src_stride;
969 if(mask_buf) mask_buf += mask_stride;
970 }
971
972 }
973 }
974
975 #endif
976
977 #if LV_DRAW_SW_SUPPORT_ARGB8888
978
argb8888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)979 static void LV_ATTRIBUTE_FAST_MEM argb8888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)
980 {
981 int32_t w = dsc->dest_w;
982 int32_t h = dsc->dest_h;
983 lv_opa_t opa = dsc->opa;
984 uint16_t * dest_buf_u16 = dsc->dest_buf;
985 int32_t dest_stride = dsc->dest_stride;
986 const uint8_t * src_buf_u8 = dsc->src_buf;
987 int32_t src_stride = dsc->src_stride;
988 const lv_opa_t * mask_buf = dsc->mask_buf;
989 int32_t mask_stride = dsc->mask_stride;
990
991 int32_t dest_x;
992 int32_t src_x;
993 int32_t y;
994
995 if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
996 if(mask_buf == NULL && opa >= LV_OPA_MAX) {
997 if(LV_RESULT_INVALID == LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_RGB565(dsc)) {
998 for(y = 0; y < h; y++) {
999 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += 4) {
1000 dest_buf_u16[dest_x] = lv_color_24_16_mix(&src_buf_u8[src_x], dest_buf_u16[dest_x], src_buf_u8[src_x + 3]);
1001 }
1002 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
1003 src_buf_u8 += src_stride;
1004 }
1005 }
1006 }
1007 else if(mask_buf == NULL && opa < LV_OPA_MAX) {
1008 if(LV_RESULT_INVALID == LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_RGB565_WITH_OPA(dsc)) {
1009 for(y = 0; y < h; y++) {
1010 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += 4) {
1011 dest_buf_u16[dest_x] = lv_color_24_16_mix(&src_buf_u8[src_x], dest_buf_u16[dest_x], LV_OPA_MIX2(src_buf_u8[src_x + 3],
1012 opa));
1013 }
1014 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
1015 src_buf_u8 += src_stride;
1016 }
1017 }
1018 }
1019 else if(mask_buf && opa >= LV_OPA_MAX) {
1020 if(LV_RESULT_INVALID == LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_RGB565_WITH_MASK(dsc)) {
1021 for(y = 0; y < h; y++) {
1022 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += 4) {
1023 dest_buf_u16[dest_x] = lv_color_24_16_mix(&src_buf_u8[src_x], dest_buf_u16[dest_x],
1024 LV_OPA_MIX2(src_buf_u8[src_x + 3], mask_buf[dest_x]));
1025 }
1026 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
1027 src_buf_u8 += src_stride;
1028 mask_buf += mask_stride;
1029 }
1030 }
1031 }
1032 else if(mask_buf && opa < LV_OPA_MAX) {
1033 if(LV_RESULT_INVALID == LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_RGB565_MIX_MASK_OPA(dsc)) {
1034 for(y = 0; y < h; y++) {
1035 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += 4) {
1036 dest_buf_u16[dest_x] = lv_color_24_16_mix(&src_buf_u8[src_x], dest_buf_u16[dest_x],
1037 LV_OPA_MIX3(src_buf_u8[src_x + 3], mask_buf[dest_x], opa));
1038 }
1039 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
1040 src_buf_u8 += src_stride;
1041 mask_buf += mask_stride;
1042 }
1043 }
1044 }
1045 }
1046 else {
1047 uint16_t res = 0;
1048 for(y = 0; y < h; y++) {
1049 lv_color16_t * dest_buf_c16 = (lv_color16_t *) dest_buf_u16;
1050 for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += 4) {
1051 switch(dsc->blend_mode) {
1052 case LV_BLEND_MODE_ADDITIVE:
1053 res = (LV_MIN(dest_buf_c16[dest_x].red + (src_buf_u8[src_x + 2] >> 3), 31)) << 11;
1054 res += (LV_MIN(dest_buf_c16[dest_x].green + (src_buf_u8[src_x + 1] >> 2), 63)) << 5;
1055 res += LV_MIN(dest_buf_c16[dest_x].blue + (src_buf_u8[src_x + 0] >> 3), 31);
1056 break;
1057 case LV_BLEND_MODE_SUBTRACTIVE:
1058 res = (LV_MAX(dest_buf_c16[dest_x].red - (src_buf_u8[src_x + 2] >> 3), 0)) << 11;
1059 res += (LV_MAX(dest_buf_c16[dest_x].green - (src_buf_u8[src_x + 1] >> 2), 0)) << 5;
1060 res += LV_MAX(dest_buf_c16[dest_x].blue - (src_buf_u8[src_x + 0] >> 3), 0);
1061 break;
1062 case LV_BLEND_MODE_MULTIPLY:
1063 res = ((dest_buf_c16[dest_x].red * (src_buf_u8[src_x + 2] >> 3)) >> 5) << 11;
1064 res += ((dest_buf_c16[dest_x].green * (src_buf_u8[src_x + 1] >> 2)) >> 6) << 5;
1065 res += (dest_buf_c16[dest_x].blue * (src_buf_u8[src_x + 0] >> 3)) >> 5;
1066 break;
1067 default:
1068 LV_LOG_WARN("Not supported blend mode: %d", dsc->blend_mode);
1069 return;
1070 }
1071
1072 if(mask_buf == NULL && opa >= LV_OPA_MAX) {
1073 dest_buf_u16[dest_x] = lv_color_16_16_mix(res, dest_buf_u16[dest_x], src_buf_u8[src_x + 3]);
1074 }
1075 else if(mask_buf == NULL && opa < LV_OPA_MAX) {
1076 dest_buf_u16[dest_x] = lv_color_16_16_mix(res, dest_buf_u16[dest_x], LV_OPA_MIX2(opa, src_buf_u8[src_x + 3]));
1077 }
1078 else {
1079 if(opa >= LV_OPA_MAX) dest_buf_u16[dest_x] = lv_color_16_16_mix(res, dest_buf_u16[dest_x], mask_buf[dest_x]);
1080 else dest_buf_u16[dest_x] = lv_color_16_16_mix(res, dest_buf_u16[dest_x], LV_OPA_MIX3(mask_buf[dest_x], opa,
1081 src_buf_u8[src_x + 3]));
1082 }
1083 }
1084
1085 dest_buf_u16 = drawbuf_next_row(dest_buf_u16, dest_stride);
1086 src_buf_u8 += src_stride;
1087 if(mask_buf) mask_buf += mask_stride;
1088 }
1089 }
1090 }
1091
1092 #endif
1093
l8_to_rgb565(const uint8_t c1)1094 static inline uint16_t LV_ATTRIBUTE_FAST_MEM l8_to_rgb565(const uint8_t c1)
1095 {
1096 return ((c1 & 0xF8) << 8) + ((c1 & 0xFC) << 3) + ((c1 & 0xF8) >> 3);
1097 }
1098
lv_color_8_16_mix(const uint8_t c1,uint16_t c2,uint8_t mix)1099 static inline uint16_t LV_ATTRIBUTE_FAST_MEM lv_color_8_16_mix(const uint8_t c1, uint16_t c2, uint8_t mix)
1100 {
1101
1102 if(mix == 0) {
1103 return c2;
1104 }
1105 else if(mix == 255) {
1106 return ((c1 & 0xF8) << 8) + ((c1 & 0xFC) << 3) + ((c1 & 0xF8) >> 3);
1107 }
1108 else {
1109 lv_opa_t mix_inv = 255 - mix;
1110
1111 return ((((c1 >> 3) * mix + ((c2 >> 11) & 0x1F) * mix_inv) << 3) & 0xF800) +
1112 ((((c1 >> 2) * mix + ((c2 >> 5) & 0x3F) * mix_inv) >> 3) & 0x07E0) +
1113 (((c1 >> 3) * mix + (c2 & 0x1F) * mix_inv) >> 8);
1114 }
1115 }
1116
lv_color_24_16_mix(const uint8_t * c1,uint16_t c2,uint8_t mix)1117 static inline uint16_t LV_ATTRIBUTE_FAST_MEM lv_color_24_16_mix(const uint8_t * c1, uint16_t c2, uint8_t mix)
1118 {
1119 if(mix == 0) {
1120 return c2;
1121 }
1122 else if(mix == 255) {
1123 return ((c1[2] & 0xF8) << 8) + ((c1[1] & 0xFC) << 3) + ((c1[0] & 0xF8) >> 3);
1124 }
1125 else {
1126 lv_opa_t mix_inv = 255 - mix;
1127
1128 return ((((c1[2] >> 3) * mix + ((c2 >> 11) & 0x1F) * mix_inv) << 3) & 0xF800) +
1129 ((((c1[1] >> 2) * mix + ((c2 >> 5) & 0x3F) * mix_inv) >> 3) & 0x07E0) +
1130 (((c1[0] >> 3) * mix + (c2 & 0x1F) * mix_inv) >> 8);
1131 }
1132 }
1133
1134 #if LV_DRAW_SW_SUPPORT_I1
1135
get_bit(const uint8_t * buf,int32_t bit_idx)1136 static inline uint8_t LV_ATTRIBUTE_FAST_MEM get_bit(const uint8_t * buf, int32_t bit_idx)
1137 {
1138 return (buf[bit_idx / 8] >> (7 - (bit_idx % 8))) & 1;
1139 }
1140
1141 #endif
1142
drawbuf_next_row(const void * buf,uint32_t stride)1143 static inline void * LV_ATTRIBUTE_FAST_MEM drawbuf_next_row(const void * buf, uint32_t stride)
1144 {
1145 return (void *)((uint8_t *)buf + stride);
1146 }
1147
1148 #endif
1149
1150 #endif
1151