/**
 * @file lv_draw_sw_blend_to_i1.c
 *
 */

/*********************
 *      INCLUDES
 *********************/
#include "lv_draw_sw_blend_to_i1.h"
#if LV_USE_DRAW_SW

#include "lv_draw_sw_blend_private.h"
#include "../../../misc/lv_math.h"
#include "../../../display/lv_display.h"
#include "../../../core/lv_refr.h"
#include "../../../misc/lv_color.h"
#include "../../../stdlib/lv_string.h"

#if LV_USE_DRAW_SW_ASM == LV_DRAW_SW_ASM_NEON
    #include "neon/lv_blend_neon.h"
#elif LV_USE_DRAW_SW_ASM == LV_DRAW_SW_ASM_HELIUM
    #include "helium/lv_blend_helium.h"
#elif LV_USE_DRAW_SW_ASM == LV_DRAW_SW_ASM_CUSTOM
    #include LV_DRAW_SW_ASM_CUSTOM_INCLUDE
#endif

/*********************
 *      DEFINES
 *********************/

/**********************
 *      TYPEDEFS
 **********************/

/**********************
 *  STATIC PROTOTYPES
 **********************/

static void /* LV_ATTRIBUTE_FAST_MEM */ i1_image_blend(lv_draw_sw_blend_image_dsc_t * dsc);

#if LV_DRAW_SW_SUPPORT_L8
static void /* LV_ATTRIBUTE_FAST_MEM */ l8_image_blend(lv_draw_sw_blend_image_dsc_t * dsc);
#endif

#if LV_DRAW_SW_SUPPORT_AL88
static void /* LV_ATTRIBUTE_FAST_MEM */ al88_image_blend(lv_draw_sw_blend_image_dsc_t * dsc);
#endif

#if LV_DRAW_SW_SUPPORT_RGB565
static void /* LV_ATTRIBUTE_FAST_MEM */ rgb565_image_blend(lv_draw_sw_blend_image_dsc_t * dsc);
#endif

#if LV_DRAW_SW_SUPPORT_RGB888 || LV_DRAW_SW_SUPPORT_XRGB8888
static void /* LV_ATTRIBUTE_FAST_MEM */ rgb888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc,
                                                           const uint8_t src_px_size);
#endif

#if LV_DRAW_SW_SUPPORT_ARGB8888
static void /* LV_ATTRIBUTE_FAST_MEM */ argb8888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc);
#endif

static inline void /* LV_ATTRIBUTE_FAST_MEM */ lv_color_8_8_mix(const uint8_t src, uint8_t * dest, uint8_t mix);

static inline void /* LV_ATTRIBUTE_FAST_MEM */ blend_non_normal_pixel(uint8_t * dest_buf, int32_t dest_x,
                                                                      lv_color32_t src, lv_blend_mode_t mode);

static inline void /* LV_ATTRIBUTE_FAST_MEM */ set_bit(uint8_t * buf, int32_t bit_idx);

static inline void /* LV_ATTRIBUTE_FAST_MEM */ clear_bit(uint8_t * buf, int32_t bit_idx);

static inline uint8_t /* LV_ATTRIBUTE_FAST_MEM */ get_bit(const uint8_t * buf, int32_t bit_idx);

static inline void * /* LV_ATTRIBUTE_FAST_MEM */ drawbuf_next_row(const void * buf, uint32_t stride);

/**********************
 *  STATIC VARIABLES
 **********************/

/**********************
 *      MACROS
 **********************/

#define I1_LUM_THRESHOLD LV_DRAW_SW_I1_LUM_THRESHOLD

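/* The LV_DRAW_SW_..._TO_I1 macros below may be provided by the NEON/Helium/custom assembly
 * headers included above. When a macro is not defined there, it defaults to LV_RESULT_INVALID,
 * so the generic C loop in this file performs the blending instead. */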
#ifndef LV_DRAW_SW_I1_BLEND_NORMAL_TO_I1
    #define LV_DRAW_SW_I1_BLEND_NORMAL_TO_I1(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_I1_BLEND_NORMAL_TO_I1_WITH_OPA
    #define LV_DRAW_SW_I1_BLEND_NORMAL_TO_I1_WITH_OPA(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_I1_BLEND_NORMAL_TO_I1_WITH_MASK
    #define LV_DRAW_SW_I1_BLEND_NORMAL_TO_I1_WITH_MASK(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_I1_BLEND_NORMAL_TO_I1_MIX_MASK_OPA
    #define LV_DRAW_SW_I1_BLEND_NORMAL_TO_I1_MIX_MASK_OPA(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_COLOR_BLEND_TO_I1
    #define LV_DRAW_SW_COLOR_BLEND_TO_I1(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_COLOR_BLEND_TO_I1_WITH_OPA
    #define LV_DRAW_SW_COLOR_BLEND_TO_I1_WITH_OPA(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_COLOR_BLEND_TO_I1_WITH_MASK
    #define LV_DRAW_SW_COLOR_BLEND_TO_I1_WITH_MASK(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_COLOR_BLEND_TO_I1_MIX_MASK_OPA
    #define LV_DRAW_SW_COLOR_BLEND_TO_I1_MIX_MASK_OPA(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_I1
    #define LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_I1(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_I1_WITH_OPA
    #define LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_I1_WITH_OPA(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_I1_WITH_MASK
    #define LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_I1_WITH_MASK(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_I1_MIX_MASK_OPA
    #define LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_I1_MIX_MASK_OPA(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_I1
    #define LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_I1(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_I1_WITH_OPA
    #define LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_I1_WITH_OPA(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_I1_WITH_MASK
    #define LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_I1_WITH_MASK(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_I1_MIX_MASK_OPA
    #define LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_I1_MIX_MASK_OPA(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_L8_BLEND_NORMAL_TO_I1
    #define LV_DRAW_SW_L8_BLEND_NORMAL_TO_I1(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_L8_BLEND_NORMAL_TO_I1_WITH_OPA
    #define LV_DRAW_SW_L8_BLEND_NORMAL_TO_I1_WITH_OPA(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_L8_BLEND_NORMAL_TO_I1_WITH_MASK
    #define LV_DRAW_SW_L8_BLEND_NORMAL_TO_I1_WITH_MASK(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_L8_BLEND_NORMAL_TO_I1_MIX_MASK_OPA
    #define LV_DRAW_SW_L8_BLEND_NORMAL_TO_I1_MIX_MASK_OPA(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_AL88_BLEND_NORMAL_TO_I1
    #define LV_DRAW_SW_AL88_BLEND_NORMAL_TO_I1(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_AL88_BLEND_NORMAL_TO_I1_WITH_OPA
    #define LV_DRAW_SW_AL88_BLEND_NORMAL_TO_I1_WITH_OPA(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_AL88_BLEND_NORMAL_TO_I1_WITH_MASK
    #define LV_DRAW_SW_AL88_BLEND_NORMAL_TO_I1_WITH_MASK(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_AL88_BLEND_NORMAL_TO_I1_MIX_MASK_OPA
    #define LV_DRAW_SW_AL88_BLEND_NORMAL_TO_I1_MIX_MASK_OPA(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_I1
    #define LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_I1(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_I1_WITH_OPA
    #define LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_I1_WITH_OPA(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_I1_WITH_MASK
    #define LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_I1_WITH_MASK(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_I1_MIX_MASK_OPA
    #define LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_I1_MIX_MASK_OPA(...) LV_RESULT_INVALID
#endif

/**********************
 *   GLOBAL FUNCTIONS
 **********************/

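/* Fill an I1 (1 bit per pixel) destination with a single color. The color's luminance is
 * reduced to a 0/1 value using I1_LUM_THRESHOLD, then one of four paths runs: plain fill,
 * fill with opacity, fill with a mask, or fill with both mask and opacity. */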
void LV_ATTRIBUTE_FAST_MEM lv_draw_sw_blend_color_to_i1(lv_draw_sw_blend_fill_dsc_t * dsc)
{
    int32_t w = dsc->dest_w;
    int32_t h = dsc->dest_h;
    lv_opa_t opa = dsc->opa;
    const lv_opa_t * mask = dsc->mask_buf;
    int32_t mask_stride = dsc->mask_stride;
    int32_t dest_stride = dsc->dest_stride;

    uint8_t src_color = lv_color_luminance(dsc->color) / (I1_LUM_THRESHOLD + 1);
    uint8_t * dest_buf = dsc->dest_buf;

    int32_t bit_ofs = dsc->relative_area.x1 % 8;

    /* Simple fill */
    if(mask == NULL && opa >= LV_OPA_MAX) {
        if(LV_RESULT_INVALID == LV_DRAW_SW_COLOR_BLEND_TO_I1(dsc)) {
            for(int32_t y = 0; y < h; y++) {
                for(int32_t x = 0; x < w; x++) {
                    if(src_color) {
                        set_bit(dest_buf, x + bit_ofs);
                    }
                    else {
                        clear_bit(dest_buf, x + bit_ofs);
                    }
                }
                dest_buf = drawbuf_next_row(dest_buf, dest_stride);
            }
        }
    }
    /* Opacity only */
    else if(mask == NULL && opa < LV_OPA_MAX) {
        if(LV_RESULT_INVALID == LV_DRAW_SW_COLOR_BLEND_TO_I1_WITH_OPA(dsc)) {
            for(int32_t y = 0; y < h; y++) {
                for(int32_t x = 0; x < w; x++) {
                    uint8_t * dest_bit = &dest_buf[(x + bit_ofs) / 8];
                    uint8_t current_bit = (*dest_bit >> (7 - ((x + bit_ofs) % 8))) & 0x01;
                    uint8_t new_bit = (opa * src_color + (255 - opa) * current_bit) / 255;
                    if(new_bit) {
                        set_bit(dest_buf, x + bit_ofs);
                    }
                    else {
                        clear_bit(dest_buf, x + bit_ofs);
                    }
                }
                dest_buf = drawbuf_next_row(dest_buf, dest_stride);
            }
        }
    }
    /* Masked with full opacity */
    else if(mask && opa >= LV_OPA_MAX) {
        if(LV_RESULT_INVALID == LV_DRAW_SW_COLOR_BLEND_TO_I1_WITH_MASK(dsc)) {
            for(int32_t y = 0; y < h; y++) {
                for(int32_t x = 0; x < w; x++) {
                    uint8_t mask_val = mask[x];
                    if(mask_val == LV_OPA_TRANSP) continue;
                    if(mask_val == LV_OPA_COVER) {
                        if(src_color) {
                            set_bit(dest_buf, x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf, x + bit_ofs);
                        }
                    }
                    else {
                        uint8_t * dest_bit = &dest_buf[(x + bit_ofs) / 8];
                        uint8_t current_bit = (*dest_bit >> (7 - ((x + bit_ofs) % 8))) & 0x01;
                        uint8_t new_bit = (mask_val * src_color + (255 - mask_val) * current_bit) / 255;
                        if(new_bit) {
                            set_bit(dest_buf, x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf, x + bit_ofs);
                        }
                    }
                }
                dest_buf = drawbuf_next_row(dest_buf, dest_stride);
                mask += mask_stride;
            }
        }
    }
    /* Masked with opacity */
    else {
        if(LV_RESULT_INVALID == LV_DRAW_SW_COLOR_BLEND_TO_I1_MIX_MASK_OPA(dsc)) {
            for(int32_t y = 0; y < h; y++) {
                for(int32_t x = 0; x < w; x++) {
                    uint8_t mask_val = mask[x];
                    if(mask_val == LV_OPA_TRANSP) continue;
                    uint8_t * dest_bit = &dest_buf[(x + bit_ofs) / 8];
                    uint8_t current_bit = (*dest_bit >> (7 - ((x + bit_ofs) % 8))) & 0x01;
                    uint8_t blended_opa = (mask_val * opa) / 255;
                    uint8_t new_bit = (blended_opa * src_color + (255 - blended_opa) * current_bit) / 255;
                    if(new_bit) {
                        set_bit(dest_buf, x + bit_ofs);
                    }
                    else {
                        clear_bit(dest_buf, x + bit_ofs);
                    }
                }
                dest_buf = drawbuf_next_row(dest_buf, dest_stride);
                mask += mask_stride;
            }
        }
    }
}

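/* Dispatch image blending to the I1 destination based on the source color format.
 * Each supported format has its own static helper; unsupported formats only log a warning. */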
void LV_ATTRIBUTE_FAST_MEM lv_draw_sw_blend_image_to_i1(lv_draw_sw_blend_image_dsc_t * dsc)
{
    switch(dsc->src_color_format) {
#if LV_DRAW_SW_SUPPORT_RGB565
        case LV_COLOR_FORMAT_RGB565:
            rgb565_image_blend(dsc);
            break;
#endif
#if LV_DRAW_SW_SUPPORT_RGB888
        case LV_COLOR_FORMAT_RGB888:
            rgb888_image_blend(dsc, 3);
            break;
#endif
#if LV_DRAW_SW_SUPPORT_XRGB8888
        case LV_COLOR_FORMAT_XRGB8888:
            rgb888_image_blend(dsc, 4);
            break;
#endif
#if LV_DRAW_SW_SUPPORT_ARGB8888
        case LV_COLOR_FORMAT_ARGB8888:
            argb8888_image_blend(dsc);
            break;
#endif
#if LV_DRAW_SW_SUPPORT_L8
        case LV_COLOR_FORMAT_L8:
            l8_image_blend(dsc);
            break;
#endif
#if LV_DRAW_SW_SUPPORT_AL88
        case LV_COLOR_FORMAT_AL88:
            al88_image_blend(dsc);
            break;
#endif
        case LV_COLOR_FORMAT_I1:
            i1_image_blend(dsc);
            break;
        default:
            LV_LOG_WARN("Not supported source color format");
            break;
    }
}

/**********************
 *   STATIC FUNCTIONS
 **********************/

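/* Blend an I1 source image to the I1 destination. In NORMAL mode the source bit is copied,
 * or mixed with the destination bit according to opa and/or the mask and re-thresholded
 * with I1_LUM_THRESHOLD. Other blend modes go through blend_non_normal_pixel(). */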
static void LV_ATTRIBUTE_FAST_MEM i1_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)
{
    int32_t w = dsc->dest_w;
    int32_t h = dsc->dest_h;
    lv_opa_t opa = dsc->opa;
    uint8_t * dest_buf_i1 = dsc->dest_buf;
    int32_t dest_stride = dsc->dest_stride;
    const uint8_t * src_buf_i1 = dsc->src_buf;
    int32_t src_stride = dsc->src_stride;
    const lv_opa_t * mask_buf = dsc->mask_buf;
    int32_t mask_stride = dsc->mask_stride;

    int32_t dest_x;
    int32_t src_x;
    int32_t y;

    int32_t bit_ofs = dsc->relative_area.x1 % 8;

    if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
        if(mask_buf == NULL && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_I1_BLEND_NORMAL_TO_I1(dsc)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
                        if(get_bit(src_buf_i1, src_x)) {
                            set_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                    }
                    dest_buf_i1 = drawbuf_next_row(dest_buf_i1, dest_stride);
                    src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
                }
            }
        }
        else if(mask_buf == NULL && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_I1_BLEND_NORMAL_TO_I1_WITH_OPA(dsc)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
                        uint8_t src = get_bit(src_buf_i1, src_x);
                        uint8_t dest = get_bit(dest_buf_i1, dest_x + bit_ofs);
                        uint8_t blended = (src * opa + dest * (255 - opa));
                        if(blended > I1_LUM_THRESHOLD) {
                            set_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                    }
                    dest_buf_i1 = drawbuf_next_row(dest_buf_i1, dest_stride);
                    src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
                }
            }
        }
        else if(mask_buf && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_I1_BLEND_NORMAL_TO_I1_WITH_MASK(dsc)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
                        uint8_t mask_val = mask_buf[src_x];
                        uint8_t src = get_bit(src_buf_i1, src_x);
                        uint8_t dest = get_bit(dest_buf_i1, dest_x + bit_ofs);
                        uint8_t blended = (src * mask_val + dest * (255 - mask_val));
                        if(blended > I1_LUM_THRESHOLD) {
                            set_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                    }
                    dest_buf_i1 = drawbuf_next_row(dest_buf_i1, dest_stride);
                    src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
        else if(mask_buf && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_I1_BLEND_NORMAL_TO_I1_MIX_MASK_OPA(dsc)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
                        uint8_t mask_val = mask_buf[src_x];
                        if(mask_val == LV_OPA_TRANSP) continue;
                        uint8_t src = get_bit(src_buf_i1, src_x);
                        uint8_t dest = get_bit(dest_buf_i1, dest_x + bit_ofs);
                        uint8_t blend_opa = LV_OPA_MIX2(mask_val, opa);
                        uint8_t blended = (src * blend_opa + dest * (255 - blend_opa));
                        if(blended > I1_LUM_THRESHOLD) {
                            set_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                    }
                    dest_buf_i1 = drawbuf_next_row(dest_buf_i1, dest_stride);
                    src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
    }
    else {
        lv_color32_t src_argb;
        for(y = 0; y < h; y++) {
            for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
                src_argb.red = get_bit(src_buf_i1, src_x) * 255;
                src_argb.green = src_argb.red;
                src_argb.blue = src_argb.red;
                if(mask_buf == NULL) src_argb.alpha = opa;
                else src_argb.alpha = LV_OPA_MIX2(mask_buf[dest_x], opa);
                blend_non_normal_pixel(dest_buf_i1, dest_x + bit_ofs, src_argb, dsc->blend_mode);
            }
            if(mask_buf) mask_buf += mask_stride;
            dest_buf_i1 = drawbuf_next_row(dest_buf_i1, dest_stride);
            src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
        }
    }
}

#if LV_DRAW_SW_SUPPORT_L8
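/* Blend an L8 (8-bit luminance) source image to the I1 destination: mix the source luminance
 * with the destination bit expanded to 0 or 255, then threshold the result with I1_LUM_THRESHOLD. */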
static void LV_ATTRIBUTE_FAST_MEM l8_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)
{
    int32_t w = dsc->dest_w;
    int32_t h = dsc->dest_h;
    lv_opa_t opa = dsc->opa;
    uint8_t * dest_buf_i1 = dsc->dest_buf;
    int32_t dest_stride = dsc->dest_stride;
    const uint8_t * src_buf_l8 = dsc->src_buf;
    int32_t src_stride = dsc->src_stride;
    const lv_opa_t * mask_buf = dsc->mask_buf;
    int32_t mask_stride = dsc->mask_stride;

    int32_t src_x, dest_x;
    int32_t y;

    int32_t bit_ofs = dsc->relative_area.x1 % 8;

    if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
        if(mask_buf == NULL && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_L8_BLEND_NORMAL_TO_I1(dsc)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
                        if(src_buf_l8[src_x] > I1_LUM_THRESHOLD) {
                            set_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                    }
                    dest_buf_i1 = drawbuf_next_row(dest_buf_i1, dest_stride);
                    src_buf_l8 = drawbuf_next_row(src_buf_l8, src_stride);
                }
            }
        }
        else if(mask_buf == NULL && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_L8_BLEND_NORMAL_TO_I1_WITH_OPA(dsc)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
                        uint8_t dest_val = get_bit(dest_buf_i1, dest_x + bit_ofs) * 255;
                        lv_color_8_8_mix(src_buf_l8[src_x], &dest_val, opa);
                        if(dest_val > I1_LUM_THRESHOLD) {
                            set_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                    }
                    dest_buf_i1 = drawbuf_next_row(dest_buf_i1, dest_stride);
                    src_buf_l8 = drawbuf_next_row(src_buf_l8, src_stride);
                }
            }
        }
        else if(mask_buf && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_L8_BLEND_NORMAL_TO_I1_WITH_MASK(dsc)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
                        uint8_t src_luminance = src_buf_l8[src_x];
                        uint8_t dest_val = get_bit(dest_buf_i1, dest_x + bit_ofs) * 255;
                        lv_color_8_8_mix(src_luminance, &dest_val, mask_buf[src_x]);
                        if(dest_val > I1_LUM_THRESHOLD) {
                            set_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                    }
                    dest_buf_i1 = drawbuf_next_row(dest_buf_i1, dest_stride);
                    src_buf_l8 = drawbuf_next_row(src_buf_l8, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
        else if(mask_buf && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_L8_BLEND_NORMAL_TO_I1_MIX_MASK_OPA(dsc)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
                        uint8_t src_luminance = src_buf_l8[src_x];
                        uint8_t dest_val = get_bit(dest_buf_i1, dest_x + bit_ofs) * 255;
                        lv_color_8_8_mix(src_luminance, &dest_val, LV_OPA_MIX2(mask_buf[src_x], opa));
                        if(dest_val > I1_LUM_THRESHOLD) {
                            set_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                    }
                    dest_buf_i1 = drawbuf_next_row(dest_buf_i1, dest_stride);
                    src_buf_l8 = drawbuf_next_row(src_buf_l8, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
    }
    else {
        lv_color32_t src_argb;
        for(y = 0; y < h; y++) {
            for(src_x = 0; src_x < w; src_x++) {
                src_argb.red = src_buf_l8[src_x];
                src_argb.green = src_buf_l8[src_x];
                src_argb.blue = src_buf_l8[src_x];
                if(mask_buf == NULL) src_argb.alpha = opa;
                else src_argb.alpha = LV_OPA_MIX2(mask_buf[src_x], opa);
                blend_non_normal_pixel(dest_buf_i1, src_x + bit_ofs, src_argb, dsc->blend_mode);
            }
            if(mask_buf) mask_buf += mask_stride;
            dest_buf_i1 = drawbuf_next_row(dest_buf_i1, dest_stride);
            src_buf_l8 = drawbuf_next_row(src_buf_l8, src_stride);
        }
    }
}
#endif

#if LV_DRAW_SW_SUPPORT_AL88
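/* Blend an AL88 (8-bit luminance + 8-bit alpha) source image to the I1 destination.
 * The per-pixel alpha is combined with opa and/or the mask before mixing and thresholding. */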
static void LV_ATTRIBUTE_FAST_MEM al88_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)
{
    int32_t w = dsc->dest_w;
    int32_t h = dsc->dest_h;
    lv_opa_t opa = dsc->opa;
    uint8_t * dest_buf_i1 = dsc->dest_buf;
    int32_t dest_stride = dsc->dest_stride;
    const lv_color16a_t * src_buf_al88 = dsc->src_buf;
    int32_t src_stride = dsc->src_stride;
    const lv_opa_t * mask_buf = dsc->mask_buf;
    int32_t mask_stride = dsc->mask_stride;

    int32_t dest_x;
    int32_t src_x;
    int32_t y;

    int32_t bit_ofs = dsc->relative_area.x1 % 8;

    if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
        if(mask_buf == NULL && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_AL88_BLEND_NORMAL_TO_I1(dsc)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
                        uint8_t dest_val = get_bit(dest_buf_i1, dest_x + bit_ofs) * 255;
                        lv_color_8_8_mix(src_buf_al88[src_x].lumi, &dest_val, src_buf_al88[src_x].alpha);
                        if(dest_val > I1_LUM_THRESHOLD) {
                            set_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                    }
                    dest_buf_i1 = drawbuf_next_row(dest_buf_i1, dest_stride);
                    src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
                }
            }
        }
        else if(mask_buf == NULL && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_AL88_BLEND_NORMAL_TO_I1_WITH_OPA(dsc)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
                        uint8_t dest_val = get_bit(dest_buf_i1, dest_x + bit_ofs) * 255;
                        lv_color_8_8_mix(src_buf_al88[src_x].lumi, &dest_val, LV_OPA_MIX2(src_buf_al88[src_x].alpha, opa));
                        if(dest_val > I1_LUM_THRESHOLD) {
                            set_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                    }
                    dest_buf_i1 = drawbuf_next_row(dest_buf_i1, dest_stride);
                    src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
                }
            }
        }
        else if(mask_buf && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_AL88_BLEND_NORMAL_TO_I1_WITH_MASK(dsc)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
                        uint8_t dest_val = get_bit(dest_buf_i1, dest_x + bit_ofs) * 255;
                        lv_color_8_8_mix(src_buf_al88[src_x].lumi, &dest_val, LV_OPA_MIX2(src_buf_al88[src_x].alpha, mask_buf[src_x]));
                        if(dest_val > I1_LUM_THRESHOLD) {
                            set_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                    }
                    dest_buf_i1 = drawbuf_next_row(dest_buf_i1, dest_stride);
                    src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
        else if(mask_buf && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_AL88_BLEND_NORMAL_TO_I1_MIX_MASK_OPA(dsc)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
                        uint8_t dest_val = get_bit(dest_buf_i1, dest_x + bit_ofs) * 255;
                        lv_color_8_8_mix(src_buf_al88[src_x].lumi, &dest_val, LV_OPA_MIX3(src_buf_al88[src_x].alpha, mask_buf[src_x], opa));
                        if(dest_val > I1_LUM_THRESHOLD) {
                            set_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                    }
                    dest_buf_i1 = drawbuf_next_row(dest_buf_i1, dest_stride);
                    src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
    }
    else {
        lv_color32_t src_argb;
        for(y = 0; y < h; y++) {
            for(dest_x = 0, src_x = 0; src_x < w; dest_x++, src_x++) {
                src_argb.red = src_buf_al88[src_x].lumi;
                src_argb.green = src_buf_al88[src_x].lumi;
                src_argb.blue = src_buf_al88[src_x].lumi;
                if(mask_buf == NULL) src_argb.alpha = LV_OPA_MIX2(src_buf_al88[src_x].alpha, opa);
                else src_argb.alpha = LV_OPA_MIX3(src_buf_al88[src_x].alpha, mask_buf[src_x], opa);
                blend_non_normal_pixel(dest_buf_i1, dest_x + bit_ofs, src_argb, dsc->blend_mode);
            }
            if(mask_buf) mask_buf += mask_stride;
            dest_buf_i1 = drawbuf_next_row(dest_buf_i1, dest_stride);
            src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
        }
    }
}
#endif

#if LV_DRAW_SW_SUPPORT_ARGB8888
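/* Blend an ARGB8888 source image to the I1 destination. The source pixel is converted to
 * luminance with lv_color32_luminance(), its alpha is combined with opa and/or the mask,
 * and the mixed result is thresholded with I1_LUM_THRESHOLD. */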
static void LV_ATTRIBUTE_FAST_MEM argb8888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)
{
    int32_t w = dsc->dest_w;
    int32_t h = dsc->dest_h;
    lv_opa_t opa = dsc->opa;
    uint8_t * dest_buf_i1 = dsc->dest_buf;
    int32_t dest_stride = dsc->dest_stride;
    const lv_color32_t * src_buf_c32 = dsc->src_buf;
    int32_t src_stride = dsc->src_stride;
    const lv_opa_t * mask_buf = dsc->mask_buf;
    int32_t mask_stride = dsc->mask_stride;

    int32_t x;
    int32_t y;

    int32_t bit_ofs = dsc->relative_area.x1 % 8;

    if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
        if(mask_buf == NULL && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_I1(dsc)) {
                for(y = 0; y < h; y++) {
                    for(x = 0; x < w; x++) {
                        uint8_t src = lv_color32_luminance(src_buf_c32[x]);
                        uint8_t dest = get_bit(dest_buf_i1, x + bit_ofs) * 255;
                        lv_color_8_8_mix(src, &dest, src_buf_c32[x].alpha);
                        if(dest > I1_LUM_THRESHOLD) {
                            set_bit(dest_buf_i1, x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf_i1, x + bit_ofs);
                        }
                    }
                    dest_buf_i1 = drawbuf_next_row(dest_buf_i1, dest_stride);
                    src_buf_c32 = drawbuf_next_row(src_buf_c32, src_stride);
                }
            }
        }
        else if(mask_buf == NULL && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_I1_WITH_OPA(dsc)) {
                for(y = 0; y < h; y++) {
                    for(x = 0; x < w; x++) {
                        uint8_t src = lv_color32_luminance(src_buf_c32[x]);
                        uint8_t dest = get_bit(dest_buf_i1, x + bit_ofs) * 255;
                        lv_color_8_8_mix(src, &dest, LV_OPA_MIX2(opa, src_buf_c32[x].alpha));
                        if(dest > I1_LUM_THRESHOLD) {
                            set_bit(dest_buf_i1, x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf_i1, x + bit_ofs);
                        }
                    }
                    dest_buf_i1 = drawbuf_next_row(dest_buf_i1, dest_stride);
                    src_buf_c32 = drawbuf_next_row(src_buf_c32, src_stride);
                }
            }
        }
        else if(mask_buf && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_I1_WITH_MASK(dsc)) {
                for(y = 0; y < h; y++) {
                    for(x = 0; x < w; x++) {
                        uint8_t src = lv_color32_luminance(src_buf_c32[x]);
                        uint8_t dest = get_bit(dest_buf_i1, x + bit_ofs) * 255;
                        lv_color_8_8_mix(src, &dest, LV_OPA_MIX2(mask_buf[x], src_buf_c32[x].alpha));
                        if(dest > I1_LUM_THRESHOLD) {
                            set_bit(dest_buf_i1, x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf_i1, x + bit_ofs);
                        }
                    }
                    dest_buf_i1 = drawbuf_next_row(dest_buf_i1, dest_stride);
                    src_buf_c32 = drawbuf_next_row(src_buf_c32, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
        else if(mask_buf && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_I1_MIX_MASK_OPA(dsc)) {
                for(y = 0; y < h; y++) {
                    for(x = 0; x < w; x++) {
                        uint8_t src = lv_color32_luminance(src_buf_c32[x]);
                        uint8_t dest = get_bit(dest_buf_i1, x + bit_ofs) * 255;
                        lv_color_8_8_mix(src, &dest, LV_OPA_MIX3(opa, mask_buf[x], src_buf_c32[x].alpha));
                        if(dest > I1_LUM_THRESHOLD) {
                            set_bit(dest_buf_i1, x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf_i1, x + bit_ofs);
                        }
                    }
                    dest_buf_i1 = drawbuf_next_row(dest_buf_i1, dest_stride);
                    src_buf_c32 = drawbuf_next_row(src_buf_c32, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
    }
    else {
        for(y = 0; y < h; y++) {
            for(x = 0; x < w; x++) {
                lv_color32_t color_argb = src_buf_c32[x];
                if(mask_buf == NULL) color_argb.alpha = LV_OPA_MIX2(color_argb.alpha, opa);
                else color_argb.alpha = LV_OPA_MIX3(color_argb.alpha, mask_buf[x], opa);
                blend_non_normal_pixel(dest_buf_i1, x + bit_ofs, color_argb, dsc->blend_mode);
            }
            if(mask_buf) mask_buf += mask_stride;
            dest_buf_i1 = drawbuf_next_row(dest_buf_i1, dest_stride);
            src_buf_c32 = drawbuf_next_row(src_buf_c32, src_stride);
        }
    }
}
#endif

#if LV_DRAW_SW_SUPPORT_RGB888 || LV_DRAW_SW_SUPPORT_XRGB8888
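/* Blend an RGB888 or XRGB8888 source image to the I1 destination. src_px_size is the source
 * pixel size in bytes (3 for RGB888, 4 for XRGB8888); the pixel is converted to luminance
 * with lv_color24_luminance() before mixing and thresholding. */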
static void LV_ATTRIBUTE_FAST_MEM rgb888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc,
                                                     const uint8_t src_px_size)
{
    int32_t w = dsc->dest_w;
    int32_t h = dsc->dest_h;
    lv_opa_t opa = dsc->opa;
    uint8_t * dest_buf_i1 = dsc->dest_buf;
    int32_t dest_stride = dsc->dest_stride;
    const uint8_t * src_buf_rgb888 = dsc->src_buf;
    int32_t src_stride = dsc->src_stride;
    const lv_opa_t * mask_buf = dsc->mask_buf;
    int32_t mask_stride = dsc->mask_stride;

    int32_t dest_x;
    int32_t src_x;
    int32_t y;

    int32_t bit_ofs = dsc->relative_area.x1 % 8;

    if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
        /*Special case*/
        if(mask_buf == NULL && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_I1(dsc)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += src_px_size) {
                        uint8_t src = lv_color24_luminance(&src_buf_rgb888[src_x]);
                        if(src > I1_LUM_THRESHOLD) {
                            set_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                    }
                    dest_buf_i1 = drawbuf_next_row(dest_buf_i1, dest_stride);
                    src_buf_rgb888 = drawbuf_next_row(src_buf_rgb888, src_stride);
                }
            }
        }
        else if(mask_buf == NULL && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_I1_WITH_OPA(dsc)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += src_px_size) {
                        uint8_t src = lv_color24_luminance(&src_buf_rgb888[src_x]);
                        uint8_t dest = get_bit(dest_buf_i1, dest_x + bit_ofs) * 255;
                        lv_color_8_8_mix(src, &dest, opa);
                        if(dest > I1_LUM_THRESHOLD) {
                            set_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                    }
                    dest_buf_i1 = drawbuf_next_row(dest_buf_i1, dest_stride);
                    src_buf_rgb888 = drawbuf_next_row(src_buf_rgb888, src_stride);
                }
            }
        }
        else if(mask_buf && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_I1_WITH_MASK(dsc)) {
                uint32_t mask_x;
                for(y = 0; y < h; y++) {
                    for(mask_x = 0, dest_x = 0, src_x = 0; dest_x < w; mask_x++, dest_x++, src_x += src_px_size) {
                        uint8_t src = lv_color24_luminance(&src_buf_rgb888[src_x]);
                        uint8_t dest = get_bit(dest_buf_i1, dest_x + bit_ofs) * 255;
                        lv_color_8_8_mix(src, &dest, mask_buf[mask_x]);
                        if(dest > I1_LUM_THRESHOLD) {
                            set_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                    }
                    dest_buf_i1 = drawbuf_next_row(dest_buf_i1, dest_stride);
                    src_buf_rgb888 = drawbuf_next_row(src_buf_rgb888, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
        else if(mask_buf && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_I1_MIX_MASK_OPA(dsc)) {
                uint32_t mask_x;
                for(y = 0; y < h; y++) {
                    for(mask_x = 0, dest_x = 0, src_x = 0; dest_x < w; mask_x++, dest_x++, src_x += src_px_size) {
                        uint8_t src = lv_color24_luminance(&src_buf_rgb888[src_x]);
                        uint8_t dest = get_bit(dest_buf_i1, dest_x + bit_ofs) * 255;
                        lv_color_8_8_mix(src, &dest, LV_OPA_MIX2(mask_buf[mask_x], opa));
                        if(dest > I1_LUM_THRESHOLD) {
                            set_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf_i1, dest_x + bit_ofs);
                        }
                    }
                    dest_buf_i1 = drawbuf_next_row(dest_buf_i1, dest_stride);
                    src_buf_rgb888 = drawbuf_next_row(src_buf_rgb888, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
        else {
            lv_color32_t src_argb;
            for(y = 0; y < h; y++) {
                for(dest_x = 0, src_x = 0; dest_x < w; dest_x++, src_x += src_px_size) {
                    src_argb.red = src_buf_rgb888[src_x + 2];
                    src_argb.green = src_buf_rgb888[src_x + 1];
                    src_argb.blue = src_buf_rgb888[src_x + 0];
                    if(mask_buf == NULL) src_argb.alpha = opa;
                    else src_argb.alpha = LV_OPA_MIX2(mask_buf[dest_x], opa);

                    blend_non_normal_pixel(dest_buf_i1, dest_x + bit_ofs, src_argb, dsc->blend_mode);
                }
                if(mask_buf) mask_buf += mask_stride;
                dest_buf_i1 = drawbuf_next_row(dest_buf_i1, dest_stride);
                src_buf_rgb888 = drawbuf_next_row(src_buf_rgb888, src_stride);
            }
        }
    }
}
#endif

#if LV_DRAW_SW_SUPPORT_RGB565
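/* Blend an RGB565 source image to the I1 destination. The pixel is converted to luminance
 * with lv_color16_luminance() (or its 5/6-bit channels are expanded to 8 bit for non-normal
 * blend modes) before mixing and thresholding. */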
static void LV_ATTRIBUTE_FAST_MEM rgb565_image_blend(lv_draw_sw_blend_image_dsc_t * dsc)
{
    int32_t w = dsc->dest_w;
    int32_t h = dsc->dest_h;
    lv_opa_t opa = dsc->opa;
    uint8_t * dest_buf_u8 = dsc->dest_buf;
    int32_t dest_stride = dsc->dest_stride;
    const lv_color16_t * src_buf_c16 = (const lv_color16_t *)dsc->src_buf;
    int32_t src_stride = dsc->src_stride;
    const lv_opa_t * mask_buf = dsc->mask_buf;
    int32_t mask_stride = dsc->mask_stride;
    int32_t bit_ofs = dsc->relative_area.x1 % 8;

    int32_t src_x;
    int32_t dest_x;
    int32_t y;

    if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
        if(mask_buf == NULL && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_I1(dsc)) {
                for(y = 0; y < h; y++) {
                    for(src_x = 0, dest_x = 0; src_x < w; dest_x++, src_x++) {
                        uint8_t src = lv_color16_luminance(src_buf_c16[src_x]);
                        if(src > I1_LUM_THRESHOLD) {
                            set_bit(dest_buf_u8, dest_x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf_u8, dest_x + bit_ofs);
                        }
                    }
                    dest_buf_u8 = drawbuf_next_row(dest_buf_u8, dest_stride);
                    src_buf_c16 = drawbuf_next_row(src_buf_c16, src_stride);
                }
            }
        }
        else if(mask_buf == NULL && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_I1_WITH_OPA(dsc)) {
                for(y = 0; y < h; y++) {
                    for(src_x = 0, dest_x = 0; src_x < w; dest_x++, src_x++) {
                        uint8_t src = lv_color16_luminance(src_buf_c16[src_x]);
                        uint8_t dest = get_bit(dest_buf_u8, dest_x + bit_ofs) * 255;
                        lv_color_8_8_mix(src, &dest, opa);
                        if(dest > I1_LUM_THRESHOLD) {
                            set_bit(dest_buf_u8, dest_x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf_u8, dest_x + bit_ofs);
                        }
                    }
                    dest_buf_u8 = drawbuf_next_row(dest_buf_u8, dest_stride);
                    src_buf_c16 = drawbuf_next_row(src_buf_c16, src_stride);
                }
            }
        }
        else if(mask_buf && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_I1_WITH_MASK(dsc)) {
                uint32_t mask_x;
                for(y = 0; y < h; y++) {
                    for(mask_x = 0, dest_x = 0, src_x = 0; dest_x < w; mask_x++, dest_x++, src_x++) {
                        uint8_t src = lv_color16_luminance(src_buf_c16[src_x]);
                        uint8_t dest = get_bit(dest_buf_u8, dest_x + bit_ofs) * 255;
                        lv_color_8_8_mix(src, &dest, mask_buf[mask_x]);
                        if(dest > I1_LUM_THRESHOLD) {
                            set_bit(dest_buf_u8, dest_x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf_u8, dest_x + bit_ofs);
                        }
                    }
                    dest_buf_u8 = drawbuf_next_row(dest_buf_u8, dest_stride);
                    src_buf_c16 = drawbuf_next_row(src_buf_c16, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
        else if(mask_buf && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_I1_MIX_MASK_OPA(dsc)) {
                uint32_t mask_x;
                for(y = 0; y < h; y++) {
                    for(mask_x = 0, dest_x = 0, src_x = 0; dest_x < w; mask_x++, dest_x++, src_x++) {
                        uint8_t src = lv_color16_luminance(src_buf_c16[src_x]);
                        uint8_t dest = get_bit(dest_buf_u8, dest_x + bit_ofs) * 255;
                        lv_color_8_8_mix(src, &dest, LV_OPA_MIX2(mask_buf[mask_x], opa));
                        if(dest > I1_LUM_THRESHOLD) {
                            set_bit(dest_buf_u8, dest_x + bit_ofs);
                        }
                        else {
                            clear_bit(dest_buf_u8, dest_x + bit_ofs);
                        }
                    }
                    dest_buf_u8 = drawbuf_next_row(dest_buf_u8, dest_stride);
                    src_buf_c16 = drawbuf_next_row(src_buf_c16, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
    }
    else {
        lv_color32_t src_argb;
        for(y = 0; y < h; y++) {
            for(src_x = 0, dest_x = 0; src_x < w; src_x++, dest_x++) {
                src_argb.red = (src_buf_c16[src_x].red * 2106) >> 8;
                src_argb.green = (src_buf_c16[src_x].green * 1037) >> 8;
                src_argb.blue = (src_buf_c16[src_x].blue * 2106) >> 8;
                if(mask_buf == NULL) src_argb.alpha = opa;
                else src_argb.alpha = LV_OPA_MIX2(mask_buf[src_x], opa);

                blend_non_normal_pixel(dest_buf_u8, dest_x + bit_ofs, src_argb, dsc->blend_mode);
            }
            if(mask_buf) mask_buf += mask_stride;
            dest_buf_u8 = drawbuf_next_row(dest_buf_u8, dest_stride);
            src_buf_c16 = drawbuf_next_row(src_buf_c16, src_stride);
        }
    }
}
#endif

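/* Apply an additive, subtractive or multiply blend between a source color and one destination bit.
 * The destination bit is expanded to 0 or 255, blended in luminance space, mixed with the source
 * alpha and written back as a single bit using I1_LUM_THRESHOLD. */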
static inline void LV_ATTRIBUTE_FAST_MEM blend_non_normal_pixel(uint8_t * dest_buf, int32_t dest_x, lv_color32_t src,
                                                                lv_blend_mode_t mode)
{
    uint8_t res;
    int32_t src_lumi = lv_color32_luminance(src);
    uint8_t dest_lumi = get_bit(dest_buf, dest_x) * 255;
    switch(mode) {
        case LV_BLEND_MODE_ADDITIVE:
            res = LV_MIN(dest_lumi + src_lumi, 255);
            break;
        case LV_BLEND_MODE_SUBTRACTIVE:
            res = LV_MAX(dest_lumi - src_lumi, 0);
            break;
        case LV_BLEND_MODE_MULTIPLY:
            res = (dest_lumi * src_lumi) >> 8;
            break;
        default:
            LV_LOG_WARN("Not supported blend mode: %d", mode);
            return;
    }

    lv_color_8_8_mix(res, &dest_lumi, src.alpha);
    if(dest_lumi > I1_LUM_THRESHOLD) {
        set_bit(dest_buf, dest_x);
    }
    else {
        clear_bit(dest_buf, dest_x);
    }
}

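/* Mix an 8-bit source value into an 8-bit destination value with the given opacity.
 * mix == 0 leaves the destination unchanged; mix >= LV_OPA_MAX copies the source. */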
static inline void LV_ATTRIBUTE_FAST_MEM lv_color_8_8_mix(const uint8_t src, uint8_t * dest, uint8_t mix)
{
    if(mix == 0) return;

    if(mix >= LV_OPA_MAX) {
        *dest = src;
    }
    else {
        lv_opa_t mix_inv = 255 - mix;
        *dest = (uint32_t)((uint32_t)src * mix + dest[0] * mix_inv) >> 8;
    }
}

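/* Advance a draw buffer pointer by one row; the stride is given in bytes. */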
static inline void * LV_ATTRIBUTE_FAST_MEM drawbuf_next_row(const void * buf, uint32_t stride)
{
    return (void *)((uint8_t *)buf + stride);
}

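/* Single-bit helpers for the I1 format: bit 7 of each byte is the leftmost pixel (MSB first). */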
static inline void LV_ATTRIBUTE_FAST_MEM set_bit(uint8_t * buf, int32_t bit_idx)
{
    buf[bit_idx / 8] |= (1 << (7 - (bit_idx % 8)));
}

static inline void LV_ATTRIBUTE_FAST_MEM clear_bit(uint8_t * buf, int32_t bit_idx)
{
    buf[bit_idx / 8] &= ~(1 << (7 - (bit_idx % 8)));
}

static inline uint8_t LV_ATTRIBUTE_FAST_MEM get_bit(const uint8_t * buf, int32_t bit_idx)
{
    return (buf[bit_idx / 8] >> (7 - (bit_idx % 8))) & 1;
}

#endif /*LV_USE_DRAW_SW*/