/**
 * @file lv_draw_sw_blend_to_rgb888.c
 *
 */

/*********************
 *      INCLUDES
 *********************/
#include "lv_draw_sw_blend_to_rgb888.h"
#if LV_USE_DRAW_SW

#if LV_DRAW_SW_SUPPORT_RGB888 || LV_DRAW_SW_SUPPORT_XRGB8888

#include "lv_draw_sw_blend_private.h"
#include "../../../misc/lv_math.h"
#include "../../../display/lv_display.h"
#include "../../../core/lv_refr.h"
#include "../../../misc/lv_color.h"
#include "../../../stdlib/lv_string.h"

#if LV_USE_DRAW_SW_ASM == LV_DRAW_SW_ASM_NEON
#include "neon/lv_blend_neon.h"
#elif LV_USE_DRAW_SW_ASM == LV_DRAW_SW_ASM_HELIUM
#include "helium/lv_blend_helium.h"
#elif LV_USE_DRAW_SW_ASM == LV_DRAW_SW_ASM_CUSTOM
#include LV_DRAW_SW_ASM_CUSTOM_INCLUDE
#endif

/*********************
 *      DEFINES
 *********************/

/**********************
 *      TYPEDEFS
 **********************/

/**********************
 *  STATIC PROTOTYPES
 **********************/

#if LV_DRAW_SW_SUPPORT_AL88
static void /* LV_ATTRIBUTE_FAST_MEM */ al88_image_blend(lv_draw_sw_blend_image_dsc_t * dsc, uint32_t dest_px_size);
#endif

#if LV_DRAW_SW_SUPPORT_I1
static void /* LV_ATTRIBUTE_FAST_MEM */ i1_image_blend(lv_draw_sw_blend_image_dsc_t * dsc, uint32_t dest_px_size);

static inline uint8_t /* LV_ATTRIBUTE_FAST_MEM */ get_bit(const uint8_t * buf, int32_t bit_idx);
#endif

#if LV_DRAW_SW_SUPPORT_L8
static void /* LV_ATTRIBUTE_FAST_MEM */ l8_image_blend(lv_draw_sw_blend_image_dsc_t * dsc, uint32_t dest_px_size);
#endif

#if LV_DRAW_SW_SUPPORT_RGB565
static void /* LV_ATTRIBUTE_FAST_MEM */ rgb565_image_blend(lv_draw_sw_blend_image_dsc_t * dsc, uint32_t dest_px_size);
#endif

static void /* LV_ATTRIBUTE_FAST_MEM */ rgb888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc,
                                                           const uint8_t dest_px_size,
                                                           uint32_t src_px_size);

#if LV_DRAW_SW_SUPPORT_ARGB8888
static void /* LV_ATTRIBUTE_FAST_MEM */ argb8888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc,
                                                             uint32_t dest_px_size);
#endif

static inline void /* LV_ATTRIBUTE_FAST_MEM */ lv_color_8_24_mix(const uint8_t src, uint8_t * dest, uint8_t mix);

static inline void /* LV_ATTRIBUTE_FAST_MEM */ lv_color_24_24_mix(const uint8_t * src, uint8_t * dest, uint8_t mix);

static inline void /* LV_ATTRIBUTE_FAST_MEM */ blend_non_normal_pixel(uint8_t * dest, lv_color32_t src,
                                                                      lv_blend_mode_t mode);
static inline void * /* LV_ATTRIBUTE_FAST_MEM */ drawbuf_next_row(const void * buf, uint32_t stride);

/**********************
 *  STATIC VARIABLES
 **********************/

/**********************
 *      MACROS
 **********************/

#ifndef LV_DRAW_SW_COLOR_BLEND_TO_RGB888
#define LV_DRAW_SW_COLOR_BLEND_TO_RGB888(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_COLOR_BLEND_TO_RGB888_WITH_OPA
#define LV_DRAW_SW_COLOR_BLEND_TO_RGB888_WITH_OPA(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_COLOR_BLEND_TO_RGB888_WITH_MASK
#define LV_DRAW_SW_COLOR_BLEND_TO_RGB888_WITH_MASK(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_COLOR_BLEND_TO_RGB888_MIX_MASK_OPA
#define LV_DRAW_SW_COLOR_BLEND_TO_RGB888_MIX_MASK_OPA(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB888
#define LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB888(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB888_WITH_OPA
#define LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB888_WITH_OPA(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB888_WITH_MASK
#define LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB888_WITH_MASK(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB888_MIX_MASK_OPA
#define LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB888_MIX_MASK_OPA(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_RGB888
#define LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_RGB888(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_RGB888_WITH_OPA
#define LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_RGB888_WITH_OPA(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_RGB888_WITH_MASK
#define LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_RGB888_WITH_MASK(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_RGB888_MIX_MASK_OPA
#define LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_RGB888_MIX_MASK_OPA(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_RGB888
#define LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_RGB888(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_RGB888_WITH_OPA
#define LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_RGB888_WITH_OPA(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_RGB888_WITH_MASK
#define LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_RGB888_WITH_MASK(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_RGB888_MIX_MASK_OPA
#define LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_RGB888_MIX_MASK_OPA(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_RGB888
#define LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_RGB888(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_RGB888_WITH_OPA
#define LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_RGB888_WITH_OPA(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_RGB888_WITH_MASK
#define LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_RGB888_WITH_MASK(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_RGB888_MIX_MASK_OPA
#define LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_RGB888_MIX_MASK_OPA(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_I1_BLEND_NORMAL_TO_888
#define LV_DRAW_SW_I1_BLEND_NORMAL_TO_888(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_I1_BLEND_NORMAL_TO_888_WITH_OPA
#define LV_DRAW_SW_I1_BLEND_NORMAL_TO_888_WITH_OPA(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_I1_BLEND_NORMAL_TO_888_WITH_MASK
#define LV_DRAW_SW_I1_BLEND_NORMAL_TO_888_WITH_MASK(...) LV_RESULT_INVALID
#endif

#ifndef LV_DRAW_SW_I1_BLEND_NORMAL_TO_888_MIX_MASK_OPA
#define LV_DRAW_SW_I1_BLEND_NORMAL_TO_888_MIX_MASK_OPA(...) LV_RESULT_INVALID
#endif

/**********************
 *   GLOBAL FUNCTIONS
 **********************/

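/**
 * Fill an area of an RGB888/XRGB8888 buffer with a color, optionally using opacity and/or a mask.
 * Falls back to the generic C loops below whenever the optional LV_DRAW_SW_COLOR_BLEND_TO_RGB888*
 * hooks return LV_RESULT_INVALID.
 * @param dsc           the fill descriptor (destination buffer, area, color, opacity, mask)
 * @param dest_px_size  destination pixel size in bytes: 3 for RGB888, 4 for XRGB8888
 */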
void LV_ATTRIBUTE_FAST_MEM lv_draw_sw_blend_color_to_rgb888(lv_draw_sw_blend_fill_dsc_t * dsc, uint32_t dest_px_size)
{
    int32_t w = dsc->dest_w;
    int32_t h = dsc->dest_h;
    lv_opa_t opa = dsc->opa;
    const lv_opa_t * mask = dsc->mask_buf;
    int32_t mask_stride = dsc->mask_stride;
    int32_t dest_stride = dsc->dest_stride;

    int32_t x;
    int32_t y;

    LV_UNUSED(w);
    LV_UNUSED(h);
    LV_UNUSED(x);
    LV_UNUSED(y);
    LV_UNUSED(opa);
    LV_UNUSED(mask);
    LV_UNUSED(mask_stride);
    LV_UNUSED(dest_stride);

    /*Simple fill*/
    if(mask == NULL && opa >= LV_OPA_MAX) {
        if(LV_RESULT_INVALID == LV_DRAW_SW_COLOR_BLEND_TO_RGB888(dsc, dest_px_size)) {
            if(dest_px_size == 3) {
                uint8_t * dest_buf_u8 = dsc->dest_buf;
                uint8_t * dest_buf_ori = dsc->dest_buf;
                w *= dest_px_size;

                for(x = 0; x < w; x += 3) {
                    dest_buf_u8[x + 0] = dsc->color.blue;
                    dest_buf_u8[x + 1] = dsc->color.green;
                    dest_buf_u8[x + 2] = dsc->color.red;
                }

                dest_buf_u8 += dest_stride;

                for(y = 1; y < h; y++) {
                    lv_memcpy(dest_buf_u8, dest_buf_ori, w);
                    dest_buf_u8 += dest_stride;
                }
            }
            if(dest_px_size == 4) {
                uint32_t color32 = lv_color_to_u32(dsc->color);
                uint32_t * dest_buf_u32 = dsc->dest_buf;
                for(y = 0; y < h; y++) {
                    for(x = 0; x <= w - 16; x += 16) {
                        dest_buf_u32[x + 0] = color32;
                        dest_buf_u32[x + 1] = color32;
                        dest_buf_u32[x + 2] = color32;
                        dest_buf_u32[x + 3] = color32;

                        dest_buf_u32[x + 4] = color32;
                        dest_buf_u32[x + 5] = color32;
                        dest_buf_u32[x + 6] = color32;
                        dest_buf_u32[x + 7] = color32;

                        dest_buf_u32[x + 8] = color32;
                        dest_buf_u32[x + 9] = color32;
                        dest_buf_u32[x + 10] = color32;
                        dest_buf_u32[x + 11] = color32;

                        dest_buf_u32[x + 12] = color32;
                        dest_buf_u32[x + 13] = color32;
                        dest_buf_u32[x + 14] = color32;
                        dest_buf_u32[x + 15] = color32;
                    }
                    for(; x < w; x++) {
                        dest_buf_u32[x] = color32;
                    }

                    dest_buf_u32 = drawbuf_next_row(dest_buf_u32, dest_stride);
                }
            }
        }
    }
    /*Opacity only*/
    else if(mask == NULL && opa < LV_OPA_MAX) {
        if(LV_RESULT_INVALID == LV_DRAW_SW_COLOR_BLEND_TO_RGB888_WITH_OPA(dsc, dest_px_size)) {
            uint32_t color32 = lv_color_to_u32(dsc->color);
            uint8_t * dest_buf = dsc->dest_buf;
            w *= dest_px_size;
            for(y = 0; y < h; y++) {
                for(x = 0; x < w; x += dest_px_size) {
                    lv_color_24_24_mix((const uint8_t *)&color32, &dest_buf[x], opa);
                }

                dest_buf = drawbuf_next_row(dest_buf, dest_stride);
            }
        }
    }
    /*Masked with full opacity*/
    else if(mask && opa >= LV_OPA_MAX) {
        if(LV_RESULT_INVALID == LV_DRAW_SW_COLOR_BLEND_TO_RGB888_WITH_MASK(dsc, dest_px_size)) {
            uint32_t color32 = lv_color_to_u32(dsc->color);
            uint8_t * dest_buf = dsc->dest_buf;
            w *= dest_px_size;

            for(y = 0; y < h; y++) {
                uint32_t mask_x;
                for(x = 0, mask_x = 0; x < w; x += dest_px_size, mask_x++) {
                    lv_color_24_24_mix((const uint8_t *)&color32, &dest_buf[x], mask[mask_x]);
                }
                dest_buf += dest_stride;
                mask += mask_stride;
            }
        }
    }
    /*Masked with opacity*/
    else {
        if(LV_RESULT_INVALID == LV_DRAW_SW_COLOR_BLEND_TO_RGB888_MIX_MASK_OPA(dsc, dest_px_size)) {
            uint32_t color32 = lv_color_to_u32(dsc->color);
            uint8_t * dest_buf = dsc->dest_buf;
            w *= dest_px_size;

            for(y = 0; y < h; y++) {
                uint32_t mask_x;
                for(x = 0, mask_x = 0; x < w; x += dest_px_size, mask_x++) {
                    lv_color_24_24_mix((const uint8_t *)&color32, &dest_buf[x], LV_OPA_MIX2(opa, mask[mask_x]));
                }
                dest_buf += dest_stride;
                mask += mask_stride;
            }
        }
    }
}

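/**
 * Blend an image onto an RGB888/XRGB8888 buffer by dispatching to the routine that matches
 * the source color format in `dsc`.
 * @param dsc           the image blend descriptor (source/destination buffers, opacity, mask, blend mode)
 * @param dest_px_size  destination pixel size in bytes: 3 for RGB888, 4 for XRGB8888
 */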
void LV_ATTRIBUTE_FAST_MEM lv_draw_sw_blend_image_to_rgb888(lv_draw_sw_blend_image_dsc_t * dsc, uint32_t dest_px_size)
{
    switch(dsc->src_color_format) {
#if LV_DRAW_SW_SUPPORT_RGB565
        case LV_COLOR_FORMAT_RGB565:
            rgb565_image_blend(dsc, dest_px_size);
            break;
#endif
        case LV_COLOR_FORMAT_RGB888:
            rgb888_image_blend(dsc, dest_px_size, 3);
            break;
#if LV_DRAW_SW_SUPPORT_XRGB8888
        case LV_COLOR_FORMAT_XRGB8888:
            rgb888_image_blend(dsc, dest_px_size, 4);
            break;
#endif
#if LV_DRAW_SW_SUPPORT_ARGB8888
        case LV_COLOR_FORMAT_ARGB8888:
            argb8888_image_blend(dsc, dest_px_size);
            break;
#endif
#if LV_DRAW_SW_SUPPORT_L8
        case LV_COLOR_FORMAT_L8:
            l8_image_blend(dsc, dest_px_size);
            break;
#endif
#if LV_DRAW_SW_SUPPORT_AL88
        case LV_COLOR_FORMAT_AL88:
            al88_image_blend(dsc, dest_px_size);
            break;
#endif
#if LV_DRAW_SW_SUPPORT_I1
        case LV_COLOR_FORMAT_I1:
            i1_image_blend(dsc, dest_px_size);
            break;
#endif
        default:
            LV_LOG_WARN("Not supported source color format");
            break;
    }
}

/**********************
 *   STATIC FUNCTIONS
 **********************/

#if LV_DRAW_SW_SUPPORT_I1
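/*Blend an I1 (1 bit per pixel, MSB first) source: each bit is expanded to black (0) or
 *white (255) on all three channels before the opacity/mask mixing.*/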
static void LV_ATTRIBUTE_FAST_MEM i1_image_blend(lv_draw_sw_blend_image_dsc_t * dsc, uint32_t dest_px_size)
{
    int32_t w = dsc->dest_w;
    int32_t h = dsc->dest_h;
    lv_opa_t opa = dsc->opa;
    uint8_t * dest_buf_u8 = dsc->dest_buf;
    int32_t dest_stride = dsc->dest_stride;
    const uint8_t * src_buf_i1 = dsc->src_buf;
    int32_t src_stride = dsc->src_stride;
    const lv_opa_t * mask_buf = dsc->mask_buf;
    int32_t mask_stride = dsc->mask_stride;

    int32_t dest_x;
    int32_t src_x;
    int32_t y;

    if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
        if(mask_buf == NULL && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_I1_BLEND_NORMAL_TO_888(dsc)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x += dest_px_size, src_x++) {
                        uint8_t chan_val = get_bit(src_buf_i1, src_x) * 255;
                        dest_buf_u8[dest_x + 2] = chan_val;
                        dest_buf_u8[dest_x + 1] = chan_val;
                        dest_buf_u8[dest_x + 0] = chan_val;
                    }
                    dest_buf_u8 = drawbuf_next_row(dest_buf_u8, dest_stride);
                    src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
                }
            }
        }
        else if(mask_buf == NULL && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_I1_BLEND_NORMAL_TO_888_WITH_OPA(dsc)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x += dest_px_size, src_x++) {
                        uint8_t chan_val = get_bit(src_buf_i1, src_x) * 255;
                        lv_color_8_24_mix(chan_val, &dest_buf_u8[dest_x], opa);
                    }
                    dest_buf_u8 = drawbuf_next_row(dest_buf_u8, dest_stride);
                    src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
                }
            }
        }
        else if(mask_buf && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_I1_BLEND_NORMAL_TO_888_WITH_MASK(dsc)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x += dest_px_size, src_x++) {
                        uint8_t chan_val = get_bit(src_buf_i1, src_x) * 255;
                        lv_color_8_24_mix(chan_val, &dest_buf_u8[dest_x], mask_buf[src_x]);
                    }
                    dest_buf_u8 = drawbuf_next_row(dest_buf_u8, dest_stride);
                    src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
        else if(mask_buf && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_I1_BLEND_NORMAL_TO_888_MIX_MASK_OPA(dsc)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x += dest_px_size, src_x++) {
                        uint8_t chan_val = get_bit(src_buf_i1, src_x) * 255;
                        lv_color_8_24_mix(chan_val, &dest_buf_u8[dest_x], LV_OPA_MIX2(opa, mask_buf[src_x]));
                    }
                    dest_buf_u8 = drawbuf_next_row(dest_buf_u8, dest_stride);
                    src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
    }
    else {
        for(y = 0; y < h; y++) {
            for(dest_x = 0, src_x = 0; src_x < w; dest_x += dest_px_size, src_x++) {
                lv_color32_t src_argb;
                src_argb.red = get_bit(src_buf_i1, src_x) * 255;
                src_argb.green = src_argb.red;
                src_argb.blue = src_argb.red;
                if(mask_buf == NULL) src_argb.alpha = opa;
                else src_argb.alpha = LV_OPA_MIX2(mask_buf[src_x], opa);
                blend_non_normal_pixel(&dest_buf_u8[dest_x], src_argb, dsc->blend_mode);
            }
            if(mask_buf) mask_buf += mask_stride;
            dest_buf_u8 = drawbuf_next_row(dest_buf_u8, dest_stride);
            src_buf_i1 = drawbuf_next_row(src_buf_i1, src_stride);
        }
    }
}
#endif

#if LV_DRAW_SW_SUPPORT_AL88
static void LV_ATTRIBUTE_FAST_MEM al88_image_blend(lv_draw_sw_blend_image_dsc_t * dsc, uint32_t dest_px_size)
{
    int32_t w = dsc->dest_w;
    int32_t h = dsc->dest_h;
    lv_opa_t opa = dsc->opa;
    uint8_t * dest_buf_u8 = dsc->dest_buf;
    int32_t dest_stride = dsc->dest_stride;
    const lv_color16a_t * src_buf_al88 = dsc->src_buf;
    int32_t src_stride = dsc->src_stride;
    const lv_opa_t * mask_buf = dsc->mask_buf;
    int32_t mask_stride = dsc->mask_stride;

    int32_t dest_x;
    int32_t src_x;
    int32_t y;

    if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
        if(mask_buf == NULL && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB888(dsc, dest_px_size)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x += dest_px_size, src_x++) {
                        lv_color_8_24_mix(src_buf_al88[src_x].lumi, &dest_buf_u8[dest_x], src_buf_al88[src_x].alpha);
                    }
                    dest_buf_u8 += dest_stride;
                    src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
                }
            }
        }
        else if(mask_buf == NULL && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB888_WITH_OPA(dsc, dest_px_size)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x += dest_px_size, src_x++) {
                        lv_color_8_24_mix(src_buf_al88[src_x].lumi, &dest_buf_u8[dest_x], LV_OPA_MIX2(src_buf_al88[src_x].alpha, opa));
                    }
                    dest_buf_u8 += dest_stride;
                    src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
                }
            }
        }
        else if(mask_buf && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB888_WITH_MASK(dsc, dest_px_size)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x += dest_px_size, src_x++) {
                        lv_color_8_24_mix(src_buf_al88[src_x].lumi, &dest_buf_u8[dest_x], LV_OPA_MIX2(src_buf_al88[src_x].alpha,
                                                                                                      mask_buf[src_x]));
                    }
                    dest_buf_u8 += dest_stride;
                    src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
        else if(mask_buf && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB888_MIX_MASK_OPA(dsc, dest_px_size)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x += dest_px_size, src_x++) {
                        lv_color_8_24_mix(src_buf_al88[src_x].lumi, &dest_buf_u8[dest_x], LV_OPA_MIX3(src_buf_al88[src_x].alpha,
                                                                                                      mask_buf[src_x], opa));
                    }
                    dest_buf_u8 += dest_stride;
                    src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
    }
    else {
        for(y = 0; y < h; y++) {
            for(dest_x = 0, src_x = 0; src_x < w; dest_x += dest_px_size, src_x++) {
                lv_color32_t src_argb;
                src_argb.red = src_argb.green = src_argb.blue = src_buf_al88[src_x].lumi;
                if(mask_buf == NULL) src_argb.alpha = LV_OPA_MIX2(src_buf_al88[src_x].alpha, opa);
                else src_argb.alpha = LV_OPA_MIX3(src_buf_al88[src_x].alpha, mask_buf[src_x], opa); /*The mask is indexed per pixel*/
                blend_non_normal_pixel(&dest_buf_u8[dest_x], src_argb, dsc->blend_mode);
            }
            if(mask_buf) mask_buf += mask_stride;
            dest_buf_u8 += dest_stride;
            src_buf_al88 = drawbuf_next_row(src_buf_al88, src_stride);
        }
    }
}

#endif

#if LV_DRAW_SW_SUPPORT_L8

static void LV_ATTRIBUTE_FAST_MEM l8_image_blend(lv_draw_sw_blend_image_dsc_t * dsc, uint32_t dest_px_size)
{
    int32_t w = dsc->dest_w;
    int32_t h = dsc->dest_h;
    lv_opa_t opa = dsc->opa;
    uint8_t * dest_buf_u8 = dsc->dest_buf;
    int32_t dest_stride = dsc->dest_stride;
    const uint8_t * src_buf_l8 = dsc->src_buf;
    int32_t src_stride = dsc->src_stride;
    const lv_opa_t * mask_buf = dsc->mask_buf;
    int32_t mask_stride = dsc->mask_stride;

    int32_t dest_x;
    int32_t src_x;
    int32_t y;

    if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
        if(mask_buf == NULL && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB888(dsc, dest_px_size)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x += dest_px_size, src_x++) {
                        dest_buf_u8[dest_x + 2] = src_buf_l8[src_x];
                        dest_buf_u8[dest_x + 1] = src_buf_l8[src_x];
                        dest_buf_u8[dest_x + 0] = src_buf_l8[src_x];
                    }
                    dest_buf_u8 += dest_stride;
                    src_buf_l8 = drawbuf_next_row(src_buf_l8, src_stride);
                }
            }
        }
        else if(mask_buf == NULL && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB888_WITH_OPA(dsc, dest_px_size)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x += dest_px_size, src_x++) {
                        lv_color_8_24_mix(src_buf_l8[src_x], &dest_buf_u8[dest_x], opa);
                    }
                    dest_buf_u8 += dest_stride;
                    src_buf_l8 = drawbuf_next_row(src_buf_l8, src_stride);
                }
            }
        }
        else if(mask_buf && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB888_WITH_MASK(dsc, dest_px_size)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x += dest_px_size, src_x++) {
                        lv_color_8_24_mix(src_buf_l8[src_x], &dest_buf_u8[dest_x], mask_buf[src_x]);
                    }
                    dest_buf_u8 += dest_stride;
                    src_buf_l8 = drawbuf_next_row(src_buf_l8, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
        else if(mask_buf && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_L8_BLEND_NORMAL_TO_RGB888_MIX_MASK_OPA(dsc, dest_px_size)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x += dest_px_size, src_x++) {
                        lv_color_8_24_mix(src_buf_l8[src_x], &dest_buf_u8[dest_x], LV_OPA_MIX2(opa, mask_buf[src_x]));
                    }
                    dest_buf_u8 += dest_stride;
                    src_buf_l8 = drawbuf_next_row(src_buf_l8, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
    }
    else {
        lv_color32_t src_argb;
        for(y = 0; y < h; y++) {
            for(dest_x = 0, src_x = 0; src_x < w; dest_x += dest_px_size, src_x++) {
                src_argb.red = src_buf_l8[src_x];
                src_argb.green = src_buf_l8[src_x];
                src_argb.blue = src_buf_l8[src_x];
                if(mask_buf == NULL) src_argb.alpha = opa;
                else src_argb.alpha = LV_OPA_MIX2(mask_buf[src_x], opa); /*The mask is indexed per pixel*/
                blend_non_normal_pixel(&dest_buf_u8[dest_x], src_argb, dsc->blend_mode);
            }
            if(mask_buf) mask_buf += mask_stride;
            dest_buf_u8 += dest_stride;
            src_buf_l8 = drawbuf_next_row(src_buf_l8, src_stride);
        }
    }
}

#endif

#if LV_DRAW_SW_SUPPORT_RGB565

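/*RGB565 channels are expanded to 8 bits with fixed-point multipliers:
 *5 bit red/blue: (x * 2106) >> 8 is approximately x * 255 / 31, and
 *6 bit green: (x * 1037) >> 8 is approximately x * 255 / 63,
 *so the maximum 5/6 bit channel values map exactly to 255.*/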
static void LV_ATTRIBUTE_FAST_MEM rgb565_image_blend(lv_draw_sw_blend_image_dsc_t * dsc, uint32_t dest_px_size)
{
    int32_t w = dsc->dest_w;
    int32_t h = dsc->dest_h;
    lv_opa_t opa = dsc->opa;
    uint8_t * dest_buf_u8 = dsc->dest_buf;
    int32_t dest_stride = dsc->dest_stride;
    const lv_color16_t * src_buf_c16 = (const lv_color16_t *) dsc->src_buf;
    int32_t src_stride = dsc->src_stride;
    const lv_opa_t * mask_buf = dsc->mask_buf;
    int32_t mask_stride = dsc->mask_stride;

    int32_t src_x;
    int32_t dest_x;
    int32_t y;

    if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
        if(mask_buf == NULL && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_RGB888(dsc, dest_px_size)) {
                for(y = 0; y < h; y++) {
                    for(src_x = 0, dest_x = 0; src_x < w; dest_x += dest_px_size, src_x++) {
                        dest_buf_u8[dest_x + 2] = (src_buf_c16[src_x].red * 2106) >> 8; /*To make it rounded*/
                        dest_buf_u8[dest_x + 1] = (src_buf_c16[src_x].green * 1037) >> 8;
                        dest_buf_u8[dest_x + 0] = (src_buf_c16[src_x].blue * 2106) >> 8;
                    }
                    dest_buf_u8 += dest_stride;
                    src_buf_c16 = drawbuf_next_row(src_buf_c16, src_stride);
                }
            }
        }
        else if(mask_buf == NULL && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_RGB888_WITH_OPA(dsc, dest_px_size)) {
                uint8_t res[3];
                for(y = 0; y < h; y++) {
                    for(src_x = 0, dest_x = 0; src_x < w; dest_x += dest_px_size, src_x++) {
                        res[2] = (src_buf_c16[src_x].red * 2106) >> 8; /*To make it rounded*/
                        res[1] = (src_buf_c16[src_x].green * 1037) >> 8;
                        res[0] = (src_buf_c16[src_x].blue * 2106) >> 8;
                        lv_color_24_24_mix(res, &dest_buf_u8[dest_x], opa);
                    }
                    dest_buf_u8 += dest_stride;
                    src_buf_c16 = drawbuf_next_row(src_buf_c16, src_stride);
                }
            }
        }
        else if(mask_buf && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_RGB888_WITH_MASK(dsc, dest_px_size)) {
                uint8_t res[3];
                for(y = 0; y < h; y++) {
                    for(src_x = 0, dest_x = 0; src_x < w; dest_x += dest_px_size, src_x++) {
                        res[2] = (src_buf_c16[src_x].red * 2106) >> 8; /*To make it rounded*/
                        res[1] = (src_buf_c16[src_x].green * 1037) >> 8;
                        res[0] = (src_buf_c16[src_x].blue * 2106) >> 8;
                        lv_color_24_24_mix(res, &dest_buf_u8[dest_x], mask_buf[src_x]);
                    }
                    dest_buf_u8 += dest_stride;
                    src_buf_c16 = drawbuf_next_row(src_buf_c16, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
        else {
            if(LV_RESULT_INVALID == LV_DRAW_SW_RGB565_BLEND_NORMAL_TO_RGB888_MIX_MASK_OPA(dsc, dest_px_size)) {
                uint8_t res[3];
                for(y = 0; y < h; y++) {
                    for(src_x = 0, dest_x = 0; src_x < w; dest_x += dest_px_size, src_x++) {
                        res[2] = (src_buf_c16[src_x].red * 2106) >> 8; /*To make it rounded*/
                        res[1] = (src_buf_c16[src_x].green * 1037) >> 8;
                        res[0] = (src_buf_c16[src_x].blue * 2106) >> 8;
                        lv_color_24_24_mix(res, &dest_buf_u8[dest_x], LV_OPA_MIX2(opa, mask_buf[src_x]));
                    }
                    dest_buf_u8 += dest_stride;
                    src_buf_c16 = drawbuf_next_row(src_buf_c16, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
    }
    else {
        lv_color32_t src_argb;
        for(y = 0; y < h; y++) {
            for(src_x = 0, dest_x = 0; src_x < w; src_x++, dest_x += dest_px_size) {
                src_argb.red = (src_buf_c16[src_x].red * 2106) >> 8;
                src_argb.green = (src_buf_c16[src_x].green * 1037) >> 8;
                src_argb.blue = (src_buf_c16[src_x].blue * 2106) >> 8;
                if(mask_buf == NULL) src_argb.alpha = opa;
                else src_argb.alpha = LV_OPA_MIX2(mask_buf[src_x], opa);
                blend_non_normal_pixel(&dest_buf_u8[dest_x], src_argb, dsc->blend_mode);
            }
            if(mask_buf) mask_buf += mask_stride;
            dest_buf_u8 += dest_stride;
            src_buf_c16 = drawbuf_next_row(src_buf_c16, src_stride);
        }
    }
}

#endif

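/*Blend an RGB888 or XRGB8888 source (selected by src_px_size: 3 or 4 bytes per pixel) onto the
 *destination. Both buffers store the channels in B, G, R byte order, so the pixels can be copied
 *or mixed byte by byte.*/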
static void LV_ATTRIBUTE_FAST_MEM rgb888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc, const uint8_t dest_px_size,
                                                     uint32_t src_px_size)
{
    int32_t w = dsc->dest_w * dest_px_size;
    int32_t h = dsc->dest_h;
    lv_opa_t opa = dsc->opa;
    uint8_t * dest_buf = dsc->dest_buf;
    int32_t dest_stride = dsc->dest_stride;
    const uint8_t * src_buf = dsc->src_buf;
    int32_t src_stride = dsc->src_stride;
    const lv_opa_t * mask_buf = dsc->mask_buf;
    int32_t mask_stride = dsc->mask_stride;

    int32_t dest_x;
    int32_t src_x;
    int32_t y;

    if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
        /*Special case*/
        if(mask_buf == NULL && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_RGB888(dsc, dest_px_size, src_px_size)) {
                if(src_px_size == dest_px_size) {
                    for(y = 0; y < h; y++) {
                        lv_memcpy(dest_buf, src_buf, w);
                        dest_buf += dest_stride;
                        src_buf += src_stride;
                    }
                }
                else {
                    for(y = 0; y < h; y++) {
                        for(dest_x = 0, src_x = 0; dest_x < w; dest_x += dest_px_size, src_x += src_px_size) {
                            dest_buf[dest_x + 0] = src_buf[src_x + 0];
                            dest_buf[dest_x + 1] = src_buf[src_x + 1];
                            dest_buf[dest_x + 2] = src_buf[src_x + 2];
                        }
                        dest_buf += dest_stride;
                        src_buf += src_stride;
                    }
                }
            }
        }
        if(mask_buf == NULL && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_RGB888_WITH_OPA(dsc, dest_px_size, src_px_size)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; dest_x < w; dest_x += dest_px_size, src_x += src_px_size) {
                        lv_color_24_24_mix(&src_buf[src_x], &dest_buf[dest_x], opa);
                    }
                    dest_buf += dest_stride;
                    src_buf += src_stride;
                }
            }
        }
        if(mask_buf && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_RGB888_WITH_MASK(dsc, dest_px_size, src_px_size)) {
                uint32_t mask_x;
                for(y = 0; y < h; y++) {
                    for(mask_x = 0, dest_x = 0, src_x = 0; dest_x < w; mask_x++, dest_x += dest_px_size, src_x += src_px_size) {
                        lv_color_24_24_mix(&src_buf[src_x], &dest_buf[dest_x], mask_buf[mask_x]);
                    }
                    dest_buf += dest_stride;
                    src_buf += src_stride;
                    mask_buf += mask_stride;
                }
            }
        }
        if(mask_buf && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_RGB888_BLEND_NORMAL_TO_RGB888_MIX_MASK_OPA(dsc, dest_px_size, src_px_size)) {
                uint32_t mask_x;
                for(y = 0; y < h; y++) {
                    for(mask_x = 0, dest_x = 0, src_x = 0; dest_x < w; mask_x++, dest_x += dest_px_size, src_x += src_px_size) {
                        lv_color_24_24_mix(&src_buf[src_x], &dest_buf[dest_x], LV_OPA_MIX2(opa, mask_buf[mask_x]));
                    }
                    dest_buf += dest_stride;
                    src_buf += src_stride;
                    mask_buf += mask_stride;
                }
            }
        }
    }
    else {
        lv_color32_t src_argb;
        for(y = 0; y < h; y++) {
            for(dest_x = 0, src_x = 0; dest_x < w; dest_x += dest_px_size, src_x += src_px_size) {
                src_argb.red = src_buf[src_x + 2];
                src_argb.green = src_buf[src_x + 1];
                src_argb.blue = src_buf[src_x + 0];
                if(mask_buf == NULL) src_argb.alpha = opa;
                else src_argb.alpha = LV_OPA_MIX2(mask_buf[dest_x / dest_px_size], opa); /*The mask is indexed per pixel*/

                blend_non_normal_pixel(&dest_buf[dest_x], src_argb, dsc->blend_mode);
            }
            if(mask_buf) mask_buf += mask_stride;
            dest_buf += dest_stride;
            src_buf += src_stride;
        }
    }
}

#if LV_DRAW_SW_SUPPORT_ARGB8888

static void LV_ATTRIBUTE_FAST_MEM argb8888_image_blend(lv_draw_sw_blend_image_dsc_t * dsc, uint32_t dest_px_size)
{
    int32_t w = dsc->dest_w;
    int32_t h = dsc->dest_h;
    lv_opa_t opa = dsc->opa;
    uint8_t * dest_buf = dsc->dest_buf;
    int32_t dest_stride = dsc->dest_stride;
    const lv_color32_t * src_buf_c32 = dsc->src_buf;
    int32_t src_stride = dsc->src_stride;
    const lv_opa_t * mask_buf = dsc->mask_buf;
    int32_t mask_stride = dsc->mask_stride;

    int32_t dest_x;
    int32_t src_x;
    int32_t y;

    if(dsc->blend_mode == LV_BLEND_MODE_NORMAL) {
        if(mask_buf == NULL && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_RGB888(dsc, dest_px_size)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x += dest_px_size, src_x++) {
                        lv_color_24_24_mix((const uint8_t *)&src_buf_c32[src_x], &dest_buf[dest_x], src_buf_c32[src_x].alpha);
                    }
                    dest_buf += dest_stride;
                    src_buf_c32 = drawbuf_next_row(src_buf_c32, src_stride);
                }
            }
        }
        else if(mask_buf == NULL && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_RGB888_WITH_OPA(dsc, dest_px_size)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x += dest_px_size, src_x++) {
                        lv_color_24_24_mix((const uint8_t *)&src_buf_c32[src_x], &dest_buf[dest_x], LV_OPA_MIX2(src_buf_c32[src_x].alpha, opa));
                    }
                    dest_buf += dest_stride;
                    src_buf_c32 = drawbuf_next_row(src_buf_c32, src_stride);
                }
            }
        }
        else if(mask_buf && opa >= LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_RGB888_WITH_MASK(dsc, dest_px_size)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x += dest_px_size, src_x++) {
                        lv_color_24_24_mix((const uint8_t *)&src_buf_c32[src_x], &dest_buf[dest_x],
                                           LV_OPA_MIX2(src_buf_c32[src_x].alpha, mask_buf[src_x]));
                    }
                    dest_buf += dest_stride;
                    src_buf_c32 = drawbuf_next_row(src_buf_c32, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
        else if(mask_buf && opa < LV_OPA_MAX) {
            if(LV_RESULT_INVALID == LV_DRAW_SW_ARGB8888_BLEND_NORMAL_TO_RGB888_MIX_MASK_OPA(dsc, dest_px_size)) {
                for(y = 0; y < h; y++) {
                    for(dest_x = 0, src_x = 0; src_x < w; dest_x += dest_px_size, src_x++) {
                        lv_color_24_24_mix((const uint8_t *)&src_buf_c32[src_x], &dest_buf[dest_x],
                                           LV_OPA_MIX3(src_buf_c32[src_x].alpha, mask_buf[src_x], opa));
                    }
                    dest_buf += dest_stride;
                    src_buf_c32 = drawbuf_next_row(src_buf_c32, src_stride);
                    mask_buf += mask_stride;
                }
            }
        }
    }
    else {
        lv_color32_t src_argb;
        for(y = 0; y < h; y++) {
            for(dest_x = 0, src_x = 0; src_x < w; dest_x += dest_px_size, src_x++) {
                src_argb = src_buf_c32[src_x];
                if(mask_buf == NULL) src_argb.alpha = LV_OPA_MIX2(src_argb.alpha, opa);
                else src_argb.alpha = LV_OPA_MIX3(src_argb.alpha, mask_buf[src_x], opa); /*The mask is indexed per pixel*/

                blend_non_normal_pixel(&dest_buf[dest_x], src_argb, dsc->blend_mode);
            }
            if(mask_buf) mask_buf += mask_stride;
            dest_buf += dest_stride;
            src_buf_c32 = drawbuf_next_row(src_buf_c32, src_stride);
        }
    }
}

#endif

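/*Apply an additive, subtractive or multiply blend of `src` on a single destination pixel
 *(stored as B, G, R bytes), then mix the result into the destination with `src.alpha`.*/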
static inline void LV_ATTRIBUTE_FAST_MEM blend_non_normal_pixel(uint8_t * dest, lv_color32_t src, lv_blend_mode_t mode)
{
    uint8_t res[3] = {0, 0, 0};
    switch(mode) {
        case LV_BLEND_MODE_ADDITIVE:
            res[0] = LV_MIN(dest[0] + src.blue, 255);
            res[1] = LV_MIN(dest[1] + src.green, 255);
            res[2] = LV_MIN(dest[2] + src.red, 255);
            break;
        case LV_BLEND_MODE_SUBTRACTIVE:
            res[0] = LV_MAX(dest[0] - src.blue, 0);
            res[1] = LV_MAX(dest[1] - src.green, 0);
            res[2] = LV_MAX(dest[2] - src.red, 0);
            break;
        case LV_BLEND_MODE_MULTIPLY:
            res[0] = (dest[0] * src.blue) >> 8;
            res[1] = (dest[1] * src.green) >> 8;
            res[2] = (dest[2] * src.red) >> 8;
            break;
        default:
            LV_LOG_WARN("Not supported blend mode: %d", mode);
            return;
    }
    lv_color_24_24_mix(res, dest, src.alpha);
}

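/*Mix a single grey value into a B, G, R destination pixel with the given opacity.
 *mix == 0 leaves the destination unchanged, mix >= LV_OPA_MAX overwrites it.*/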
static inline void LV_ATTRIBUTE_FAST_MEM lv_color_8_24_mix(const uint8_t src, uint8_t * dest, uint8_t mix)
{
    if(mix == 0) return;

    if(mix >= LV_OPA_MAX) {
        dest[0] = src;
        dest[1] = src;
        dest[2] = src;
    }
    else {
        lv_opa_t mix_inv = 255 - mix;
        dest[0] = (uint32_t)((uint32_t)src * mix + dest[0] * mix_inv) >> 8;
        dest[1] = (uint32_t)((uint32_t)src * mix + dest[1] * mix_inv) >> 8;
        dest[2] = (uint32_t)((uint32_t)src * mix + dest[2] * mix_inv) >> 8;
    }
}

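/*Mix a B, G, R source pixel into the destination pixel with the given opacity.
 *mix == 0 leaves the destination unchanged, mix >= LV_OPA_MAX copies the source as-is.*/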
static inline void LV_ATTRIBUTE_FAST_MEM lv_color_24_24_mix(const uint8_t * src, uint8_t * dest, uint8_t mix)
{
    if(mix == 0) return;

    if(mix >= LV_OPA_MAX) {
        dest[0] = src[0];
        dest[1] = src[1];
        dest[2] = src[2];
    }
    else {
        lv_opa_t mix_inv = 255 - mix;
        dest[0] = (uint32_t)((uint32_t)src[0] * mix + dest[0] * mix_inv) >> 8;
        dest[1] = (uint32_t)((uint32_t)src[1] * mix + dest[1] * mix_inv) >> 8;
        dest[2] = (uint32_t)((uint32_t)src[2] * mix + dest[2] * mix_inv) >> 8;
    }
}

#if LV_DRAW_SW_SUPPORT_I1

static inline uint8_t LV_ATTRIBUTE_FAST_MEM get_bit(const uint8_t * buf, int32_t bit_idx)
{
    return (buf[bit_idx / 8] >> (7 - (bit_idx % 8))) & 1;
}

#endif

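/*Advance a draw buffer pointer by one row; `stride` is given in bytes.*/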
static inline void * LV_ATTRIBUTE_FAST_MEM drawbuf_next_row(const void * buf, uint32_t stride)
{
    return (void *)((uint8_t *)buf + stride);
}

#endif /*LV_DRAW_SW_SUPPORT_RGB888 || LV_DRAW_SW_SUPPORT_XRGB8888*/

#endif /*LV_USE_DRAW_SW*/