1 /**
2 * @file lv_gpu_d2_ra6m3.c
3 *
4 */
5
6 /*********************
7 * INCLUDES
8 *********************/
9 #include "lv_gpu_d2_ra6m3.h"
10 #include "../../core/lv_refr.h"
11 #include <math.h>
12
13 #if LV_USE_GPU_RA6M3_G2D
14
15 #include LV_GPU_RA6M3_G2D_INCLUDE
16
17 /*********************
18 * DEFINES
19 *********************/
20 #define LOG_ERRORS
21 #ifdef LOG_ERRORS
22 #define STRINGIFY(x) #x
23 #define TOSTRING(x) STRINGIFY(x)
24
25 #define ERROR_LIST_SIZE (4)
26 #define D2_EXEC(a) lv_port_gpu_log_error(a, __func__, __LINE__)
27 #else
/* Error logging disabled: execute the call directly without status checking. */
29 #define D2_EXEC(a) a;
30 #endif
31
32 /**********************
33 * TYPEDEFS
34 **********************/
/* One entry in the circular log of failed D/AVE 2D calls (see D2_EXEC). */
typedef struct {
    d2_s32 error;      /* status code returned by the failing d2_* call */
    const char * func; /* function in which the call failed */
    int line;          /* source line of the failing call */
} log_error_entry;
40
41 /**********************
42 * STATIC PROTOTYPES
43 **********************/
44 #ifdef LOG_ERRORS
45 static void lv_port_gpu_log_error(d2_s32 status, const char * func, int line);
46 #endif
47 static void invalidate_cache(void);
48
49 void lv_draw_gpu_letter(lv_draw_ctx_t * draw_ctx, const lv_draw_label_dsc_t * dsc, const lv_point_t * pos_p,
50 uint32_t letter);
51
52 /**********************
53 * STATIC VARIABLES
54 **********************/
55 #ifdef LOG_ERRORS
56 static log_error_entry log_error_list[ERROR_LIST_SIZE];
57 static int error_list_index;
58 static int error_count;
59 #endif
60
61 static d2_device * _d2_handle;
62 static d2_renderbuffer * renderbuffer;
63
64 static d2_s32 src_cf_val, dst_cf_val;
65 static lv_draw_img_dsc_t img_dsc;
66 static bool color_key_enabled, alpha_enabled, blend_enabled, colorize_enabled;
67
68 /**********************
69 * STATIC FUNCTIONS
70 **********************/
lv_port_gpu_cf_lv_to_d2(lv_img_cf_t cf)71 static d2_s32 lv_port_gpu_cf_lv_to_d2(lv_img_cf_t cf)
72 {
73 d2_s32 d2_cf;
74
75 #if (DLG_LVGL_CF == 1)
76 switch(cf & ~(1 << 5)) {
77 #else
78 switch(cf) {
79 #endif /* (DLG_LVGL_CF == 1) */
80 case LV_IMG_CF_TRUE_COLOR:
81 d2_cf = d2_mode_rgb565;
82 break;
83 case LV_IMG_CF_TRUE_COLOR_CHROMA_KEYED:
84 d2_cf = d2_mode_rgb565;
85 break;
86 case LV_IMG_CF_ALPHA_1BIT:
87 d2_cf = d2_mode_alpha1;
88 break;
89 case LV_IMG_CF_ALPHA_2BIT:
90 d2_cf = d2_mode_alpha2;
91 break;
92 case LV_IMG_CF_ALPHA_4BIT:
93 d2_cf = d2_mode_alpha4;
94 break;
95 case LV_IMG_CF_ALPHA_8BIT:
96 d2_cf = d2_mode_alpha8;
97 break;
98 case LV_IMG_CF_INDEXED_1BIT:
99 d2_cf = d2_mode_i1 | d2_mode_clut;
100 break;
101 case LV_IMG_CF_INDEXED_2BIT:
102 d2_cf = d2_mode_i2 | d2_mode_clut;
103 break;
104 case LV_IMG_CF_INDEXED_4BIT:
105 d2_cf = d2_mode_i4 | d2_mode_clut;
106 break;
107 case LV_IMG_CF_INDEXED_8BIT:
108 d2_cf = d2_mode_i8 | d2_mode_clut;
109 break;
110 #if (DLG_LVGL_CF == 1)
111 case LV_IMG_CF_RGB565:
112 d2_cf = d2_mode_rgb565;
113 break;
114 case LV_IMG_CF_RGB888:
115 d2_cf = d2_mode_rgb888;
116 break;
117 case LV_IMG_CF_RGBA8888:
118 d2_cf = d2_mode_rgba8888;
119 break;
120 #endif /* DLG_LVGL_CF */
121 default:
122 return -1;
123 }
124
125 #if (DLG_LVGL_CF == 1)
126 return d2_cf | (cf & (1 << 5) ? d2_mode_rle : 0);
127 #else
128 return d2_cf;
129 #endif /* (DLG_LVGL_CF == 1) */
130 }
131
132 static bool lv_port_gpu_cf_fb_valid(d2_s32 cf)
133 {
134 if((cf & (d2_mode_rle | d2_mode_clut)) || cf < 0) {
135 return false;
136 }
137
138 switch(cf) {
139 case d2_mode_alpha8:
140 case d2_mode_rgb565:
141 case d2_mode_argb8888:
142 case d2_mode_argb4444:
143 case d2_mode_rgba8888:
144 case d2_mode_rgba4444:
145 return true;
146 default:
147 return false;
148 }
149 }
150
151 static bool lv_port_gpu_cf_has_alpha(d2_s32 cf)
152 {
153 switch(cf & ~(d2_mode_clut | d2_mode_rle)) {
154 case d2_mode_argb8888:
155 case d2_mode_rgba8888:
156 case d2_mode_argb4444:
157 case d2_mode_rgba4444:
158 case d2_mode_argb1555:
159 case d2_mode_rgba5551:
160 case d2_mode_ai44:
161 case d2_mode_i8:
162 case d2_mode_i4:
163 case d2_mode_i2:
164 case d2_mode_i1:
165 case d2_mode_alpha8:
166 case d2_mode_alpha4:
167 case d2_mode_alpha2:
168 case d2_mode_alpha1:
169 return true;
170 default:
171 return false;
172 }
173 }
174
175 static bool lv_port_gpu_cf_is_alpha(d2_s32 cf)
176 {
177 switch(cf & ~d2_mode_rle) {
178 case d2_mode_alpha8:
179 case d2_mode_alpha4:
180 case d2_mode_alpha2:
181 case d2_mode_alpha1:
182 return true;
183 default:
184 return false;
185 }
186 }
187
188 static d2_color lv_port_gpu_color_lv_to_d2(lv_color_t color)
189 {
190 uint8_t alpha, red, green, blue;
191
192 alpha = 0xFF;
193 red = color.ch.red << 3 | color.ch.red >> 2;
194 green = color.ch.green << 2 | color.ch.green >> 4;
195 blue = color.ch.blue << 3 | color.ch.blue >> 2;
196
197 return (alpha) << 24UL
198 | (red) << 16UL
199 | (green) << 8UL
200 | (blue) << 0UL;
201 }
202
/**
 * Compute the low/high colorize constants that implement LVGL recoloring
 * (out = src * (1 - opa) + recolor * opa) in the texture color operation:
 * *cl = recolor * opa, *ch = recolor * opa + (1 - opa) per channel.
 * Reads the static img_dsc configured by lv_port_gpu_config_blit().
 */
static void lv_port_gpu_get_recolor_consts(d2_color * cl, d2_color * ch)
{
    d2_color c = lv_port_gpu_color_lv_to_d2(img_dsc.recolor);
    /* Clamp opacity to full cover */
    d2_alpha r, g, b, opa = img_dsc.recolor_opa > LV_OPA_MAX ? LV_OPA_COVER : img_dsc.recolor_opa;

    /* Scale each channel of the recolor by the recolor opacity */
    r = ((uint32_t)((uint8_t)(c >> 16UL)) * opa) / 255;
    g = ((uint32_t)((uint8_t)(c >> 8UL)) * opa) / 255;
    b = ((uint32_t)((uint8_t)(c >> 0UL)) * opa) / 255;
    *cl = r << 16UL | g << 8UL | b << 0UL;

    /* High constant adds the remaining (1 - opa) weight for the source */
    r += 255 - opa;
    g += 255 - opa;
    b += 255 - opa;
    *ch = r << 16UL | g << 8UL | b << 0UL;
}
218
/**
 * For CLUT-based source formats, split the palette off the pixel data.
 * LVGL stores the palette (32-bit entries) in front of the pixel data, so
 * on return *clut points at the palette and *src is advanced past it.
 * @return the number of palette entries, 0 if the format has no CLUT
 */
static int lv_port_gpu_handle_indexed_color(const lv_color_t ** src, const d2_color ** clut, d2_s32 cf)
{
    int clut_len = 0;

    if(cf & d2_mode_clut) {
        /* Calculate CLUT length in entries */
        switch(cf & ~(d2_mode_clut | d2_mode_rle)) {
            case d2_mode_i1:
                clut_len = 2;
                break;
            case d2_mode_i2:
                clut_len = 4;
                break;
            case d2_mode_i4:
                clut_len = 16;
                break;
            case d2_mode_i8:
                clut_len = 256;
                break;
            case d2_mode_ai44:
                clut_len = 16;
                break;
            default:
                return 0; /* flag set but not an indexed format: nothing to split */
        }

        /* Palette first, pixel data follows (each palette entry is 32 bit) */
        *clut = (const d2_color *)*src;
        *src = (const lv_color_t *)((const uint32_t *)*src + clut_len);
    }
    return clut_len;
}
250
251 static int lv_port_gpu_cf_bpp(d2_s32 cf)
252 {
253 switch(cf & ~(d2_mode_clut | d2_mode_rle)) {
254 case d2_mode_argb8888:
255 return 32;
256 case d2_mode_rgba8888:
257 return 32;
258 case d2_mode_rgb888:
259 return 32;
260 case d2_mode_argb4444:
261 return 16;
262 case d2_mode_rgba4444:
263 return 16;
264 case d2_mode_argb1555:
265 return 16;
266 case d2_mode_rgba5551:
267 return 16;
268 case d2_mode_rgb565:
269 return 16;
270 case d2_mode_ai44:
271 return 8;
272 case d2_mode_i8:
273 return 8;
274 case d2_mode_i4:
275 return 4;
276 case d2_mode_i2:
277 return 2;
278 case d2_mode_i1:
279 return 1;
280 case d2_mode_alpha8:
281 return 8;
282 case d2_mode_alpha4:
283 return 4;
284 case d2_mode_alpha2:
285 return 2;
286 case d2_mode_alpha1:
287 return 1;
288 default:
289 return 0;
290 }
291 }
292
/* Default framebuffer color format of this port (RGB565). */
static d2_s32 lv_port_gpu_cf_get_default(void)
{
    return d2_mode_rgb565;
}
297
298 static void lv_port_gpu_config_blit_clear(void)
299 {
300 alpha_enabled = false;
301 color_key_enabled = false;
302 blend_enabled = true;
303 colorize_enabled = false;
304
305 lv_draw_img_dsc_init(&img_dsc);
306
307 src_cf_val = lv_port_gpu_cf_get_default();
308 dst_cf_val = lv_port_gpu_cf_get_default();
309 }
310
/* Public init hook: reset the cached blit configuration to defaults. */
void lv_port_gpu_init(void)
{
    lv_port_gpu_config_blit_clear();
}
315
/**
 * Rotate a point, given in 4-bit fixed point coordinates, around a pivot
 * by the angle whose cosine/sine are supplied. Result is truncated back
 * to fixed point.
 */
static void lv_port_gpu_rotate_point(int * x, int * y, float cos_angle, float sin_angle, int pivot_x, int pivot_y)
{
    /* Translate to pivot-relative coordinates and drop the 4 fraction bits */
    float u = ((float)(*x - pivot_x)) / 16.0f;
    float v = ((float)(*y - pivot_y)) / 16.0f;

    /* Standard 2D rotation, then back to fixed point and pivot space */
    *x = pivot_x + (int)(((u * cos_angle) - (v * sin_angle)) * 16.0f);
    *y = pivot_y + (int)(((u * sin_angle) + (v * cos_angle)) * 16.0f);
}
332
333 void lv_draw_ra6m3_g2d_init(void)
334 {
335 if(_d2_handle != NULL) {
336 return;
337 }
338
339 _d2_handle = d2_opendevice(0);
340
341 if(_d2_handle == NULL)
342 return;
343
344 /* set blocksize for default displaylist */
345 if(d2_setdlistblocksize(_d2_handle, 25) != D2_OK) {
346 LV_LOG_ERROR("Could NOT d2_setdlistblocksize\n");
347 d2_closedevice(_d2_handle);
348
349 return;
350 }
351
352 /* bind the hardware */
353 if(d2_inithw(_d2_handle, 0) != D2_OK) {
354 LV_LOG_ERROR("Could NOT d2_inithw\n");
355 d2_closedevice(_d2_handle);
356
357 return;
358 }
359
360 renderbuffer = d2_newrenderbuffer(_d2_handle, 20, 20);
361 if(!renderbuffer) {
362 LV_LOG_ERROR("NO renderbuffer\n");
363 d2_closedevice(_d2_handle);
364
365 return;
366 }
367 }
368
/* Free the renderbuffer and close the D/AVE 2D device; no-op if not open. */
static void lv_port_gpu_hw_deinit(void)
{
    if(_d2_handle == NULL)
        return;

    D2_EXEC(d2_freerenderbuffer(_d2_handle, renderbuffer));
    D2_EXEC(d2_closedevice(_d2_handle));

    /* Clear the handles so a later init can run again */
    renderbuffer = NULL;
    _d2_handle = NULL;
}
380
/* Public teardown hook.
 * NOTE(review): despite the name this deinitializes the device rather than
 * flushing pending work — confirm against the callers' expectation. */
void lv_port_gpu_flush(void)
{
    lv_port_gpu_hw_deinit();
}
385
/* Begin recording render operations into the shared renderbuffer. */
static void lv_port_gpu_start_render(void)
{
    D2_EXEC(d2_selectrenderbuffer(_d2_handle, renderbuffer));
}
390
/* Block until the GPU has finished the current frame. */
static void lv_port_gpu_complete_render(void)
{
    D2_EXEC(d2_flushframe(_d2_handle));
}
395
/* wait_for_finish callback: drain GPU work, then the SW renderer's work. */
void lv_port_gpu_wait(lv_draw_ctx_t * draw_ctx)
{
    lv_port_gpu_complete_render();

    lv_draw_sw_wait_for_finish(draw_ctx);
}
402
/* Submit the recorded renderbuffer to the hardware (if the device is open). */
static void lv_port_gpu_execute_render(void)
{
    if(_d2_handle) {
        D2_EXEC(d2_executerenderbuffer(_d2_handle, renderbuffer, 0));
    }
}
409
410 void lv_port_gpu_blit(int32_t x, int32_t y, lv_color_t * dst, const lv_area_t * fill_area)
411 {
412 uint32_t ModeSrc;
413
414 ModeSrc = d2_mode_rgb565;
415
416 lv_coord_t dst_width, dst_hight;
417 dst_width = lv_area_get_width(fill_area);
418 dst_hight = lv_area_get_height(fill_area);
419
420 d2_selectrenderbuffer(_d2_handle, renderbuffer);
421
422 // Generate render operations
423 d2_framebuffer(_d2_handle, (uint16_t *)&fb_background[0], LV_HOR_RES_MAX, LV_HOR_RES_MAX,
424 MAX(fill_area->y2 + 1, 2), lv_port_gpu_cf_get_default());
425
426 d2_cliprect(_d2_handle, 0, 0, LV_HOR_RES_MAX - 1, fill_area->y2);
427 d2_setblitsrc(_d2_handle, (void *) dst, dst_width, dst_width, dst_hight, ModeSrc);
428 d2_blitcopy(_d2_handle, dst_width, dst_hight, 0, 0, D2_FIX4(dst_width), D2_FIX4(dst_hight),
429 D2_FIX4(fill_area->x1), D2_FIX4(fill_area->y1), 0);
430
431 // Execute render operations
432 d2_executerenderbuffer(_d2_handle, renderbuffer, 0);
433 }
434
/**
 * Fill fill_area of dest_buf with a solid color at the given opacity.
 * dst_width is used both as framebuffer pitch and width; coordinates are
 * relative to dest_buf.
 */
void lv_port_gpu_fill(lv_color_t * dest_buf, const lv_area_t * fill_area, lv_coord_t dst_width,
                      lv_color_t color, lv_opa_t opa)
{
    invalidate_cache();

    lv_port_gpu_start_render();

    /* NOTE(review): the minimum of 2 presumably satisfies a Dave2D framebuffer
     * size constraint — confirm against the driver documentation */
    D2_EXEC(d2_framebuffer(_d2_handle, d1_maptovidmem(_d2_handle, dest_buf), MAX(dst_width, 2), MAX(dst_width, 2),
                           MAX(fill_area->y2 + 1, 2), lv_port_gpu_cf_get_default()));

    D2_EXEC(d2_cliprect(_d2_handle, 0, 0, dst_width - 1, fill_area->y2));
    D2_EXEC(d2_setalpha(_d2_handle, opa > LV_OPA_MAX ? 0xFF : opa));
    D2_EXEC(d2_setcolor(_d2_handle, 0, lv_port_gpu_color_lv_to_d2(color)));
    D2_EXEC(d2_renderbox(_d2_handle, D2_FIX4(fill_area->x1), D2_FIX4(fill_area->y1),
                         D2_FIX4(lv_area_get_width(fill_area)), D2_FIX4(lv_area_get_height(fill_area))));

    lv_port_gpu_execute_render();
}
453
454 bool lv_port_gpu_config_blit(const lv_draw_img_dsc_t * draw_dsc, lv_img_cf_t dst_cf,
455 lv_img_cf_t src_cf, bool alpha_en, bool color_key_en, bool blend_en, bool colorize_en)
456 {
457 d2_s32 d2_src_cf, d2_dst_cf;
458
459 if(blend_en && draw_dsc->blend_mode != LV_BLEND_MODE_NORMAL
460 && draw_dsc->blend_mode != LV_BLEND_MODE_ADDITIVE) {
461 return false;
462 }
463
464 d2_src_cf = lv_port_gpu_cf_lv_to_d2(src_cf);
465 d2_dst_cf = lv_port_gpu_cf_lv_to_d2(dst_cf);
466 if(d2_src_cf < 0 || !lv_port_gpu_cf_fb_valid(d2_dst_cf)) {
467 return false;
468 }
469 src_cf_val = d2_src_cf;
470 dst_cf_val = d2_dst_cf;
471
472 img_dsc = *draw_dsc;
473
474 /* Disable alpha if alpha channel does not exist */
475 alpha_enabled = lv_port_gpu_cf_has_alpha(src_cf_val) ? alpha_en : 0;
476 color_key_enabled = color_key_en;
477 blend_enabled = blend_en;
478 colorize_enabled = colorize_en | lv_port_gpu_cf_is_alpha(src_cf_val);
479
480 return true;
481 }
482
/**
 * Issue the actual blit/texture operations for the configured source.
 * Handles plain copies, zoom (img_dsc.zoom, 8.8 fixed point) and rotation
 * (img_dsc.angle, 0.1-degree units) around img_dsc.pivot. Reads the static
 * state set up by lv_port_gpu_config_blit() and the caller.
 */
static void lv_port_gpu_blit_internal(const lv_area_t * dest_area, const lv_color_t * src_buf,
                                      const lv_area_t * src_area, d2_u32 flags)
{
    const lv_area_t * img_area = src_area;
    lv_area_t img_area_scaled;
    lv_coord_t w, h, img_w, img_h;
    d2_s32 pitch;
    int bpp = lv_port_gpu_cf_bpp(src_cf_val);

    D2_EXEC(d2_cliprect(_d2_handle, dest_area->x1, dest_area->y1, dest_area->x2, dest_area->y2));

    pitch = w = lv_area_get_width(src_area);
    h = lv_area_get_height(src_area);

    if(img_dsc.zoom != LV_IMG_ZOOM_NONE) {
        /* Scale the destination rectangle around the pivot (zoom is 8.8 fixed point) */
        img_area_scaled.x1 = src_area->x1 + ((((int32_t)0 - img_dsc.pivot.x) * img_dsc.zoom) >> 8) + img_dsc.pivot.x;
        img_area_scaled.x2 = src_area->x1 + ((((int32_t)w - img_dsc.pivot.x) * img_dsc.zoom) >> 8) + img_dsc.pivot.x;
        img_area_scaled.y1 = src_area->y1 + ((((int32_t)0 - img_dsc.pivot.y) * img_dsc.zoom) >> 8) + img_dsc.pivot.y;
        img_area_scaled.y2 = src_area->y1 + ((((int32_t)h - img_dsc.pivot.y) * img_dsc.zoom) >> 8) + img_dsc.pivot.y;
        img_area = &img_area_scaled;
    }

    img_w = lv_area_get_width(img_area);
    img_h = lv_area_get_height(img_area);

    /* Sub-byte formats: round the pitch up toward a byte-aligned row.
     * NOTE(review): for bpp == 2 or 4 the value (8 - bpp) is 6 or 4, which is
     * not a power-of-two alignment mask — the expression is only a correct
     * round-up for bpp == 1. Verify against the D/AVE 2D pitch requirements. */
    if(0 < bpp && bpp < 8) {
        pitch = (w + (8 - bpp)) & (~(8 - bpp));
    }

    if(img_dsc.angle == 0) {
        /* No rotation: a plain (possibly stretched) blit is enough */
        D2_EXEC(d2_setblitsrc(_d2_handle, (void *) src_buf, pitch, w, h, src_cf_val));

        D2_EXEC(d2_blitcopy(_d2_handle, w, h, 0, 0,
                            D2_FIX4(img_w), D2_FIX4(img_h), D2_FIX4(img_area->x1), D2_FIX4(img_area->y1), flags));
    }
    else {
        /* Rotation: render the source as a texture mapped onto a rotated quad */
        int x, y, x1, y1, x2, y2, x3, y3, x4, y4, dxu, dxv, dyu, dyv, xx, xy, yx, yy;
        int pivot_scaled_x, pivot_scaled_y;
        int tex_offset = (flags & d2_bf_filter) ? -32767 : 0; /* half-texel shift when filtering */
        d2_u8 amode, cmode = d2_to_copy;
        float angle = ((float)img_dsc.angle / 10) * M_PI / 180; /* 0.1 deg -> radians */
        float cos_angle = cosf(angle);
        float sin_angle = sinf(angle);
        d2_u8 fillmode_backup;

        /* setup texture params */
        fillmode_backup = d2_getfillmode(_d2_handle);
        D2_EXEC(d2_setfillmode(_d2_handle, d2_fm_texture));
        D2_EXEC(d2_settexture(_d2_handle, (void *) src_buf, pitch, w, h, src_cf_val));
        D2_EXEC(d2_settexturemode(_d2_handle, flags & (d2_bf_filter | d2_bf_wrap)));
        amode = flags & d2_bf_usealpha ? d2_to_copy : d2_to_one;
        cmode = flags & d2_bf_colorize2 ? d2_to_blend : d2_to_copy;
        D2_EXEC(d2_settextureoperation(_d2_handle, amode, cmode, cmode, cmode));
        if(flags & d2_bf_colorize2) {
            /* Forward the colorize constants (set by the caller) to the texture unit */
            d2_color cl = d2_getcolor(_d2_handle, 0), ch = d2_getcolor(_d2_handle, 1);
            D2_EXEC(d2_settexopparam(_d2_handle, d2_cc_red, (uint8_t)(cl >> 16UL),
                                     (uint8_t)(ch >> 16UL)));
            D2_EXEC(d2_settexopparam(_d2_handle, d2_cc_green, (uint8_t)(cl >> 8UL),
                                     (uint8_t)(ch >> 8UL)));
            D2_EXEC(d2_settexopparam(_d2_handle, d2_cc_blue, (uint8_t)(cl >> 0UL),
                                     (uint8_t)(ch >> 0UL)));
        }

        x = D2_FIX4(img_area->x1);
        y = D2_FIX4(img_area->y1);

        /* define quad points */
        x1 = D2_FIX4(0);
        y1 = D2_FIX4(0);
        x2 = D2_FIX4(img_w);
        y2 = D2_FIX4(0);
        x3 = D2_FIX4(img_w);
        y3 = D2_FIX4(img_h);
        x4 = D2_FIX4(0);
        y4 = D2_FIX4(img_h);

        /* rotate points for quad */
        pivot_scaled_x = (img_dsc.pivot.x * img_dsc.zoom) >> 4;
        pivot_scaled_y = (img_dsc.pivot.y * img_dsc.zoom) >> 4;

        lv_port_gpu_rotate_point(&x1, &y1, cos_angle, sin_angle, pivot_scaled_x, pivot_scaled_y);
        lv_port_gpu_rotate_point(&x2, &y2, cos_angle, sin_angle, pivot_scaled_x, pivot_scaled_y);
        lv_port_gpu_rotate_point(&x3, &y3, cos_angle, sin_angle, pivot_scaled_x, pivot_scaled_y);
        lv_port_gpu_rotate_point(&x4, &y4, cos_angle, sin_angle, pivot_scaled_x, pivot_scaled_y);

        /* compute texture increments (inverse rotation, 16.16 fixed point) */
        xx = (int)(cos_angle * 65536.0f);
        xy = (int)(sin_angle * 65536.0f);
        yx = (int)(-sin_angle * 65536.0f);
        yy = (int)(cos_angle * 65536.0f);
        dxu = ((D2_FIX16(w) / D2_FIX4(img_w)) * xx) >> 12;
        dxv = ((D2_FIX16(w) / D2_FIX4(img_w)) * xy) >> 12;
        dyu = ((D2_FIX16(h) / D2_FIX4(img_h)) * yx) >> 12;
        dyv = ((D2_FIX16(h) / D2_FIX4(img_h)) * yy) >> 12;

        /* map texture exactly to rotated quad, so texel center is always (0/0) top-left */
        D2_EXEC(d2_settexelcenter(_d2_handle, 0, 0));
        D2_EXEC(d2_settexturemapping(_d2_handle, (d2_point)(x + x1), (d2_point)(y + y1),
                                     tex_offset, tex_offset, dxu, dxv, dyu, dyv));

        int minx = MAX(dest_area->x1, D2_INT4(x + MIN(x1, MIN(x2, MIN(x3, x4)))));
        int maxx = MIN(dest_area->x2, D2_INT4(x + MAX(x1, MAX(x2, MAX(x3, x4)))));
        int slice = (flags & d2_bf_filter) ? 6 : 8;

        /* Perform render operation in slices to achieve better performance */
        for(int posx = minx; posx < maxx; posx += slice) {
            D2_EXEC(d2_cliprect(_d2_handle, posx, dest_area->y1, MIN(posx + slice - 1, maxx), dest_area->y2));
            D2_EXEC(d2_renderquad(_d2_handle, (d2_point)(x + x1), (d2_point)(y + y1),
                                  (d2_point)(x + x2), (d2_point)(y + y2),
                                  (d2_point)(x + x3), (d2_point)(y + y3),
                                  (d2_point)(x + x4), (d2_point)(y + y4), 0));
        }
        D2_EXEC(d2_setfillmode(_d2_handle, fillmode_backup));
    }
}
598
/**
 * Blit src into dst honoring the state stored by lv_port_gpu_config_blit()
 * (alpha, color key, colorize, recolor, blending, rotation/zoom).
 * Coordinates are relative to the dst buffer.
 */
void lv_port_ra_gpu_blit(lv_color_t * dst, const lv_area_t * dst_area, lv_coord_t dest_stride,
                         const lv_color_t * src, const lv_area_t * src_area, lv_opa_t opa)
{
    d2_u32 flags = 0;
    const d2_color * clut = NULL;
    int clut_len = 0;

    invalidate_cache();

    /* For indexed formats the palette precedes the pixel data; split them */
    clut_len = lv_port_gpu_handle_indexed_color(&src, &clut, src_cf_val);

    lv_port_gpu_start_render();

    D2_EXEC(d2_framebuffer(_d2_handle, d1_maptovidmem(_d2_handle, dst), MAX(dest_stride, 2),
                           MAX(dst_area->x2 + 1, 2), MAX(dst_area->y2 + 1, 2), dst_cf_val));

    flags |= alpha_enabled ? d2_bf_usealpha : 0;

    D2_EXEC(d2_setalpha(_d2_handle, opa > LV_OPA_MAX ? LV_OPA_COVER : opa));

    if(clut) {
        /* Upload the palette before the texture that references it */
        D2_EXEC(d2_writetexclut_direct(_d2_handle, clut, 0, clut_len));
    }

    /* NOTE(review): color keying maps to d2_bf_usealpha as well — confirm this
     * is the intended D/AVE 2D mechanism for chroma-keyed sources */
    flags |= color_key_enabled ? d2_bf_usealpha : 0;
    flags |= (colorize_enabled || img_dsc.recolor_opa != LV_OPA_TRANSP) ? d2_bf_colorize2 : 0;
    if(colorize_enabled) {
        /* Full colorize: both constants are the recolor itself */
        D2_EXEC(d2_setcolor(_d2_handle, 0, lv_port_gpu_color_lv_to_d2(img_dsc.recolor)));
        D2_EXEC(d2_setcolor(_d2_handle, 1, lv_port_gpu_color_lv_to_d2(img_dsc.recolor)));
    }
    else if(img_dsc.recolor_opa != LV_OPA_TRANSP) {
        /* Partial recolor: mix recolor and source per recolor_opa */
        d2_color cl = 0, ch = 0;
        lv_port_gpu_get_recolor_consts(&cl, &ch);
        D2_EXEC(d2_setcolor(_d2_handle, 0, cl));
        D2_EXEC(d2_setcolor(_d2_handle, 1, ch));
    }

    /* Bilinear filtering only for transformed, antialiased blits */
    flags |= ((img_dsc.angle || img_dsc.zoom != LV_IMG_ZOOM_NONE) && img_dsc.antialias) ? d2_bf_filter : 0;

    if(blend_enabled) {
        D2_EXEC(d2_setblendmode(_d2_handle, d2_bm_alpha,
                                img_dsc.blend_mode != LV_BLEND_MODE_NORMAL ? d2_bm_one : d2_bm_one_minus_alpha));
        D2_EXEC(d2_setalphablendmode(_d2_handle, d2_bm_one, d2_bm_one_minus_alpha));
    }
    else {
        /* Blending disabled: plain overwrite of the destination */
        D2_EXEC(d2_setblendmode(_d2_handle, d2_bm_one, d2_bm_zero));
        D2_EXEC(d2_setalphablendmode(_d2_handle, d2_bm_one, d2_bm_zero));
    }

    lv_port_gpu_blit_internal(dst_area, src, src_area, flags);

    lv_port_gpu_execute_render();
}
652
/**
 * Blend callback: uses the GPU for unmasked, normal-mode blends over ~100 px,
 * otherwise falls back to the software blender.
 */
void lv_draw_ra6m3_2d_blend(lv_draw_ctx_t * draw_ctx, const lv_draw_sw_blend_dsc_t * dsc)
{
    lv_area_t blend_area;
    if(!_lv_area_intersect(&blend_area, dsc->blend_area, draw_ctx->clip_area)) return;

    bool done = false;

    if(dsc->mask_buf == NULL && dsc->blend_mode == LV_BLEND_MODE_NORMAL && lv_area_get_size(&blend_area) > 100) {
        lv_coord_t dest_stride = lv_area_get_width(draw_ctx->buf_area);

        lv_color_t * dest_buf = draw_ctx->buf;

        const lv_color_t * src_buf = dsc->src_buf;
        if(src_buf) {
            /* NOTE(review): the software blend below and the GPU blit that
             * follows appear to perform the same copy twice — verify whether
             * the SW call is intentional here */
            lv_draw_sw_blend_basic(draw_ctx, dsc);

            /* Source area relative to the source buffer origin */
            lv_area_t src_area;
            src_area.x1 = blend_area.x1 - (dsc->blend_area->x1 - draw_ctx->buf_area->x1);
            src_area.y1 = blend_area.y1 - (dsc->blend_area->y1 - draw_ctx->buf_area->y1);
            src_area.x2 = src_area.x1 + lv_area_get_width(dsc->blend_area) - 1;
            src_area.y2 = src_area.y1 + lv_area_get_height(dsc->blend_area) - 1;

            /* NOTE(review): blend_area is not shifted by the buf_area origin
             * here, unlike the fill path below — confirm coordinate space */
            lv_port_ra_gpu_blit(dest_buf, &blend_area, dest_stride, src_buf, &src_area, dsc->opa);
            done = true;
        }
        else if(dsc->opa >= LV_OPA_MAX) {
            /* Solid fill at full opacity: convert to buffer-local coordinates */
            lv_area_move(&blend_area, -draw_ctx->buf_area->x1, -draw_ctx->buf_area->y1);
            lv_port_gpu_fill(dest_buf, &blend_area, dest_stride, dsc->color, dsc->opa);
            done = true;
        }
    }

    if(!done) lv_draw_sw_blend_basic(draw_ctx, dsc);
}
687
/* draw_img_decoded callback: currently delegated entirely to the SW renderer. */
static void lv_port_gpu_img_decoded(lv_draw_ctx_t * draw_ctx, const lv_draw_img_dsc_t * dsc,
                                    const lv_area_t * coords, const uint8_t * map_p, lv_img_cf_t color_format)
{
    /*TODO basic ARGB8888 image can be handles here*/

    lv_draw_sw_img_decoded(draw_ctx, dsc, coords, map_p, color_format);
}
695
696 void lv_draw_ra6m3_2d_ctx_init(lv_disp_drv_t * drv, lv_draw_ctx_t * draw_ctx)
697 {
698 lv_draw_sw_init_ctx(drv, draw_ctx);
699
700 lv_draw_ra6m3_dma2d_ctx_t * ra_2d_draw_ctx = (lv_draw_sw_ctx_t *)draw_ctx;
701
702 ra_2d_draw_ctx->blend = lv_draw_ra6m3_2d_blend;
703 ra_2d_draw_ctx->base_draw.draw_img_decoded = lv_port_gpu_img_decoded;
704 ra_2d_draw_ctx->base_draw.wait_for_finish = lv_port_gpu_wait;
705 ra_2d_draw_ctx->base_draw.draw_letter = lv_draw_gpu_letter;
706 //ra_2d_draw_ctx->base_draw.buffer_copy = lv_draw_ra6m3_2d_buffer_copy;
707 }
708
/* Deinitialize the draw context. Currently a no-op.
 * NOTE(review): the name says "stm32_dma2d" in an RA6M3 driver — likely a
 * copy/paste leftover; renaming would break external callers, so kept. */
void lv_draw_stm32_dma2d_ctx_deinit(lv_disp_t * disp, lv_draw_ctx_t * draw_ctx)
{
    LV_UNUSED(disp);
    LV_UNUSED(draw_ctx);
}
714
715 static void invalidate_cache(void)
716 {
717 lv_disp_t * disp = _lv_refr_get_disp_refreshing();
718 if(disp->driver->clean_dcache_cb) disp->driver->clean_dcache_cb(disp->driver);
719 }
720
#ifdef LOG_ERRORS
/**
 * Record and print a failed d2_* call (used via the D2_EXEC macro).
 * Entries are kept in a small circular buffer for postmortem inspection.
 * A status of 0 (D2_OK) is ignored.
 */
static void lv_port_gpu_log_error(d2_s32 status, const char * func, int line)
{
    if(status) {
        log_error_list[error_list_index].error = status;
        log_error_list[error_list_index].func = func;
        log_error_list[error_list_index].line = line;
        LV_LOG_ERROR("%s\r\n", d2_geterrorstring(_d2_handle));
        LV_LOG_ERROR("%d:\t%d - %s : %d\r\n", error_count,
                     log_error_list[error_list_index].error,
                     log_error_list[error_list_index].func,
                     log_error_list[error_list_index].line);

        error_count++;
        error_list_index++;
        /* Wrap around: keep only the last ERROR_LIST_SIZE entries */
        if(error_list_index >= ERROR_LIST_SIZE) {
            error_list_index = 0;
        }
    }
}
#endif
742 #endif /* LV_USE_GPU_RA6M3_G2D */
743