/**
 * @file lv_draw_pxp.c
 *
 */

/**
 * MIT License
 *
 * Copyright 2022, 2023 NXP
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next paragraph)
 * shall be included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*********************
 *      INCLUDES
 *********************/

#include "lv_draw_pxp.h"

#if LV_USE_GPU_NXP_PXP
#include "lv_draw_pxp_blend.h"

#if LV_COLOR_DEPTH != 32
    #include "../../../core/lv_refr.h"
#endif

/*********************
 *      DEFINES
 *********************/

/* Minimum area (in pixels) for PXP blit/fill processing; smaller areas are rendered in software. */
#ifndef LV_GPU_NXP_PXP_SIZE_LIMIT
    #define LV_GPU_NXP_PXP_SIZE_LIMIT 5000
#endif
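
/*
 * The threshold can be tuned per project from lv_conf.h (a sketch; the value
 * below is an arbitrary example, not a recommended setting). Areas smaller
 * than the limit are typically cheaper to render in software than to set up
 * and synchronize a PXP transfer for:
 *
 *   #define LV_GPU_NXP_PXP_SIZE_LIMIT 8000
 */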

/**********************
 *      TYPEDEFS
 **********************/

/**********************
 *  STATIC PROTOTYPES
 **********************/

static void lv_draw_pxp_wait_for_finish(lv_draw_ctx_t * draw_ctx);

static void lv_draw_pxp_blend(lv_draw_ctx_t * draw_ctx, const lv_draw_sw_blend_dsc_t * dsc);

static void lv_draw_pxp_img_decoded(lv_draw_ctx_t * draw_ctx, const lv_draw_img_dsc_t * dsc,
                                    const lv_area_t * coords, const uint8_t * map_p, lv_img_cf_t cf);

static void lv_draw_pxp_buffer_copy(lv_draw_ctx_t * draw_ctx,
                                    void * dest_buf, lv_coord_t dest_stride, const lv_area_t * dest_area,
                                    void * src_buf, lv_coord_t src_stride, const lv_area_t * src_area);

/**********************
 *  STATIC VARIABLES
 **********************/

/**********************
 *      MACROS
 **********************/

/**********************
 *   GLOBAL FUNCTIONS
 **********************/

void lv_draw_pxp_ctx_init(lv_disp_drv_t * drv, lv_draw_ctx_t * draw_ctx)
{
    lv_draw_sw_init_ctx(drv, draw_ctx);

    lv_draw_pxp_ctx_t * pxp_draw_ctx = (lv_draw_sw_ctx_t *)draw_ctx;
    pxp_draw_ctx->base_draw.draw_img_decoded = lv_draw_pxp_img_decoded;
    pxp_draw_ctx->blend = lv_draw_pxp_blend;
    pxp_draw_ctx->base_draw.wait_for_finish = lv_draw_pxp_wait_for_finish;
    pxp_draw_ctx->base_draw.buffer_copy = lv_draw_pxp_buffer_copy;
}
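
/*
 * Typical registration of this draw context (a sketch, assuming the LVGL v8
 * draw-context hooks on lv_disp_drv_t; adjust to the actual port):
 *
 *   static lv_disp_drv_t disp_drv;
 *   lv_disp_drv_init(&disp_drv);
 *   disp_drv.draw_ctx_init   = lv_draw_pxp_ctx_init;
 *   disp_drv.draw_ctx_deinit = lv_draw_pxp_ctx_deinit;
 *   disp_drv.draw_ctx_size   = sizeof(lv_draw_pxp_ctx_t);
 */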

void lv_draw_pxp_ctx_deinit(lv_disp_drv_t * drv, lv_draw_ctx_t * draw_ctx)
{
    lv_draw_sw_deinit_ctx(drv, draw_ctx);
}

/**********************
 *   STATIC FUNCTIONS
 **********************/

/**
 * During rendering, LVGL might initialize new draw_ctxs and start drawing into
 * a separate buffer (called a layer). If the content to be rendered has "holes",
 * e.g. rounded corners, LVGL temporarily sets the disp_drv.screen_transp flag,
 * meaning the renderers should draw into an ARGB buffer.
 * With 32 bit color depth this is not a big problem, but with 16 bit color depth
 * the target pixel format is ARGB8565, which is not supported by the GPU.
 * In this case, the PXP callbacks should fall back to SW rendering.
 */
static inline bool need_argb8565_support()
{
#if LV_COLOR_DEPTH != 32
    lv_disp_t * disp = _lv_refr_get_disp_refreshing();

    if(disp->driver->screen_transp == 1)
        return true;
#endif

    return false;
}

static void lv_draw_pxp_wait_for_finish(lv_draw_ctx_t * draw_ctx)
{
    lv_gpu_nxp_pxp_wait();

    lv_draw_sw_wait_for_finish(draw_ctx);
}

static void lv_draw_pxp_blend(lv_draw_ctx_t * draw_ctx, const lv_draw_sw_blend_dsc_t * dsc)
{
    if(dsc->opa <= (lv_opa_t)LV_OPA_MIN)
        return;

    if(need_argb8565_support()) {
        lv_draw_sw_blend_basic(draw_ctx, dsc);
        return;
    }

    lv_area_t blend_area;
    /*Let's get the blend area which is the intersection of the area to draw and the clip area*/
    if(!_lv_area_intersect(&blend_area, dsc->blend_area, draw_ctx->clip_area))
        return; /*Fully clipped, nothing to do*/

    /*Make the blend area relative to the buffer*/
    lv_area_move(&blend_area, -draw_ctx->buf_area->x1, -draw_ctx->buf_area->y1);
    if(dsc->mask_buf != NULL || dsc->blend_mode != LV_BLEND_MODE_NORMAL ||
       lv_area_get_size(&blend_area) < LV_GPU_NXP_PXP_SIZE_LIMIT) {
        lv_draw_sw_blend_basic(draw_ctx, dsc);
        return;
    }
    /*Fill/blend only when not masked and blended normally*/
    lv_color_t * dest_buf = draw_ctx->buf;
    lv_coord_t dest_stride = lv_area_get_width(draw_ctx->buf_area);
    const lv_color_t * src_buf = dsc->src_buf;

    if(src_buf == NULL) {
        lv_gpu_nxp_pxp_fill(dest_buf, &blend_area, dest_stride, dsc->color, dsc->opa);
    }
    else {
        lv_area_t src_area;
        src_area.x1 = blend_area.x1 - (dsc->blend_area->x1 - draw_ctx->buf_area->x1);
        src_area.y1 = blend_area.y1 - (dsc->blend_area->y1 - draw_ctx->buf_area->y1);
        src_area.x2 = src_area.x1 + lv_area_get_width(dsc->blend_area) - 1;
        src_area.y2 = src_area.y1 + lv_area_get_height(dsc->blend_area) - 1;
        lv_coord_t src_stride = lv_area_get_width(dsc->blend_area);

        lv_gpu_nxp_pxp_blit(dest_buf, &blend_area, dest_stride, src_buf, &src_area, src_stride,
                            dsc->opa, LV_DISP_ROT_NONE);
    }
}

static void lv_draw_pxp_img_decoded(lv_draw_ctx_t * draw_ctx, const lv_draw_img_dsc_t * dsc,
                                    const lv_area_t * coords, const uint8_t * map_p, lv_img_cf_t cf)
{
    if(dsc->opa <= (lv_opa_t)LV_OPA_MIN)
        return;

    if(need_argb8565_support()) {
        lv_draw_sw_img_decoded(draw_ctx, dsc, coords, map_p, cf);
        return;
    }

    const lv_color_t * src_buf = (const lv_color_t *)map_p;
    if(!src_buf) {
        lv_draw_sw_img_decoded(draw_ctx, dsc, coords, map_p, cf);
        return;
    }

    lv_area_t rel_coords;
    lv_area_copy(&rel_coords, coords);
    lv_area_move(&rel_coords, -draw_ctx->buf_area->x1, -draw_ctx->buf_area->y1);

    lv_area_t rel_clip_area;
    lv_area_copy(&rel_clip_area, draw_ctx->clip_area);
    lv_area_move(&rel_clip_area, -draw_ctx->buf_area->x1, -draw_ctx->buf_area->y1);

    bool has_scale = (dsc->zoom != LV_IMG_ZOOM_NONE);
    bool has_rotation = (dsc->angle != 0);
    bool unsup_rotation = false;

    lv_area_t blend_area;
    if(has_rotation)
        lv_area_copy(&blend_area, &rel_coords);
    else if(!_lv_area_intersect(&blend_area, &rel_coords, &rel_clip_area))
        return; /*Fully clipped, nothing to do*/

    bool has_mask = lv_draw_mask_is_any(&blend_area);
    lv_coord_t src_width = lv_area_get_width(coords);
    lv_coord_t src_height = lv_area_get_height(coords);

    if(has_rotation) {
        /*
         * PXP can only rotate in multiples of 90 degrees.
         */
        if(dsc->angle % 900) {
            PXP_LOG_TRACE("Rotation angle %d is not supported. PXP can only rotate in multiples of 90 degrees.",
                          dsc->angle);
            unsup_rotation = true;
        }

        /*
         * PXP is set to process 16x16 blocks to optimize the system for memory
         * bandwidth and image processing time.
         * The output engine essentially truncates any output pixels after the
         * desired number of pixels has been written.
         * When rotating a source image and the output is not divisible by the block
         * size, the wrong pixels could be truncated and the final output image
         * can look shifted.
         */
        if(src_width % 16 || src_height % 16) {
            PXP_LOG_TRACE("Rotation is not supported for images not aligned to the 16x16 block size.");
            unsup_rotation = true;
        }
    }

    if(has_mask || has_scale || unsup_rotation || lv_area_get_size(&blend_area) < LV_GPU_NXP_PXP_SIZE_LIMIT
#if LV_COLOR_DEPTH != 32
       || lv_img_cf_has_alpha(cf)
#endif
      ) {
        lv_draw_sw_img_decoded(draw_ctx, dsc, coords, map_p, cf);
        return;
    }

    lv_color_t * dest_buf = draw_ctx->buf;
    lv_coord_t dest_stride = lv_area_get_width(draw_ctx->buf_area);

    lv_area_t src_area;
    src_area.x1 = blend_area.x1 - (coords->x1 - draw_ctx->buf_area->x1);
    src_area.y1 = blend_area.y1 - (coords->y1 - draw_ctx->buf_area->y1);
    src_area.x2 = src_area.x1 + src_width - 1;
    src_area.y2 = src_area.y1 + src_height - 1;
    lv_coord_t src_stride = lv_area_get_width(coords);

    lv_gpu_nxp_pxp_blit_transform(dest_buf, &blend_area, dest_stride, src_buf, &src_area, src_stride,
                                  dsc, cf);
}

static void lv_draw_pxp_buffer_copy(lv_draw_ctx_t * draw_ctx,
                                    void * dest_buf, lv_coord_t dest_stride, const lv_area_t * dest_area,
                                    void * src_buf, lv_coord_t src_stride, const lv_area_t * src_area)
{
    LV_UNUSED(draw_ctx);

    if(lv_area_get_size(dest_area) < LV_GPU_NXP_PXP_SIZE_LIMIT) {
        lv_draw_sw_buffer_copy(draw_ctx, dest_buf, dest_stride, dest_area, src_buf, src_stride, src_area);
        return;
    }

    lv_gpu_nxp_pxp_buffer_copy(dest_buf, dest_area, dest_stride, src_buf, src_area, src_stride);
}

#endif /*LV_USE_GPU_NXP_PXP*/