1 /*
2 * Copyright (c) 2022, Arm Limited. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 *
6 */
7
8 #include "dma350_ch_drv.h"
9 #include "dma350_lib.h"
10
11 #include <arm_cmse.h>
12 #include <stdbool.h>
13 #include <stdint.h>
14 #include <stddef.h>
15
16 /* Header for target specific MPU definitions */
17 #ifndef CMSIS_device_header
18 /* CMSIS pack default header, containing the CMSIS_device_header definition */
19 #include "RTE_Components.h"
20 #endif
21 #include CMSIS_device_header
22
23 /**********************************************/
24 /************** Static Functions **************/
25 /**********************************************/
26
dma350_remap(uint32_t addr)27 static uint32_t dma350_remap(uint32_t addr)
28 {
29 const struct dma350_remap_range_t* map;
30
31 for(uint32_t i = 0; i < dma350_address_remap.size; ++i) {
32 map = &dma350_address_remap.map[i];
33 if(addr <= map->end && addr >= map->begin) {
34 return addr + map->offset;
35 }
36 }
37 return addr;
38 }
39
dma350_runcmd(struct dma350_ch_dev_t * dev,enum dma350_lib_exec_type_t exec_type)40 static enum dma350_lib_error_t dma350_runcmd(struct dma350_ch_dev_t* dev,
41 enum dma350_lib_exec_type_t exec_type)
42 {
43 union dma350_ch_status_t status;
44
45 /* Extra setup based on execution type */
46 switch(exec_type) {
47 case DMA350_LIB_EXEC_IRQ:
48 dma350_ch_enable_intr(dev, DMA350_CH_INTREN_DONE);
49 break;
50 case DMA350_LIB_EXEC_START_ONLY:
51 case DMA350_LIB_EXEC_BLOCKING:
52 dma350_ch_disable_intr(dev, DMA350_CH_INTREN_DONE);
53 break;
54 default:
55 return DMA350_LIB_ERR_INVALID_EXEC_TYPE;
56 }
57
58 dma350_ch_cmd(dev, DMA350_CH_CMD_ENABLECMD);
59
60 /* Return or check based on execution type */
61 switch(exec_type) {
62 case DMA350_LIB_EXEC_IRQ:
63 case DMA350_LIB_EXEC_START_ONLY:
64 if (dma350_ch_is_stat_set(dev, DMA350_CH_STAT_ERR)) {
65 return DMA350_LIB_ERR_CMD_ERR;
66 }
67 break;
68 case DMA350_LIB_EXEC_BLOCKING:
69 status = dma350_ch_wait_status(dev);
70 if (!status.b.STAT_DONE || status.b.STAT_ERR) {
71 return DMA350_LIB_ERR_CMD_ERR;
72 }
73 break;
74 /* default is handled above */
75 }
76
77 return DMA350_LIB_ERR_NONE;
78 }
79
get_default_memattr(uint32_t address)80 static uint8_t get_default_memattr(uint32_t address)
81 {
82 uint8_t mpu_attribute;
83 switch ((address >> 29) & 0x7) /* Get top 3 bits */
84 {
85 case (0): // CODE region, WT-RA
86 // Use same attribute for inner and outer
87 mpu_attribute = ARM_MPU_ATTR((ARM_MPU_ATTR_MEMORY_(0, 0, 1, 0)), (ARM_MPU_ATTR_MEMORY_(0, 0, 1, 0))); // NT=0, WB=0, RA=1, WA=0
88 break;
89 case (1): // SRAM region, WB-WA-RA
90 // Use same attribute for inner and outer
91 mpu_attribute = ARM_MPU_ATTR((ARM_MPU_ATTR_MEMORY_(0, 1, 1, 1)), (ARM_MPU_ATTR_MEMORY_(0, 1, 1, 1))); // NT=0, WB=1, RA=1, WA=1
92 break;
93 case (2): // Peripheral region (Shareable)
94 mpu_attribute = ARM_MPU_ATTR(ARM_MPU_ATTR_DEVICE, ARM_MPU_ATTR_DEVICE_nGnRE);
95 break;
96 case (3): // SRAM region, WB-WA-RA
97 // Use same attribute for inner and outer
98 mpu_attribute = ARM_MPU_ATTR((ARM_MPU_ATTR_MEMORY_(0, 1, 1, 1)), (ARM_MPU_ATTR_MEMORY_(0, 1, 1, 1))); // NT=0, WB=1, RA=1, WA=1
99 break;
100 case (4): // SRAM region, WT-RA
101 // Use same attribute for inner and outer
102 mpu_attribute = ARM_MPU_ATTR((ARM_MPU_ATTR_MEMORY_(0, 0, 1, 0)), (ARM_MPU_ATTR_MEMORY_(0, 0, 1, 0))); // NT=0, WB=0, RA=1, WA=0
103 break;
104 case (5): // Device region (Shareable)
105 mpu_attribute = ARM_MPU_ATTR(ARM_MPU_ATTR_DEVICE, ARM_MPU_ATTR_DEVICE_nGnRE);
106 break;
107 case (6): // Device region (Shareable)
108 mpu_attribute = ARM_MPU_ATTR(ARM_MPU_ATTR_DEVICE, ARM_MPU_ATTR_DEVICE_nGnRE);
109 break;
110 default: // System / Vendor specific
111 if ((address < 0xE0100000UL))
112 {
113 mpu_attribute = ARM_MPU_ATTR(ARM_MPU_ATTR_DEVICE, ARM_MPU_ATTR_DEVICE_nGnRnE); // PPB
114 } else {
115 mpu_attribute = ARM_MPU_ATTR(ARM_MPU_ATTR_DEVICE, ARM_MPU_ATTR_DEVICE_nGnRE); // Vendor
116 }
117 break;
118 } /* end switch */
119 return mpu_attribute;
120 }
121
/* Access properties of a target memory location, collected from the CMSE
 * address lookup and the (selected) MPU, used to program the matching
 * security / privilege / memory-attribute fields of a DMA transfer. */
struct dma350_memattr {
    bool nonsecure;           /* Location is accessible from Non-secure state */
    bool unprivileged;        /* Location is accessible at unprivileged level */
    uint8_t mpu_attribute;    /* 8-bit MAIR-style memory attribute */
    uint8_t mpu_shareability; /* Shareability field (as in MPU_RBAR.SH) */
};
128
/* Determine the security state, privilege level, memory attribute and
 * shareability with which the given address can be accessed, so the DMA
 * transfer can be configured with matching properties.
 *
 * address  - target address to query
 * memattr  - output, filled on success
 * writable - when true, require read-write access for the non-secure /
 *            unprivileged classification; otherwise read access suffices
 *
 * Returns DMA350_LIB_ERR_NONE on success, or (non-secure build only)
 * DMA350_LIB_ERR_RANGE_NOT_ACCESSIBLE if the address is not accessible. */
static enum dma350_lib_error_t dma350_get_memattr(void* address,
                                            struct dma350_memattr* memattr,
                                            bool writable)
{
    cmse_address_info_t address_info;
    MPU_Type *Selected_MPU; /* Pointer to selected MPU (MPU / MPU_NS) */
    uint32_t mpu_attri_raw, saved_MPU_RNR;
    uint8_t mpu_attr_idx;
    memattr->mpu_attribute = 0;
    memattr->mpu_shareability = 0;
#if defined(__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U)
    /* Check if address is readable by non-secure, then if by unprivileged */
    /* Secure state - alternate (NS) MPU alias available */
    /* Check if address is readable by privileged (NS) */
    address_info = cmse_TTA(address);
    if(writable ? address_info.flags.nonsecure_readwrite_ok
                : address_info.flags.nonsecure_read_ok) {
        memattr->nonsecure = true;
        Selected_MPU = MPU_NS; /* Use non-secure MPU for attr lookup */
        /* Check if address is readable by unprivileged (NS) */
        /* Updating address_info is OK, as MPU region and its validity is set
         * regardless of whether the address is accessible by unprivileged */
        address_info = cmse_TTAT(address);
        if(writable ? address_info.flags.nonsecure_readwrite_ok
                    : address_info.flags.nonsecure_read_ok)
        {
            memattr->unprivileged = true;
        } else {
            memattr->unprivileged = false;
        }
    } else {
        /* Target memory only readable by secure */
        memattr->nonsecure = false;
        Selected_MPU = MPU; /* Use secure MPU for attr lookup */
        /* Check if address is readable by unprivileged (S) */
        /* Update address_info, as Alternate lookup provided no valid region */
        /* Unprivileged lookup is OK as MPU region and its validity is set
         * regardless of whether the address is accessible by unprivileged */
        address_info = cmse_TTT(address);
        if(writable ? address_info.flags.readwrite_ok
                    : address_info.flags.read_ok) {
            memattr->unprivileged = true;
        } else {
            memattr->unprivileged = false;
        }
    }
#else
    /* Non-secure state */
    /* Check if address is readable by privileged (NS) */
    address_info = cmse_TT(address);
    if(writable ? address_info.flags.readwrite_ok
                : address_info.flags.read_ok) {
        memattr->nonsecure = true;
        Selected_MPU = MPU; /* Only non-aliased MPU available (== MPU_NS) */
        /* Check if address is readable by unprivileged (NS) */
        /* Updating address_info is OK, as MPU region and its validity is set
         * regardless of whether the address is accessible by unprivileged */
        address_info = cmse_TTT(address);
        if(writable ? address_info.flags.readwrite_ok
                    : address_info.flags.read_ok)
        {
            memattr->unprivileged = true;
        } else {
            memattr->unprivileged = false;
        }
    } else {
        /* Target memory not readable by non-secure */
        return DMA350_LIB_ERR_RANGE_NOT_ACCESSIBLE;
    }
#endif
    if ((Selected_MPU->CTRL & MPU_CTRL_ENABLE_Msk) &&
        address_info.flags.mpu_region_valid) {
        /* MPU is enabled, lookup attributes of the matched region.
         * Note: RNR is saved and restored because the banked region-select
         * register is shared state that the caller may rely on. */
        saved_MPU_RNR = Selected_MPU->RNR; /* Save MPU_RNR */
        Selected_MPU->RNR = address_info.flags.mpu_region; /* Select Region */
        mpu_attr_idx = (Selected_MPU->RLAR & MPU_RLAR_AttrIndx_Msk)
                       >> MPU_RLAR_AttrIndx_Pos;
        memattr->mpu_shareability = (Selected_MPU->RBAR & MPU_RBAR_SH_Msk)
                                    >> MPU_RBAR_SH_Pos;
        /* MAIR0 holds attributes 0-3, MAIR1 holds attributes 4-7 */
        if (mpu_attr_idx > 3)
        {
            mpu_attri_raw = Selected_MPU->MAIR[1]; /* ATTR4 - ATTR7 */
        }
        else
        {
            mpu_attri_raw = Selected_MPU->MAIR[0]; /* ATTR0 - ATTR3 */
        }
        Selected_MPU->RNR = saved_MPU_RNR; /* Restore MPU_RNR */
        /* Extract the 8-bit attribute byte for the selected index */
        memattr->mpu_attribute = (mpu_attri_raw >> ((mpu_attr_idx & 0x3) << 3)) & 0xFFUL;
    } else {
        /* If MPU is not enabled, use privileged access */
        memattr->unprivileged = false;
        /* Default memory map lookup for attributes */
        memattr->mpu_attribute = get_default_memattr((uint32_t)address);
    }

    return DMA350_LIB_ERR_NONE;
}
228
229
230 /**********************************************/
231 /************** Public Functions **************/
232 /**********************************************/
233
dma350_lib_set_src(struct dma350_ch_dev_t * dev,const void * src)234 enum dma350_lib_error_t dma350_lib_set_src(struct dma350_ch_dev_t* dev,
235 const void* src)
236 {
237 struct dma350_memattr memattr;
238 enum dma350_lib_error_t lib_err;
239
240 lib_err = verify_dma350_ch_dev_ready(dev);
241 if(lib_err != DMA350_LIB_ERR_NONE) {
242 return lib_err;
243 }
244
245 lib_err = dma350_get_memattr((void*)src, &memattr, false);
246 if(lib_err != DMA350_LIB_ERR_NONE) {
247 return lib_err;
248 }
249
250 if(memattr.nonsecure) {
251 dma350_ch_set_src_trans_nonsecure(dev);
252 } else {
253 dma350_ch_set_src_trans_secure(dev);
254 }
255 if(memattr.unprivileged) {
256 dma350_ch_set_src_trans_unprivileged(dev);
257 } else {
258 dma350_ch_set_src_trans_privileged(dev);
259 }
260 dma350_ch_set_srcmemattr(dev, memattr.mpu_attribute,
261 memattr.mpu_shareability);
262 dma350_ch_set_src(dev, dma350_remap((uint32_t)src));
263
264 return DMA350_LIB_ERR_NONE;
265 }
266
dma350_lib_set_des(struct dma350_ch_dev_t * dev,void * des)267 enum dma350_lib_error_t dma350_lib_set_des(struct dma350_ch_dev_t* dev,
268 void* des)
269 {
270 struct dma350_memattr memattr;
271 enum dma350_lib_error_t lib_err;
272
273 lib_err = verify_dma350_ch_dev_ready(dev);
274 if(lib_err != DMA350_LIB_ERR_NONE) {
275 return lib_err;
276 }
277
278 lib_err = dma350_get_memattr(des, &memattr, true);
279 if(lib_err != DMA350_LIB_ERR_NONE) {
280 return lib_err;
281 }
282
283 if(memattr.nonsecure) {
284 dma350_ch_set_des_trans_nonsecure(dev);
285 } else {
286 dma350_ch_set_des_trans_secure(dev);
287 }
288 if(memattr.unprivileged) {
289 dma350_ch_set_des_trans_unprivileged(dev);
290 } else {
291 dma350_ch_set_des_trans_privileged(dev);
292 }
293 dma350_ch_set_desmemattr(dev, memattr.mpu_attribute,
294 memattr.mpu_shareability);
295 dma350_ch_set_des(dev, dma350_remap((uint32_t)des));
296
297 return DMA350_LIB_ERR_NONE;
298 }
299
dma350_lib_set_src_des(struct dma350_ch_dev_t * dev,const void * src,void * des,uint32_t src_size,uint32_t des_size)300 enum dma350_lib_error_t dma350_lib_set_src_des(struct dma350_ch_dev_t* dev,
301 const void* src, void* des,
302 uint32_t src_size,
303 uint32_t des_size)
304 {
305 enum dma350_lib_error_t lib_err;
306
307 lib_err = verify_dma350_ch_dev_ready(dev);
308 if(lib_err != DMA350_LIB_ERR_NONE) {
309 return lib_err;
310 }
311 if(NULL == cmse_check_address_range((void*)src, src_size, CMSE_MPU_READ)) {
312 return DMA350_LIB_ERR_RANGE_NOT_ACCESSIBLE;
313 }
314 if(NULL == cmse_check_address_range(des, des_size, CMSE_MPU_READWRITE)) {
315 return DMA350_LIB_ERR_RANGE_NOT_ACCESSIBLE;
316 }
317 lib_err = dma350_lib_set_src(dev, src);
318 if(lib_err != DMA350_LIB_ERR_NONE) {
319 return lib_err;
320 }
321 lib_err = dma350_lib_set_des(dev, des);
322 if(lib_err != DMA350_LIB_ERR_NONE) {
323 return lib_err;
324 }
325 return DMA350_LIB_ERR_NONE;
326 }
327
dma350_memcpy(struct dma350_ch_dev_t * dev,const void * src,void * des,uint32_t size,enum dma350_lib_exec_type_t exec_type)328 enum dma350_lib_error_t dma350_memcpy(struct dma350_ch_dev_t* dev,
329 const void* src, void* des, uint32_t size,
330 enum dma350_lib_exec_type_t exec_type)
331 {
332 enum dma350_lib_error_t lib_err;
333
334 lib_err = verify_dma350_ch_dev_ready(dev);
335 if(lib_err != DMA350_LIB_ERR_NONE) {
336 return lib_err;
337 }
338
339 lib_err = dma350_lib_set_src_des(dev, src, des, size, size);
340 if(lib_err != DMA350_LIB_ERR_NONE) {
341 return lib_err;
342 }
343 dma350_ch_set_xaddr_inc(dev, 1, 1);
344
345 if (size > 0xFFFF) {
346 dma350_ch_set_xsize32(dev, size, size);
347 }
348 else {
349 dma350_ch_set_xsize16(dev, (uint16_t)size, (uint16_t)size);
350 }
351 dma350_ch_set_transize(dev, DMA350_CH_TRANSIZE_8BITS);
352 dma350_ch_set_xtype(dev, DMA350_CH_XTYPE_CONTINUE);
353 dma350_ch_set_ytype(dev, DMA350_CH_YTYPE_DISABLE);
354
355 return dma350_runcmd(dev, exec_type);
356 }
357
dma350_memmove(struct dma350_ch_dev_t * dev,const void * src,void * des,uint32_t size,enum dma350_lib_exec_type_t exec_type)358 enum dma350_lib_error_t dma350_memmove(struct dma350_ch_dev_t* dev,
359 const void* src, void* des, uint32_t size,
360 enum dma350_lib_exec_type_t exec_type)
361 {
362 enum dma350_lib_error_t lib_err;
363
364 lib_err = verify_dma350_ch_dev_ready(dev);
365 if(lib_err != DMA350_LIB_ERR_NONE) {
366 return lib_err;
367 }
368
369 if (src < des && (((const uint8_t*)src) + size) > (uint8_t*)des) {
370 /* Start from the end if the end of the source overlaps with
371 the start of the destination */
372 src = (const uint8_t*) (((const uint8_t*)src) + size - 1);
373 des = (uint8_t*) (((uint8_t*)des) + size - 1);
374 dma350_ch_set_xaddr_inc(dev, -1, -1);
375 }
376 else {
377 dma350_ch_set_xaddr_inc(dev, 1, 1);
378 }
379
380 lib_err = dma350_lib_set_src(dev, src);
381 if(lib_err != DMA350_LIB_ERR_NONE) {
382 return lib_err;
383 }
384 lib_err = dma350_lib_set_des(dev, des);
385 if(lib_err != DMA350_LIB_ERR_NONE) {
386 return lib_err;
387 }
388 dma350_ch_set_xsize32(dev, size, size);
389 dma350_ch_set_transize(dev, DMA350_CH_TRANSIZE_8BITS);
390 dma350_ch_set_xtype(dev, DMA350_CH_XTYPE_CONTINUE);
391 dma350_ch_set_ytype(dev, DMA350_CH_YTYPE_DISABLE);
392
393 return dma350_runcmd(dev, exec_type);
394 }
395
dma350_endian_swap(struct dma350_ch_dev_t * dev,const void * src,void * des,uint8_t size,uint32_t count)396 enum dma350_lib_error_t dma350_endian_swap(struct dma350_ch_dev_t* dev,
397 const void* src, void* des,
398 uint8_t size, uint32_t count)
399 {
400 uint32_t remaining = 0;
401 enum dma350_lib_error_t lib_err;
402 const uint8_t *ptr8 = (const uint8_t*) src;
403
404 lib_err = verify_dma350_ch_dev_ready(dev);
405 if(lib_err != DMA350_LIB_ERR_NONE) {
406 return lib_err;
407 }
408
409 lib_err = dma350_lib_set_des(dev, des);
410 if(lib_err != DMA350_LIB_ERR_NONE) {
411 return lib_err;
412 }
413 /* First copy will always start at size - 1 offset, memory attributes are
414 * expected to be constant whole the whole affected memory, so it is enough
415 * to set the memory attributes once, then only update the src address. */
416 lib_err = dma350_lib_set_src(dev, &ptr8[size - 1]);
417 if(lib_err != DMA350_LIB_ERR_NONE) {
418 return lib_err;
419 }
420 dma350_ch_set_xtype(dev, DMA350_CH_XTYPE_CONTINUE);
421 dma350_ch_set_ytype(dev, DMA350_CH_YTYPE_CONTINUE);
422 dma350_ch_set_transize(dev, DMA350_CH_TRANSIZE_8BITS);
423 dma350_ch_set_xaddr_inc(dev, -1, 1);
424 dma350_ch_set_yaddrstride(dev, size, 0);
425 dma350_ch_set_donetype(dev, DMA350_CH_DONETYPE_END_OF_CMD);
426
427 /* Split up the image into smaller parts to fit the ysize into 16 bits. */
428 /* FIXME: command restart cannot be used, because of a bug: at the end of
429 * the command, srcaddr is not updated if yaddrstride is negative */
430 remaining = count;
431 while(remaining)
432 {
433 union dma350_ch_status_t status;
434 uint16_t copy_count = remaining > UINT16_MAX ? UINT16_MAX :
435 (uint16_t) remaining;
436 /* Start at last byte: size - 1,
437 * then start at copy_count * size higher.
438 * Total copied count = count - remaining */
439 const uint8_t *ptr_start = &ptr8[(1 + count - remaining) * size - 1];
440 dma350_ch_set_src(dev, (uint32_t) ptr_start);
441 dma350_ch_set_ysize16(dev, copy_count, 1);
442 dma350_ch_set_xsize32(dev, size, size * copy_count);
443 remaining -= copy_count;
444
445 dma350_ch_cmd(dev, DMA350_CH_CMD_ENABLECMD);
446
447 /* Blocking until done as the whole operation is split into multiple
448 * DMA commands.
449 * Can be updated to non-blocking when FIXME above is fixed. */
450 status = dma350_ch_wait_status(dev);
451 if (!status.b.STAT_DONE || status.b.STAT_ERR) {
452 return DMA350_LIB_ERR_CMD_ERR;
453 }
454 }
455
456 return DMA350_LIB_ERR_NONE;
457 }
458
/* Copy a rectangular area from a source canvas to a destination surface,
 * optionally applying a mirror or rotation transform. The transform is
 * implemented purely by choosing the destination's start offset, per-pixel
 * address increment and per-line stride; the source is always walked
 * forwards line by line.
 *
 * dev            - channel to use (must be initialized and idle)
 * src, des       - base addresses of source canvas / destination surface
 * src_width      - copied width of the source, in pixels
 * src_height     - copied height of the source, in lines
 * src_line_width - full line pitch of the source canvas, in pixels
 * des_width      - destination width, in pixels
 * des_height     - destination height, in lines
 * des_line_width - full line pitch of the destination, in pixels
 * pixelsize      - pixel transfer size; pixel byte size is 2^pixelsize
 * transform      - mirror/rotate selection
 * exec_type      - how to run/await the command (see dma350_runcmd)
 *
 * Returns DMA350_LIB_ERR_NONE on success, DMA350_LIB_ERR_CFG_ERR for an
 * invalid transform or a des_width that does not fit the swapped-axis
 * 16-bit size fields, or a propagated setup/command error. */
enum dma350_lib_error_t dma350_draw_from_canvas(struct dma350_ch_dev_t* dev,
                                const void* src, void* des,
                                uint32_t src_width, uint16_t src_height,
                                uint16_t src_line_width,
                                uint32_t des_width, uint16_t des_height,
                                uint16_t des_line_width,
                                enum dma350_ch_transize_t pixelsize,
                                enum dma350_lib_transform_t transform,
                                enum dma350_lib_exec_type_t exec_type)
{
    uint8_t *des_uint8_t;
    uint32_t des_offset, des_xsize;
    uint16_t des_ysize, des_yaddrstride;
    int16_t des_xaddrinc;
    enum dma350_lib_error_t lib_err;

    lib_err = verify_dma350_ch_dev_ready(dev);
    if(lib_err != DMA350_LIB_ERR_NONE) {
        return lib_err;
    }

    /* For each transform: pick the destination start corner (des_offset, in
     * pixels), the x/y extents, and signed increments/strides. Negative
     * strides are stored in unsigned variables by intentional two's
     * complement wraparound. For the axis-swapping transforms (TLBR, TRBL,
     * 90, 270) des_width becomes the 16-bit y size, hence the range check. */
    switch(transform) {
        case DMA350_LIB_TRANSFORM_NONE:
            /* Top left, walk forwards */
            des_offset = 0;
            des_xsize = des_width;
            des_ysize = des_height;
            des_xaddrinc = 1;
            des_yaddrstride = des_line_width;
            break;
        case DMA350_LIB_TRANSFORM_MIRROR_HOR:
            /* Top right */
            des_offset = des_width - 1;
            des_xsize = des_width;
            des_ysize = des_height;
            des_xaddrinc = -1;
            des_yaddrstride = des_line_width;
            break;
        case DMA350_LIB_TRANSFORM_MIRROR_VER:
            /* Bottom left */
            des_offset = (des_height - 1) * des_line_width;
            des_xsize = des_width;
            des_ysize = des_height;
            des_xaddrinc = 1;
            des_yaddrstride = -des_line_width;
            break;
        case DMA350_LIB_TRANSFORM_MIRROR_TLBR:
            if(des_width > UINT16_MAX) {
                return DMA350_LIB_ERR_CFG_ERR;
            }
            /* Bottom right */
            des_offset = (des_height-1) * des_line_width + des_width - 1;
            des_xsize = des_height;
            des_ysize = (uint16_t)des_width;
            des_xaddrinc = (int16_t)(-des_line_width);
            des_yaddrstride = (uint16_t)-1; /* stride of -1 via wraparound */
            break;
        case DMA350_LIB_TRANSFORM_MIRROR_TRBL:
            if(des_width > UINT16_MAX) {
                return DMA350_LIB_ERR_CFG_ERR;
            }
            /* Top left, axes swapped */
            des_offset = 0;
            des_xsize = des_height;
            des_ysize = (uint16_t)des_width;
            des_xaddrinc = (int16_t)des_line_width;
            des_yaddrstride = 1;
            break;
        case DMA350_LIB_TRANSFORM_ROTATE_90:
            if(des_width > UINT16_MAX) {
                return DMA350_LIB_ERR_CFG_ERR;
            }
            /* Top right */
            des_offset = des_width - 1;
            des_xsize = des_height;
            des_ysize = (uint16_t)des_width;
            des_xaddrinc = (int16_t)des_line_width;
            des_yaddrstride = (uint16_t)-1; /* stride of -1 via wraparound */
            break;
        case DMA350_LIB_TRANSFORM_ROTATE_180:
            /* Bottom right */
            des_offset = (des_height-1) * des_line_width + des_width - 1;
            des_xsize = des_width;
            des_ysize = des_height;
            des_xaddrinc = -1;
            des_yaddrstride = -des_line_width;
            break;
        case DMA350_LIB_TRANSFORM_ROTATE_270:
            if(des_width > UINT16_MAX) {
                return DMA350_LIB_ERR_CFG_ERR;
            }
            /* Bottom left */
            des_offset = (des_height - 1) * des_line_width;
            des_xsize = des_height;
            des_ysize = (uint16_t)des_width;
            des_xaddrinc = (int16_t)(-des_line_width);
            des_yaddrstride = 1;
            break;
        default:
            return DMA350_LIB_ERR_CFG_ERR;
    }

    /* Up until this point, offset was set as number of pixels. It needs to be
       multiplied by the size of the pixel to get the byte address offset.
       Pixel size is based on dma350_ch_transize_t which is calculated by
       2^transize, so the multiplication can be reduced to a bitshift. */
    des_offset <<= pixelsize;
    des_uint8_t = (uint8_t*) des;
    lib_err = dma350_lib_set_src(dev, src);
    if(lib_err != DMA350_LIB_ERR_NONE) {
        return lib_err;
    }
    lib_err = dma350_lib_set_des(dev, &des_uint8_t[des_offset]);
    if(lib_err != DMA350_LIB_ERR_NONE) {
        return lib_err;
    }

    dma350_ch_set_xaddr_inc(dev, 1, des_xaddrinc);
    dma350_ch_set_xsize32(dev, src_width, des_xsize);
    dma350_ch_set_ysize16(dev, src_height, des_ysize);
    dma350_ch_set_yaddrstride(dev, src_line_width, des_yaddrstride);

    dma350_ch_set_transize(dev, pixelsize);
    /* Wrap on both axes so mismatched source/destination extents repeat the
     * source pattern across the destination area. */
    dma350_ch_set_xtype(dev, DMA350_CH_XTYPE_WRAP);
    dma350_ch_set_ytype(dev, DMA350_CH_YTYPE_WRAP);

    return dma350_runcmd(dev, exec_type);
}
585