1 /*
2  * Copyright (c) 2019-2024 Arm Limited. All rights reserved.
3  * Copyright (c) 2023 Cypress Semiconductor Corporation (an Infineon
4  * company) or an affiliate of Cypress Semiconductor Corporation. All rights
5  * reserved.
6  *
7  * Licensed under the Apache License, Version 2.0 (the "License");
8  * you may not use this file except in compliance with the License.
9  * You may obtain a copy of the License at
10  *
11  *     http://www.apache.org/licenses/LICENSE-2.0
12  *
13  * Unless required by applicable law or agreed to in writing, software
14  * distributed under the License is distributed on an "AS IS" BASIS,
15  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16  * See the License for the specific language governing permissions and
17  * limitations under the License.
18  */
19 
20 #include "tfm_hal_device_header.h"
21 #include "utilities.h"
22 #include "common_target_cfg.h"
23 #include "Driver_PPC.h"
24 #include "Driver_MPC.h"
25 #include "region_defs.h"
26 #include "tfm_plat_defs.h"
27 #include "region.h"
28 #include "dma350_lib.h"
29 #include "device_definition.h"
30 
31 /* Throw out bus error when an access causes security violation */
32 #define CMSDK_SECRESPCFG_BUS_ERR_MASK   (1UL)
33 
34 /* The section names come from the scatter file */
35 REGION_DECLARE(Load$$LR$$, LR_NS_PARTITION, $$Base);
36 #ifdef CONFIG_TFM_USE_TRUSTZONE
37 REGION_DECLARE(Image$$, ER_VENEER, $$Base);
38 REGION_DECLARE(Image$$, VENEER_ALIGN, $$Limit);
39 #endif
40 
/* Boundaries of the non-secure partition, the NS code entry point and (when
 * TrustZone is used) the NSC veneer region. Consumed by the SAU and MPC
 * configuration functions below. All *_limit fields are inclusive.
 */
const struct memory_region_limits memory_regions = {
#ifdef RSE_XIP
    /* Executing in place: the NS image runs from the NS alias of the XIP
     * region and is not preceded by a BL2 image header.
     */
    .non_secure_code_start = RSE_RUNTIME_NS_XIP_BASE_NS,

    .non_secure_partition_base = RSE_RUNTIME_NS_XIP_BASE_NS,

    /* Inclusive upper bound of the NS partition */
    .non_secure_partition_limit = RSE_RUNTIME_NS_XIP_BASE_NS +
                                  NS_PARTITION_SIZE - 1,
#else
    /* NS code entry point: skip the BL2 image header at the start of the
     * load region (symbol provided by the scatter file).
     */
    .non_secure_code_start =
        (uint32_t)&REGION_NAME(Load$$LR$$, LR_NS_PARTITION, $$Base) +
        BL2_HEADER_SIZE,

    .non_secure_partition_base =
        (uint32_t)&REGION_NAME(Load$$LR$$, LR_NS_PARTITION, $$Base),

    /* Inclusive upper bound of the NS partition */
    .non_secure_partition_limit =
        (uint32_t)&REGION_NAME(Load$$LR$$, LR_NS_PARTITION, $$Base) +
        NS_PARTITION_SIZE - 1,
#endif /* RSE_XIP */

#ifdef CONFIG_TFM_USE_TRUSTZONE
    /* Non-secure-callable veneer region (limit is inclusive) */
    .veneer_base = (uint32_t)&REGION_NAME(Image$$, ER_VENEER, $$Base),
    .veneer_limit = (uint32_t)&REGION_NAME(Image$$, VENEER_ALIGN, $$Limit) - 1,
#endif
};
67 
68 /* DMA350 checker layer has to know the TCM remaps */
/* The Subordinate TCM Interface provides system access only to the TCM
 * internal to each CPU. The memory map presented on the interface for TCM
 * access is defined by the TRM. The address remaps below, which add an
 * offset, provide access to the respective CPU instruction and data TCMs.
 */
74 
75 /* TCM memories addresses from perspective of cpu0
76  * 0x0000_0000 .. 0x00ff_ffff    NS ITCM
77  * 0x1000_0000 .. 0x10ff_ffff    S ITCM
78  * 0x2000_0000 .. 0x20ff_ffff    NS DTCM
79  * 0x3000_0000 .. 0x30ff_ffff    S DTCM
80 */
81 
/* TCM address remap ranges for the DMA350 checker layer: a DMA access whose
 * address falls in [begin, end] has 'offset' added before it reaches the
 * system bus, steering it to the CPU's TCM alias (see the TCM remap note
 * above).
 */
const struct dma350_remap_range_t dma350_address_remap_list[] = {
    /* Non-secure CPU ITCM */
    {.begin = 0x00000000, .end = 0x00FFFFFF, .offset = 0x0A000000},
    /* Secure CPU ITCM */
    {.begin = 0x10000000, .end = 0x10FFFFFF, .offset = 0x0A000000},
    /* Non-secure CPU DTCM */
    {.begin = 0x20000000, .end = 0x20FFFFFF, .offset = 0x04000000},
    /* Secure CPU DTCM */
    {.begin = 0x30000000, .end = 0x30FFFFFF, .offset = 0x04000000}
};

/* Wrapper (entry count + table pointer) handed to the DMA350 library */
const struct dma350_remap_list_t dma350_address_remap = {
    .size = sizeof(dma350_address_remap_list)/
            sizeof(dma350_address_remap_list[0]),
    .map = dma350_address_remap_list
};
98 
99 /* Configures the RAM region to NS callable in sacfg block's nsccfg register */
100 #define RAMNSC  0x2
101 /* Configures the CODE region to NS callable in sacfg block's nsccfg register */
102 #define CODENSC  0x1
103 
104 /* Import MPC drivers */
105 extern ARM_DRIVER_MPC Driver_VM0_MPC;
106 extern ARM_DRIVER_MPC Driver_VM1_MPC;
107 extern ARM_DRIVER_MPC Driver_SIC_MPC;
108 
109 /* Import PPC drivers */
110 extern DRIVER_PPC_RSE Driver_PPC_RSE_MAIN0;
111 extern DRIVER_PPC_RSE Driver_PPC_RSE_MAIN_EXP0;
112 extern DRIVER_PPC_RSE Driver_PPC_RSE_MAIN_EXP1;
113 extern DRIVER_PPC_RSE Driver_PPC_RSE_MAIN_EXP2;
114 extern DRIVER_PPC_RSE Driver_PPC_RSE_MAIN_EXP3;
115 extern DRIVER_PPC_RSE Driver_PPC_RSE_PERIPH0;
116 extern DRIVER_PPC_RSE Driver_PPC_RSE_PERIPH1;
117 extern DRIVER_PPC_RSE Driver_PPC_RSE_PERIPH_EXP0;
118 extern DRIVER_PPC_RSE Driver_PPC_RSE_PERIPH_EXP1;
119 extern DRIVER_PPC_RSE Driver_PPC_RSE_PERIPH_EXP2;
120 extern DRIVER_PPC_RSE Driver_PPC_RSE_PERIPH_EXP3;
121 
122 /* Define Peripherals NS address range for the platform */
123 #define PERIPHERALS_BASE_NS_START      (0x40000000)
124 #define PERIPHERALS_BASE_NS_END        (0x4FFFFFFF)
125 
126 /* Enable system reset request for CPU 0 */
127 #define ENABLE_CPU0_SYSTEM_RESET_REQUEST (1U << 8U)
128 
/* To write into the AIRCR register, the value 0x5FA must be written to the
 * VECTKEY field, otherwise the processor ignores the write.
 */
132 #define SCB_AIRCR_WRITE_MASK ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos))
133 
134 /* Debug configuration flags */
135 #define SPNIDEN_SEL_STATUS (0x01u << 7)
136 #define SPNIDEN_STATUS     (0x01u << 6)
137 #define SPIDEN_SEL_STATUS  (0x01u << 5)
138 #define SPIDEN_STATUS      (0x01u << 4)
139 #define NIDEN_SEL_STATUS   (0x01u << 3)
140 #define NIDEN_STATUS       (0x01u << 2)
141 #define DBGEN_SEL_STATUS   (0x01u << 1)
142 #define DBGEN_STATUS       (0x01u << 0)
143 
144 #define All_SEL_STATUS (SPNIDEN_SEL_STATUS | SPIDEN_SEL_STATUS | \
145                         NIDEN_SEL_STATUS | DBGEN_SEL_STATUS)
146 
/* Table of PPC bank drivers, indexed by the ppc_bank_t value passed to the
 * ppc_configure_* helpers below.
 * NOTE(review): the entry order is assumed to match the ppc_bank_t
 * enumeration — confirm against that enum's definition if either changes.
 */
static DRIVER_PPC_RSE *const ppc_bank_drivers[] = {
    &Driver_PPC_RSE_MAIN0,
    &Driver_PPC_RSE_MAIN_EXP0,
    &Driver_PPC_RSE_MAIN_EXP1,
    &Driver_PPC_RSE_MAIN_EXP2,
    &Driver_PPC_RSE_MAIN_EXP3,
    &Driver_PPC_RSE_PERIPH0,
    &Driver_PPC_RSE_PERIPH1,
    &Driver_PPC_RSE_PERIPH_EXP0,
    &Driver_PPC_RSE_PERIPH_EXP1,
    &Driver_PPC_RSE_PERIPH_EXP2,
    &Driver_PPC_RSE_PERIPH_EXP3,
};
160 
161 #define PPC_BANK_COUNT (sizeof(ppc_bank_drivers)/sizeof(ppc_bank_drivers[0]))
162 
enable_fault_handlers(void)163 enum tfm_plat_err_t enable_fault_handlers(void)
164 {
165     /* Explicitly set secure fault priority to the highest */
166     NVIC_SetPriority(SecureFault_IRQn, 0);
167 
168     /* Enables BUS, MEM, USG and Secure faults */
169     SCB->SHCSR |= SCB_SHCSR_USGFAULTENA_Msk
170                   | SCB_SHCSR_BUSFAULTENA_Msk
171                   | SCB_SHCSR_MEMFAULTENA_Msk
172                   | SCB_SHCSR_SECUREFAULTENA_Msk;
173     return TFM_PLAT_ERR_SUCCESS;
174 }
175 
system_reset_cfg(void)176 enum tfm_plat_err_t system_reset_cfg(void)
177 {
178     struct rse_sysctrl_t *sysctrl = (struct rse_sysctrl_t *)RSE_SYSCTRL_BASE_S;
179     uint32_t reg_value = SCB->AIRCR;
180 
181     /* Enable system reset request for CPU 0, to be triggered via
182      * NVIC_SystemReset function.
183      */
184     sysctrl->reset_mask |= ENABLE_CPU0_SYSTEM_RESET_REQUEST;
185 
186     /* Clear SCB_AIRCR_VECTKEY value */
187     reg_value &= ~(uint32_t)(SCB_AIRCR_VECTKEY_Msk);
188 
189     /* Enable system reset request only to the secure world */
190     reg_value |= (uint32_t)(SCB_AIRCR_WRITE_MASK | SCB_AIRCR_SYSRESETREQS_Msk);
191 
192     SCB->AIRCR = reg_value;
193 
194     return TFM_PLAT_ERR_SUCCESS;
195 }
196 
197 /*--------------------- NVIC interrupt NS/S configuration --------------------*/
nvic_interrupt_target_state_cfg(void)198 enum tfm_plat_err_t nvic_interrupt_target_state_cfg(void)
199 {
200     uint8_t i;
201 
202     /* Target every interrupt to NS; unimplemented interrupts will be WI */
203     for (i = 0; i < (sizeof(NVIC->ITNS) / sizeof(NVIC->ITNS[0])); i++) {
204         NVIC->ITNS[i] = 0xFFFFFFFF;
205     }
206 
207     /* Make sure that MPC and PPC are targeted to S state */
208     NVIC_ClearTargetState(MPC_IRQn);
209     NVIC_ClearTargetState(PPC_IRQn);
210 
211     /* Make sure that SAM interrupts target S state */
212     NVIC_ClearTargetState(SAM_Critical_Sec_Fault_S_IRQn);
213     NVIC_ClearTargetState(SAM_Sec_Fault_S_IRQn);
214     NVIC_ClearTargetState(SRAM_TRAM_ECC_Err_S_IRQn);
215     NVIC_ClearTargetState(SRAM_ECC_Partial_Write_S_IRQn);
216 
217     return TFM_PLAT_ERR_SUCCESS;
218 }
219 
220 /*----------------- NVIC interrupt enabling for S peripherals ----------------*/
nvic_interrupt_enable(void)221 enum tfm_plat_err_t nvic_interrupt_enable(void)
222 {
223     int32_t ret = ARM_DRIVER_OK;
224     int32_t i = 0;
225 
226     /* MPC interrupt enabling */
227     mpc_clear_irq();
228 
229     ret = Driver_VM0_MPC.EnableInterrupt();
230     if (ret != ARM_DRIVER_OK) {
231         ERROR_MSG("Failed to Enable MPC interrupt for VM0!");
232         return TFM_PLAT_ERR_SYSTEM_ERR;
233     }
234 
235     ret = Driver_VM1_MPC.EnableInterrupt();
236     if (ret != ARM_DRIVER_OK) {
237         ERROR_MSG("Failed to Enable MPC interrupt for VM1!");
238         return TFM_PLAT_ERR_SYSTEM_ERR;
239     }
240 
241     NVIC_ClearPendingIRQ(MPC_IRQn);
242     NVIC_EnableIRQ(MPC_IRQn);
243 
244     /* PPC interrupt enabling */
245     ppc_clear_irq();
246 
247     for (i = 0; i < PPC_BANK_COUNT; i++)  {
248         ret = ppc_bank_drivers[i]->EnableInterrupt();
249         if (ret != ARM_DRIVER_OK) {
250             ERROR_MSG("Failed to Enable interrupt on PPC");
251             return TFM_PLAT_ERR_SYSTEM_ERR;
252         }
253     }
254 
255     NVIC_ClearPendingIRQ(PPC_IRQn);
256     NVIC_EnableIRQ(PPC_IRQn);
257 
258     /* Enable SAM interrupts. Set SAM critical security fault to the highest
259      * priority and other SAM faults to one lower priority.
260      */
261     NVIC_SetPriority(SAM_Critical_Sec_Fault_S_IRQn, 0);
262     NVIC_SetPriority(SAM_Sec_Fault_S_IRQn, 1);
263     NVIC_SetPriority(SRAM_TRAM_ECC_Err_S_IRQn, 1);
264     NVIC_SetPriority(SRAM_ECC_Partial_Write_S_IRQn, 1);
265 
266     NVIC_EnableIRQ(SAM_Critical_Sec_Fault_S_IRQn);
267     NVIC_EnableIRQ(SAM_Sec_Fault_S_IRQn);
268     NVIC_EnableIRQ(SRAM_TRAM_ECC_Err_S_IRQn);
269     NVIC_EnableIRQ(SRAM_ECC_Partial_Write_S_IRQn);
270 
271     /* Set the SAM watchdog counter to trigger if NMI, Critical Sec Fault
272      * interrupt or Sec Fault interrupt are not handled within 64K cycles.
273      */
274     sam_set_watchdog_counter_initial_value(&SAM_DEV_S, 0xFFFF,
275                                            SAM_RESPONSE_NMI |
276                                            SAM_RESPONSE_CRITICAL_FAULT_INTERRUPT |
277                                            SAM_RESPONSE_FAULT_INTERRUPT);
278 
279     /* Set watchdog event response to reset */
280     sam_set_event_response(&SAM_DEV_S, SAM_EVENT_WATCHDOG_TIMER,
281                            SAM_RESPONSE_COLD_RESET, true);
282 
283     return TFM_PLAT_ERR_SUCCESS;
284 }
285 
/* Apply the build-time debug authentication policy (DAUTH_*) by programming
 * the secure debug enable set/clear registers in the RSE system control
 * block. Exactly one DAUTH_* option must be defined at build time.
 *
 * Returns TFM_PLAT_ERR_SUCCESS unconditionally.
 */
enum tfm_plat_err_t init_debug(void)
{
    struct rse_sysctrl_t *sysctrl = (struct rse_sysctrl_t *)RSE_SYSCTRL_BASE_S;

#if defined(DAUTH_NONE)
    /* Set all the debug enable selector bits to 1 */
    sysctrl->secdbgset = All_SEL_STATUS;
    /* Set all the debug enable bits to 0: debug fully disabled */
    sysctrl->secdbgclr =
                   DBGEN_STATUS | NIDEN_STATUS | SPIDEN_STATUS | SPNIDEN_STATUS;
#elif defined(DAUTH_NS_ONLY)
    /* Set all the debug enable selector bits to 1 */
    sysctrl->secdbgset = All_SEL_STATUS;
    /* Set the debug enable bits to 1 for NS, and 0 for S mode */
    sysctrl->secdbgset = DBGEN_STATUS | NIDEN_STATUS;
    sysctrl->secdbgclr = SPIDEN_STATUS | SPNIDEN_STATUS;
#elif defined(DAUTH_FULL)
    /* Set all the debug enable selector bits to 1 */
    sysctrl->secdbgset = All_SEL_STATUS;
    /* Set all the debug enable bits to 1: debug fully enabled */
    sysctrl->secdbgset =
                   DBGEN_STATUS | NIDEN_STATUS | SPIDEN_STATUS | SPNIDEN_STATUS;
#else

#if !defined(DAUTH_CHIP_DEFAULT)
#error "No debug authentication setting is provided."
#endif

    /* Set all the debug enable selector bits to 0, so the effective debug
     * enables come from the chip's input signals.
     */
    sysctrl->secdbgclr = All_SEL_STATUS;

    /* No need to set any enable bits because the value depends on
     * input signals.
     */
#endif
    return TFM_PLAT_ERR_SUCCESS;
}
323 
324 /*------------------- SAU/IDAU configuration functions -----------------------*/
sau_and_idau_cfg(void)325 void sau_and_idau_cfg(void)
326 {
327     struct rse_sacfg_t *sacfg = (struct rse_sacfg_t *)RSE_SACFG_BASE_S;
328 
329     /* Ensure all memory accesses are completed */
330     __DMB();
331 
332     /* Enables SAU */
333     TZ_SAU_Enable();
334 
335     /* Configures SAU regions to be non-secure */
336     SAU->RNR  = 0;
337     SAU->RBAR = (memory_regions.non_secure_partition_base
338                  & SAU_RBAR_BADDR_Msk);
339     SAU->RLAR = (memory_regions.non_secure_partition_limit
340                   & SAU_RLAR_LADDR_Msk) | SAU_RLAR_ENABLE_Msk;
341 
342     SAU->RNR  = 1;
343     SAU->RBAR = (NS_DATA_START & SAU_RBAR_BADDR_Msk);
344     SAU->RLAR = (NS_DATA_LIMIT & SAU_RLAR_LADDR_Msk) | SAU_RLAR_ENABLE_Msk;
345 
346 #ifdef CONFIG_TFM_USE_TRUSTZONE
347     /* Configures veneers region to be non-secure callable */
348     SAU->RNR  = 2;
349     SAU->RBAR = (memory_regions.veneer_base & SAU_RBAR_BADDR_Msk);
350     SAU->RLAR = (memory_regions.veneer_limit & SAU_RLAR_LADDR_Msk)
351                  | SAU_RLAR_ENABLE_Msk | SAU_RLAR_NSC_Msk;
352 #endif
353 
354     /* Configure the peripherals space */
355     SAU->RNR  = 3;
356     SAU->RBAR = (PERIPHERALS_BASE_NS_START & SAU_RBAR_BADDR_Msk);
357     SAU->RLAR = (PERIPHERALS_BASE_NS_END & SAU_RLAR_LADDR_Msk)
358                   | SAU_RLAR_ENABLE_Msk;
359 
360     /* Configure the host access space */
361     SAU->RNR  = 4;
362     SAU->RBAR = (HOST_ACCESS_BASE_NS & SAU_RBAR_BADDR_Msk);
363     SAU->RLAR = (HOST_ACCESS_LIMIT_NS & SAU_RLAR_LADDR_Msk)
364                   | SAU_RLAR_ENABLE_Msk;
365 
366     /* Allows SAU to define the CODE region as NSC when XIP is enabled or the
367      * RAM region otherwise
368      */
369 #ifdef RSE_XIP
370     sacfg->nsccfg |= CODENSC;
371 #else
372     sacfg->nsccfg |= RAMNSC;
373 #endif
374 
375     /* Configure MSC to enable secure accesses for the DMA */
376     sacfg->nsmscexp = 0x0;
377 
378     /* Ensure the write is completed and flush pipeline */
379     __DSB();
380     __ISB();
381 
382 }
383 
384 /*------------------- Memory configuration functions -------------------------*/
mpc_init_cfg(void)385 enum tfm_plat_err_t mpc_init_cfg(void)
386 {
387     int32_t ret = ARM_DRIVER_OK;
388 
389     ret = Driver_VM0_MPC.Initialize();
390     if (ret != ARM_DRIVER_OK) {
391         return TFM_PLAT_ERR_SYSTEM_ERR;
392     }
393     ret = Driver_VM1_MPC.Initialize();
394     if (ret != ARM_DRIVER_OK) {
395         return TFM_PLAT_ERR_SYSTEM_ERR;
396     }
397 #ifdef RSE_XIP
398     ret = Driver_SIC_MPC.Initialize();
399     if (ret != ARM_DRIVER_OK) {
400         return TFM_PLAT_ERR_SYSTEM_ERR;
401     }
402 #endif /* RSE_XIP */
403 
404     /* Configuring primary non-secure partition.
405      * It is ensured in flash_layout.h that these memory regions are located in
406      * VM1 SRAM device. */
407 
408     ret = Driver_VM1_MPC.ConfigRegion(NS_DATA_START,
409                                       NS_DATA_LIMIT,
410                                       ARM_MPC_ATTR_NONSECURE);
411     if (ret != ARM_DRIVER_OK) {
412         return TFM_PLAT_ERR_SYSTEM_ERR;
413     }
414 
415 #ifdef RSE_XIP
416     ret = Driver_SIC_MPC.ConfigRegion(memory_regions.non_secure_partition_base,
417                                       memory_regions.non_secure_partition_limit,
418                                       ARM_MPC_ATTR_NONSECURE);
419 #else
420     ret = Driver_VM1_MPC.ConfigRegion(memory_regions.non_secure_partition_base,
421                                       memory_regions.non_secure_partition_limit,
422                                       ARM_MPC_ATTR_NONSECURE);
423 #endif /* !RSE_XIP */
424     if (ret != ARM_DRIVER_OK) {
425         return TFM_PLAT_ERR_SYSTEM_ERR;
426     }
427 
428     /* Lock down the MPC configuration */
429     ret = Driver_VM0_MPC.LockDown();
430     if (ret != ARM_DRIVER_OK) {
431         return ret;
432     }
433 
434     ret = Driver_VM1_MPC.LockDown();
435     if (ret != ARM_DRIVER_OK) {
436         return ret;
437     }
438 #ifdef RSE_XIP
439     ret = Driver_SIC_MPC.LockDown();
440     if (ret != ARM_DRIVER_OK) {
441         return ret;
442     }
443 #endif /* RSE_XIP */
444 
445     /* Add barriers to assure the MPC configuration is done before continue
446      * the execution.
447      */
448     __DSB();
449     __ISB();
450 
451     return TFM_PLAT_ERR_SUCCESS;
452 }
453 
/* Clear any pending security-violation interrupt on every MPC instance.
 * The SIC MPC is only present when executing in place (RSE_XIP). Called
 * before the MPC IRQ is enabled in nvic_interrupt_enable().
 */
void mpc_clear_irq(void)
{
    Driver_VM0_MPC.ClearInterrupt();
    Driver_VM1_MPC.ClearInterrupt();
#ifdef RSE_XIP
    Driver_SIC_MPC.ClearInterrupt();
#endif /* RSE_XIP */
}
462 
463 /*------------------- PPC configuration functions -------------------------*/
ppc_init_cfg(void)464 enum tfm_plat_err_t ppc_init_cfg(void)
465 {
466     struct rse_sacfg_t *sacfg = (struct rse_sacfg_t *)RSE_SACFG_BASE_S;
467     int32_t err = ARM_DRIVER_OK;
468 
469     /* Initialize not used PPC drivers */
470     err |= Driver_PPC_RSE_MAIN0.Initialize();
471     err |= Driver_PPC_RSE_MAIN_EXP0.Initialize();
472     err |= Driver_PPC_RSE_MAIN_EXP1.Initialize();
473     err |= Driver_PPC_RSE_MAIN_EXP2.Initialize();
474     err |= Driver_PPC_RSE_MAIN_EXP3.Initialize();
475     err |= Driver_PPC_RSE_PERIPH0.Initialize();
476     err |= Driver_PPC_RSE_PERIPH1.Initialize();
477     err |= Driver_PPC_RSE_PERIPH_EXP0.Initialize();
478     err |= Driver_PPC_RSE_PERIPH_EXP1.Initialize();
479     err |= Driver_PPC_RSE_PERIPH_EXP2.Initialize();
480     err |= Driver_PPC_RSE_PERIPH_EXP3.Initialize();
481 
482     /*
483      * Configure the response to a security violation as a
484      * bus error instead of RAZ/WI
485      */
486     sacfg->secrespcfg |= CMSDK_SECRESPCFG_BUS_ERR_MASK;
487 
488     if (err != ARM_DRIVER_OK) {
489         return TFM_PLAT_ERR_SYSTEM_ERR;
490     }
491 
492     return TFM_PLAT_ERR_SUCCESS;
493 }
494 
/* Mark the peripheral at bit position 'pos' in the given PPC bank as secure.
 * Out-of-range bank values and missing drivers are silently ignored.
 */
void ppc_configure_to_secure(ppc_bank_t bank, uint32_t pos)
{
    DRIVER_PPC_RSE *drv;

    /* Reject bank indices outside the driver table */
    if (bank >= PPC_BANK_COUNT) {
        return;
    }

    drv = ppc_bank_drivers[bank];
    if (drv == NULL) {
        return;
    }

    drv->ConfigSecurity(pos, PPC_RSE_SECURE_CONFIG);
}
508 
/* Mark the peripheral at bit position 'pos' in the given PPC bank as
 * non-secure. Out-of-range bank values and missing drivers are silently
 * ignored.
 */
void ppc_configure_to_non_secure(ppc_bank_t bank, uint32_t pos)
{
    DRIVER_PPC_RSE *drv;

    /* Reject bank indices outside the driver table */
    if (bank >= PPC_BANK_COUNT) {
        return;
    }

    drv = ppc_bank_drivers[bank];
    if (drv == NULL) {
        return;
    }

    drv->ConfigSecurity(pos, PPC_RSE_NONSECURE_CONFIG);
}
522 
/* Allow unprivileged secure access to the peripheral at bit position 'pos'
 * in the given PPC bank. Out-of-range bank values and missing drivers are
 * silently ignored.
 */
void ppc_en_secure_unpriv(ppc_bank_t bank, uint32_t pos)
{
    DRIVER_PPC_RSE *drv;

    /* Reject bank indices outside the driver table */
    if (bank >= PPC_BANK_COUNT) {
        return;
    }

    drv = ppc_bank_drivers[bank];
    if (drv == NULL) {
        return;
    }

    drv->ConfigPrivilege(pos,
                         PPC_RSE_SECURE_CONFIG,
                         PPC_RSE_PRIV_AND_NONPRIV_CONFIG);
}
538 
/* Restrict the peripheral at bit position 'pos' in the given PPC bank to
 * privileged secure access only. Out-of-range bank values and missing
 * drivers are silently ignored.
 */
void ppc_clr_secure_unpriv(ppc_bank_t bank, uint32_t pos)
{
    DRIVER_PPC_RSE *drv;

    /* Reject bank indices outside the driver table */
    if (bank >= PPC_BANK_COUNT) {
        return;
    }

    drv = ppc_bank_drivers[bank];
    if (drv == NULL) {
        return;
    }

    drv->ConfigPrivilege(pos,
                         PPC_RSE_SECURE_CONFIG,
                         PPC_RSE_PRIV_CONFIG);
}
554 
ppc_clear_irq(void)555 void ppc_clear_irq(void)
556 {
557     int32_t i = 0;
558 
559     for (i = 0; i < PPC_BANK_COUNT; i++) {
560         ppc_bank_drivers[i]->ClearInterrupt();
561     }
562 }
563 
/* DMA350 DMA0 secure channel devices, indexed by channel number; iterated
 * by dma_init_cfg() to initialize and secure every channel.
 */
static struct dma350_ch_dev_t *const dma350_channel_list[DMA350_DMA0_CHANNEL_COUNT] = {
    &DMA350_DMA0_CH0_DEV_S,
    &DMA350_DMA0_CH1_DEV_S,
    &DMA350_DMA0_CH2_DEV_S,
    &DMA350_DMA0_CH3_DEV_S
};
570 
571 /*------------------- DMA configuration functions -------------------------*/
dma_init_cfg(void)572 enum tfm_plat_err_t dma_init_cfg(void)
573 {
574     enum dma350_error_t dma_err;
575     enum dma350_ch_error_t ch_err;
576     struct dma350_ch_dev_t *dma_ch_ptr;
577     int32_t i;
578 
579     dma_err = dma350_init(&DMA350_DMA0_DEV_S);
580     if(dma_err != DMA350_ERR_NONE) {
581         ERROR_MSG("DMA350_DMA0_DEV_S init failed!");
582         return TFM_PLAT_ERR_SYSTEM_ERR;
583     }
584 
585     /* Initialise and set all available DMA channels to privilege and secure */
586     for (i = 0; i < DMA350_DMA0_CHANNEL_COUNT; i++) {
587         dma_ch_ptr = dma350_channel_list[i];
588 
589         ch_err = dma350_ch_init(dma_ch_ptr);
590         if(ch_err != DMA350_CH_ERR_NONE) {
591             ERROR_MSG("DMA350 channel init failed");
592             return TFM_PLAT_ERR_SYSTEM_ERR;
593         }
594 
595         dma_err = dma350_set_ch_privileged(&DMA350_DMA0_DEV_S, i);
596         if(dma_err != DMA350_ERR_NONE) {
597             ERROR_MSG("Failed to set DMA350 channel privileged!");
598             return TFM_PLAT_ERR_SYSTEM_ERR;
599         }
600 
601         dma_err = dma350_set_ch_secure(&DMA350_DMA0_DEV_S, i);
602         if(dma_err != DMA350_ERR_NONE) {
603             ERROR_MSG("Failed to set DMA350 channel secure!");
604             return TFM_PLAT_ERR_SYSTEM_ERR;
605         }
606     }
607 
608     /* FIXME: Use combined secure interrupt because there are no channel IRQs */
609     DMA350_DMA0_DEV_S.cfg->dma_sec_ctrl->SEC_CTRL |= 0x1UL; /* INTREN_ANYCHINTR */
610     DMA350_DMA0_DEV_S.cfg->dma_nsec_ctrl->NSEC_CTRL |= 0x1UL; /* INTREN_ANYCHINTR */
611 
612     return TFM_PLAT_ERR_SUCCESS;
613 }
614