/*
 * Copyright (C) 2024, Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * @file freertos/xlnx/sys.c
 * @brief Machine-specific system primitives implementation.
 */

#include <metal/compiler.h>
#include <metal/io.h>
#include <metal/sys.h>
#include <metal/utilities.h>
#include <stdint.h>
#include "xil_cache.h"
#include "xil_exception.h"
#include "xscugic.h"
#include "xil_mmu.h"

#if (defined(__aarch64__) || defined(ARMA53_32)) && !defined(SDT)

#ifdef VERSAL_NET
#include "xcpu_cortexa78.h"
#elif defined(versal)
#include "xcpu_cortexa72.h"
#else
#include "xreg_cortexa53.h"
#endif /* defined(versal) */

#elif defined(ARMR5)

#include "xil_mpu.h"
#include "xreg_cortexr5.h"

#endif /* (defined(__aarch64__) || defined(ARMA53_32)) && !defined(SDT) */

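/**
 * @brief Re-enable the interrupts that were enabled when the matching
 *        sys_irq_save_disable() call was made.
 *
 * @param flags interrupt state returned by sys_irq_save_disable()
 */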
void sys_irq_restore_enable(unsigned int flags)
{
	Xil_ExceptionEnableMask(~flags);
}

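/**
 * @brief Disable all interrupts and return the previous interrupt state.
 *
 * @return previous interrupt mask, to be passed back to
 *         sys_irq_restore_enable()
 */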
unsigned int sys_irq_save_disable(void)
{
	unsigned int state = mfcpsr() & XIL_EXCEPTION_ALL;

	if (state != XIL_EXCEPTION_ALL) {
		Xil_ExceptionDisableMask(XIL_EXCEPTION_ALL);
	}
	return state;
}
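
/*
 * Illustrative sketch (not part of this file's code): the two helpers
 * above are meant to bracket a critical section, e.g.
 *
 *	unsigned int flags = sys_irq_save_disable();
 *	... work that must not be interrupted ...
 *	sys_irq_restore_enable(flags);
 */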
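/**
 * @brief Flush the data cache.
 *
 * Flushes the whole data cache when both addr and len are zero,
 * otherwise flushes only the given range.
 */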
void metal_machine_cache_flush(void *addr, unsigned int len)
{
	if (!addr && !len)
		Xil_DCacheFlush();
	else
		Xil_DCacheFlushRange((intptr_t)addr, len);
}

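/**
 * @brief Invalidate the data cache.
 *
 * Invalidates the whole data cache when both addr and len are zero,
 * otherwise invalidates only the given range.
 */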
void metal_machine_cache_invalidate(void *addr, unsigned int len)
{
	if (!addr && !len)
		Xil_DCacheInvalidate();
	else
		Xil_DCacheInvalidateRange((intptr_t)addr, len);
}

/**
 * @brief Default poll implementation: wait for an interrupt until some
 *        event happens. Declared weak so that applications can override it.
 */
void metal_weak metal_generic_default_poll(void)
{
	metal_asm volatile("wfi");
}

/*
 * The VERSAL_NET guard is needed because the XMpu_Config structure
 * differs between the R52 (Versal NET) and the R5 (ZynqMP); compiling
 * this code on the R5 would cause a build failure.
 */
#ifdef VERSAL_NET
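/**
 * @brief Map a physical memory region on Versal NET.
 *
 * Scans the existing MPU entries and only creates a new mapping via
 * Xil_MemMap() when the requested region is not already covered with
 * the requested attributes.
 */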
void *metal_machine_io_mem_map_versal_net(void *va, metal_phys_addr_t pa,
					  size_t size, unsigned int flags)
{
	void *__attribute__((unused)) physaddr;
	u32 req_end_addr = pa + size;
	XMpu_Config mpu_config;
	u32 req_addr = pa;
	u32 mmap_req = 1;
	u32 base_end_addr;
	u32 cnt;

	/* Get the MPU config entries */
	Xil_GetMPUConfig(mpu_config);

	for (cnt = 0; cnt < MAX_POSSIBLE_MPU_REGS; cnt++) {

		if (!(mpu_config[cnt].flags & XMPU_VALID_REGION))
			continue;

		base_end_addr = mpu_config[cnt].Size + mpu_config[cnt].BaseAddress;

		if (mpu_config[cnt].BaseAddress <= req_addr && base_end_addr >= req_end_addr) {
			/*
			 * The MPU table already covers the requested region.
			 * If the attributes match as well, no additional
			 * mapping is required.
			 */
			if (mpu_config[cnt].Attribute == flags) {
				mmap_req = 0;
				break;
			}
		}
	}

	/* If a new mapping is required, call Xil_MemMap to create it */
	if (mmap_req == 1) {
		physaddr = Xil_MemMap(pa, size, flags);
		metal_assert(physaddr == (void *)pa);
	}

	return va;
}
#endif

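/**
 * @brief Map a physical memory region for I/O access.
 *
 * On Versal NET the MPU-aware helper above is used; otherwise the
 * region is mapped directly with Xil_MemMap().
 */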
void *metal_machine_io_mem_map(void *va, metal_phys_addr_t pa,
			       size_t size, unsigned int flags)
{
	void *__attribute__((unused)) physaddr;

#ifdef VERSAL_NET
	va = metal_machine_io_mem_map_versal_net(va, pa, size, flags);
#else
	physaddr = Xil_MemMap(pa, size, flags);
	metal_assert(physaddr == (void *)pa);
#endif
	return va;
}