/*******************************************************************************
 * Copyright 2019-2021 Microchip FPGA Embedded Systems Solutions.
 *
 * SPDX-License-Identifier: MIT
 *
 * MPFS HAL Embedded Software
 *
 */
/*******************************************************************************
 *
 * file name : mpfs_envm.ld
 * Use with bare metal startup code.
 * Startup code runs from envm on MSS reset.
 *
 * You can find details of the PolarFire SoC memory map in
 * mpfs-memory-hierarchy.md, which can be found under the link below:
 * https://github.com/polarfire-soc/polarfire-soc-documentation
 *
 */

OUTPUT_ARCH( "riscv" )
ENTRY(_start)

/*-----------------------------------------------------------------------------

-- MSS hart Reset vector

The MSS reset vector for each hart is stored securely in the MPFS.
The most common usage is for the reset vector of each hart to be set to the
start of the envm at address 0x2022_0100, giving 128K - 256B of contiguous
non-volatile storage. Normally this is where the initial boot-loader will
reside. (Note: The first 256B page of envm is used for metadata associated with
secure boot. When not using secure boot (mode 0,1), this area is still reserved
by convention. It allows an easier transition from the non-secure to the secure
boot flow during development.)
When debugging a bare metal program that runs out of reset from envm, a linker
script is normally used whereby the program runs from LIM instead of envm.
In this case, the reset vector in the linker script is normally set to the
start of LIM, 0x0800_0000.
This means you are not continually programming the envm each time you load a
program, and there are no limitations on breakpoints when debugging.
See the mpfs-lim.ld example linker script when running from LIM.

------------------------------------------------------------------------------*/


MEMORY
{
    /* In this example, our reset vector is set to point to the */
    /* start of page 1 of the envm                              */
    envm (rx) : ORIGIN = 0x20220100, LENGTH = 128k - 0x100
    dtim (rwx) : ORIGIN = 0x01000000, LENGTH = 7k
    e51_itim (rwx) : ORIGIN = 0x01800000, LENGTH = 28k
    u54_1_itim (rwx) : ORIGIN = 0x01808000, LENGTH = 28k
    u54_2_itim (rwx) : ORIGIN = 0x01810000, LENGTH = 28k
    u54_3_itim (rwx) : ORIGIN = 0x01818000, LENGTH = 28k
    u54_4_itim (rwx) : ORIGIN = 0x01820000, LENGTH = 28k
    l2lim (rwx) : ORIGIN = 0x08000000, LENGTH = 256k
    scratchpad (rwx) : ORIGIN = 0x0A000000, LENGTH = 256k
    /* This 1K of DTIM is used to run code when switching the envm clock */
    switch_code_dtim (rx) : ORIGIN = 0x01001c00, LENGTH = 1k
    /* DDR sections example */
    ddr_cached_32bit (rwx) : ORIGIN = 0x80000000, LENGTH = 768M
    ddr_non_cached_32bit (rwx) : ORIGIN = 0xC0000000, LENGTH = 256M
    ddr_wcb_32bit (rwx) : ORIGIN = 0xD0000000, LENGTH = 256M
    ddr_cached_38bit (rwx) : ORIGIN = 0x1000000000, LENGTH = 1024M
    ddr_non_cached_38bit (rwx) : ORIGIN = 0x1400000000, LENGTH = 0k
    ddr_wcb_38bit (rwx) : ORIGIN = 0x1800000000, LENGTH = 0k
}

HEAP_SIZE = 8k;    /* needs to be calculated for your application */

/*
 * There is a common area for shared variables, accessed via a pointer held in
 * each hart's HLS (hart local storage).
 */
SIZE_OF_COMMON_HART_MEM = 4k;
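/*
 * HEAP_SIZE above is consumed by the .heap output section further down, which
 * provides the __heap_start and __heap_end symbols. The sketch below is only
 * illustrative (it is not part of the HAL): a newlib-style _sbrk() bounded by
 * those symbols. Everything except the two linker symbols is hypothetical.
 *
 *   #include <stddef.h>
 *
 *   extern char __heap_start[];   // provided by this linker script
 *   extern char __heap_end[];     // provided by this linker script
 *
 *   void *_sbrk(ptrdiff_t incr)
 *   {
 *       static char *heap_brk = 0;
 *       char *prev;
 *
 *       if (heap_brk == 0)
 *       {
 *           heap_brk = __heap_start;
 *       }
 *       if ((heap_brk + incr) > __heap_end)
 *       {
 *           return (void *)-1;    // heap exhausted; caller reports ENOMEM
 *       }
 *       prev      = heap_brk;
 *       heap_brk += incr;
 *       return prev;
 *   }
 */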
/*
 * The stack size needs to be calculated for your application. It must be
 * aligned.
 * Thread local storage (AKA hart local storage) is also allocated for each
 * hart as part of the stack.
 * So the memory map will look like this once apportioned in the startup code:
 * stack hart0   Actual stack size = (STACK_SIZE_PER_HART - HLS_DEBUG_AREA_SIZE)
 * TLS hart 0
 * stack hart1
 * TLS hart 1
 * etc
 * note: HLS_DEBUG_AREA_SIZE is defined in mss_sw_config.h
 */

/*
 * Stack size for each hart's application.
 * These are the stack sizes that will be allocated to each hart before starting
 * each hart's application function, e51(), u54_1(), u54_2(), u54_3(), u54_4().
 */
STACK_SIZE_E51_APPLICATION = 8k;
STACK_SIZE_U54_1_APPLICATION = 8k;
STACK_SIZE_U54_2_APPLICATION = 8k;
STACK_SIZE_U54_3_APPLICATION = 8k;
STACK_SIZE_U54_4_APPLICATION = 8k;

SECTIONS
{
    PROVIDE(__envm_start = ORIGIN(envm));
    PROVIDE(__envm_end = ORIGIN(envm) + LENGTH(envm));
    PROVIDE(__l2lim_start = ORIGIN(l2lim));
    PROVIDE(__l2lim_end = ORIGIN(l2lim) + LENGTH(l2lim));
    PROVIDE(__ddr_cached_32bit_start = ORIGIN(ddr_cached_32bit));
    PROVIDE(__ddr_cached_32bit_end = ORIGIN(ddr_cached_32bit) + LENGTH(ddr_cached_32bit));
    PROVIDE(__ddr_non_cached_32bit_start = ORIGIN(ddr_non_cached_32bit));
    PROVIDE(__ddr_non_cached_32bit_end = ORIGIN(ddr_non_cached_32bit) + LENGTH(ddr_non_cached_32bit));
    PROVIDE(__ddr_wcb_32bit_start = ORIGIN(ddr_wcb_32bit));
    PROVIDE(__ddr_wcb_32bit_end = ORIGIN(ddr_wcb_32bit) + LENGTH(ddr_wcb_32bit));
    PROVIDE(__ddr_cached_38bit_start = ORIGIN(ddr_cached_38bit));
    PROVIDE(__ddr_cached_38bit_end = ORIGIN(ddr_cached_38bit) + LENGTH(ddr_cached_38bit));
    PROVIDE(__ddr_non_cached_38bit_start = ORIGIN(ddr_non_cached_38bit));
    PROVIDE(__ddr_non_cached_38bit_end = ORIGIN(ddr_non_cached_38bit) + LENGTH(ddr_non_cached_38bit));
    PROVIDE(__ddr_wcb_38bit_start = ORIGIN(ddr_wcb_38bit));
    PROVIDE(__ddr_wcb_38bit_end = ORIGIN(ddr_wcb_38bit) + LENGTH(ddr_wcb_38bit));
    PROVIDE(__dtim_start = ORIGIN(dtim));
    PROVIDE(__dtim_end = ORIGIN(dtim) + LENGTH(dtim));
    PROVIDE(__e51itim_start = ORIGIN(e51_itim));
    PROVIDE(__e51itim_end = ORIGIN(e51_itim) + LENGTH(e51_itim));
    PROVIDE(__u54_1_itim_start = ORIGIN(u54_1_itim));
    PROVIDE(__u54_1_itim_end = ORIGIN(u54_1_itim) + LENGTH(u54_1_itim));
    PROVIDE(__u54_2_itim_start = ORIGIN(u54_2_itim));
    PROVIDE(__u54_2_itim_end = ORIGIN(u54_2_itim) + LENGTH(u54_2_itim));
    PROVIDE(__u54_3_itim_start = ORIGIN(u54_3_itim));
    PROVIDE(__u54_3_itim_end = ORIGIN(u54_3_itim) + LENGTH(u54_3_itim));
    PROVIDE(__u54_4_itim_start = ORIGIN(u54_4_itim));
    PROVIDE(__u54_4_itim_end = ORIGIN(u54_4_itim) + LENGTH(u54_4_itim));
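    /*
     * The PROVIDE()d region symbols above can be referenced from application
     * code to query the memory map at run time. The sketch below is only
     * illustrative (it is not HAL code); linker symbols are declared as arrays
     * so that only their addresses are used. The helper name is hypothetical.
     *
     *   #include <stdbool.h>
     *   #include <stddef.h>
     *   #include <stdint.h>
     *
     *   extern char __ddr_non_cached_32bit_start[];
     *   extern char __ddr_non_cached_32bit_end[];
     *
     *   // True if a buffer lies entirely within non-cached 32-bit DDR.
     *   static bool is_in_non_cached_ddr(const void *buf, size_t size)
     *   {
     *       uintptr_t start = (uintptr_t)__ddr_non_cached_32bit_start;
     *       uintptr_t end   = (uintptr_t)__ddr_non_cached_32bit_end;
     *       uintptr_t addr  = (uintptr_t)buf;
     *
     *       return (addr >= start) && ((addr + size) <= end);
     *   }
     */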
    . = __envm_start;
    .text : ALIGN(0x10)
    {
        __text_load = LOADADDR(.text);
        __text_start = .;
        *(.text.init)
        /* *entry.o(.text); */
        . = ALIGN(0x10);
        *(.text .text.* .gnu.linkonce.t.*)
        *(.plt)
        . = ALIGN(0x10);

        KEEP (*crtbegin.o(.ctors))
        KEEP (*(EXCLUDE_FILE (*crtend.o) .ctors))
        KEEP (*(SORT(.ctors.*)))
        KEEP (*crtend.o(.ctors))
        KEEP (*crtbegin.o(.dtors))
        KEEP (*(EXCLUDE_FILE (*crtend.o) .dtors))
        KEEP (*(SORT(.dtors.*)))
        KEEP (*crtend.o(.dtors))

        *(.rodata .rodata.* .gnu.linkonce.r.*)
        *(.sdata2 .sdata2.* .gnu.linkonce.s2.*)
        *(.gcc_except_table)
        *(.eh_frame_hdr)
        *(.eh_frame)

        KEEP (*(.init))
        KEEP (*(.fini))

        PROVIDE_HIDDEN (__preinit_array_start = .);
        KEEP (*(.preinit_array))
        PROVIDE_HIDDEN (__preinit_array_end = .);
        PROVIDE_HIDDEN (__init_array_start = .);
        KEEP (*(SORT(.init_array.*)))
        KEEP (*(.init_array))
        PROVIDE_HIDDEN (__init_array_end = .);
        PROVIDE_HIDDEN (__fini_array_start = .);
        KEEP (*(.fini_array))
        KEEP (*(SORT(.fini_array.*)))
        PROVIDE_HIDDEN (__fini_array_end = .);

        *(.srodata.cst16) *(.srodata.cst8) *(.srodata.cst4) *(.srodata.cst2)
        *(.srodata*)

        . = ALIGN(0x10);
        __text_end = .;
    } > envm

    .l2_scratchpad : ALIGN(0x10)
    {
        __l2_scratchpad_load = LOADADDR(.l2_scratchpad);
        __l2_scratchpad_start = .;
        __l2_scratchpad_vma_start = .;
        *(.l2_scratchpad)
        . = ALIGN(0x10);
        __l2_scratchpad_end = .;
        __l2_scratchpad_vma_end = .;
    } > scratchpad AT> envm

    /*
     * The .ram_code section contains the code that is run from RAM.
     * We use this code to switch the clocks, including the envm clock.
     * This cannot be done while running from envm.
     * This section must be copied to RAM before any of its code is run.
     */
    .ram_code :
    {
        . = ALIGN (4);
        __sc_load = LOADADDR (.ram_code);
        __sc_start = .;
        *(.ram_codetext)        /* .ram_codetext sections (code) */
        *(.ram_codetext*)       /* .ram_codetext* sections (code) */
        *(.ram_coderodata)      /* read-only data (constants) */
        *(.ram_coderodata*)
        . = ALIGN (4);
        __sc_end = .;
    } > switch_code_dtim AT> envm

    /*
     * The .ddr_code section contains the code that is run from DDR.
     * This is used to verify that DDR is working as expected.
     */
    .ddr_code :
    {
        . = ALIGN (4);
        __ddr_load = LOADADDR (.ddr_code);
        __ddr_start = .;
        *(.ddr_codetext)        /* .ddr_codetext sections (code) */
        *(.ddr_codetext*)       /* .ddr_codetext* sections (code) */
        *(.ddr_coderodata)      /* read-only data (constants) */
        *(.ddr_coderodata*)
        . = ALIGN (4);
        __ddr_end = .;
    } > ddr_cached_32bit AT> envm

    /* short/global data section */
    .sdata : ALIGN(0x10)
    {
        __sdata_load = LOADADDR(.sdata);
        __sdata_start = .;
        /* Offsets used with gp (the global pointer) are 12-bit signed, so set
           gp to point to the middle of the expected sdata range. */
        /* If sdata is larger than 4K, the linker uses direct addressing.
           Perhaps we should add a check/warning to the linker script if sdata
           is > 4K. */
        __global_pointer$ = . + 0x800;
        *(.sdata .sdata.* .gnu.linkonce.s.*)
        . = ALIGN(0x10);
        __sdata_end = .;
    } > l2lim AT> envm

    /* data section */
    .data : ALIGN(0x10)
    {
        __data_load = LOADADDR(.data);
        __data_start = .;
        *(.got.plt) *(.got)
        *(.shdata)
        *(.data .data.* .gnu.linkonce.d.*)
        . = ALIGN(0x10);
        __data_end = .;
    } > l2lim AT> envm

    /* sbss section */
    .sbss : ALIGN(0x10)
    {
        __sbss_start = .;
        *(.sbss .sbss.* .gnu.linkonce.sb.*)
        *(.scommon)
        . = ALIGN(0x10);
        __sbss_end = .;
    } > l2lim
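    /*
     * Sections that run from RAM but are loaded from envm, such as .ram_code
     * above (load address __sc_load, run address __sc_start..__sc_end) and the
     * .sdata/.data sections, must be copied from their load addresses to their
     * run addresses before use, and the .sbss/.bss type sections must be
     * zeroed. The HAL startup code normally performs this; the sketch below
     * only illustrates the idea using the symbols defined in this script (the
     * helper name is hypothetical).
     *
     *   #include <stddef.h>
     *   #include <string.h>
     *
     *   extern char __sc_load[],    __sc_start[],    __sc_end[];
     *   extern char __sdata_load[], __sdata_start[], __sdata_end[];
     *   extern char __data_load[],  __data_start[],  __data_end[];
     *   extern char __sbss_start[], __sbss_end[];
     *
     *   static void copy_loadable_sections(void)
     *   {
     *       memcpy(__sc_start,    __sc_load,    (size_t)(__sc_end - __sc_start));
     *       memcpy(__sdata_start, __sdata_load, (size_t)(__sdata_end - __sdata_start));
     *       memcpy(__data_start,  __data_load,  (size_t)(__data_end - __data_start));
     *       memset(__sbss_start,  0,            (size_t)(__sbss_end - __sbss_start));
     *   }
     */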
    /* bss section */
    .bss : ALIGN(0x10)
    {
        __bss_start = .;
        *(.shbss)
        *(.bss .bss.* .gnu.linkonce.b.*)
        *(COMMON)
        . = ALIGN(0x10);
        __bss_end = .;
    } > l2lim

    /* End of uninitialized data segment */
    _end = .;

    .heap : ALIGN(0x10)
    {
        __heap_start = .;
        . += HEAP_SIZE;
        __heap_end = .;
        . = ALIGN(0x10);
        _heap_end = __heap_end;
    } > l2lim

    /* Must be on a 4k boundary (0x1000) - this corresponds to the page size
       when using memory protection. */
    /* .stack : ALIGN(0x1000) */
    .stack : ALIGN(0x10)
    {
        PROVIDE(__stack_bottom_h0$ = .);
        PROVIDE(__app_stack_bottom_h0 = .);
        . += STACK_SIZE_E51_APPLICATION;
        PROVIDE(__app_stack_top_h0 = .);
        PROVIDE(__stack_top_h0$ = .);

        PROVIDE(__stack_bottom_h1$ = .);
        PROVIDE(__app_stack_bottom_h1$ = .);
        . += STACK_SIZE_U54_1_APPLICATION;
        PROVIDE(__app_stack_top_h1 = .);
        PROVIDE(__stack_top_h1$ = .);

        PROVIDE(__stack_bottom_h2$ = .);
        PROVIDE(__app_stack_bottom_h2 = .);
        . += STACK_SIZE_U54_2_APPLICATION;
        PROVIDE(__app_stack_top_h2 = .);
        PROVIDE(__stack_top_h2$ = .);

        PROVIDE(__stack_bottom_h3$ = .);
        PROVIDE(__app_stack_bottom_h3 = .);
        . += STACK_SIZE_U54_3_APPLICATION;
        PROVIDE(__app_stack_top_h3 = .);
        PROVIDE(__stack_top_h3$ = .);

        PROVIDE(__stack_bottom_h4$ = .);
        PROVIDE(__app_stack_bottom_h4 = .);
        . += STACK_SIZE_U54_4_APPLICATION;
        PROVIDE(__app_stack_top_h4 = .);
        PROVIDE(__stack_top_h4$ = .);

        /* place __start_of_free_lim$ after the last allocation of l2lim */
        . = ALIGN(0x10);
        PROVIDE(__start_of_free_lim$ = .);
    } > l2lim

    /*
     * Memory shared across harts.
     * The boot hart's Hart Local Storage holds a pointer to this area for each
     * hart when enabled by setting the MPFS_HAL_SHARED_MEM_ENABLED define in
     * mss_sw_config.h.
     */
    .app_hart_common : /* ALIGN(0x1000) */
    {
        PROVIDE(__app_hart_common_start = .);
        . += SIZE_OF_COMMON_HART_MEM;
        PROVIDE(__app_hart_common_end = .);
    } > l2lim
}
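/*
 * Illustrative sketch (an assumption, not HAL code) of how the common area
 * reserved by .app_hart_common above might be used once
 * MPFS_HAL_SHARED_MEM_ENABLED is defined in mss_sw_config.h. The struct layout
 * and macro are hypothetical; only the linker symbol comes from this script.
 *
 *   #include <stdint.h>
 *
 *   extern char __app_hart_common_start[];
 *
 *   typedef struct
 *   {
 *       volatile uint32_t u54_1_ready;    // example flag set by u54_1()
 *       volatile uint32_t shared_count;   // example shared counter
 *   } app_shared_mem_t;
 *
 *   #define APP_SHARED_MEM ((app_shared_mem_t *)__app_hart_common_start)
 *
 *   // e.g. APP_SHARED_MEM->u54_1_ready = 1u; set from u54_1(), polled by e51()
 */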