;/*
; * Copyright (c) 2018-2021 Arm Limited. All rights reserved.
; *
; * Licensed under the Apache License, Version 2.0 (the "License");
; * you may not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; *     http://www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an "AS IS" BASIS,
; * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; *
; */

#include "region_defs.h"

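/*
 * The region base and size symbols used below (S_DDR4_START, S_DDR4_SIZE,
 * HEAP_SIZE and STACK_SIZE) are expected to be provided by region_defs.h for
 * the target platform; nothing in this file defines them.
 */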
LR_CODE S_DDR4_START {
    ER_CODE S_DDR4_START {
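        /* Keep the vector table (the RESET section, typically emitted by the
         * CMSIS startup file) at the very start of the image. */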
        *.o (RESET +First)
        .ANY (+RO)
        /* different test vectors */
        * (InRoot$$Sections)
    }

    /*
     * Place the CMSE veneers (containing the SG instruction) after the code,
     * in a separate 32-byte aligned region, so that the SAU can be programmed
     * to mark just this region as Non-Secure Callable. The maximum size of
     * this executable region restricts it to the space left over by the
     * ER_CODE region, so that the combined size of the code and the veneers
     * cannot exceed S_CODE_SIZE. We also subtract from the available space the
     * area used to align this section on a 32-byte boundary (for the SAU
     * configuration).
     */
    ER_CODE_CMSE_VENEER +0 ALIGN 32 {
        *(Veneer$$CMSE)
    }
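    /*
     * armlink emits Image$$ER_CODE_CMSE_VENEER$$Base and
     * Image$$ER_CODE_CMSE_VENEER$$Limit symbols for this region. A secure-side
     * SAU setup routine could consume them roughly as sketched below (a
     * hypothetical example assuming the CMSIS-Core SAU register definitions,
     * not code taken from this project):
     *
     *   extern uint32_t Image$$ER_CODE_CMSE_VENEER$$Base;
     *   extern uint32_t Image$$ER_CODE_CMSE_VENEER$$Limit;
     *
     *   SAU->RNR  = 0;  // use SAU region 0 for the NSC veneers
     *   SAU->RBAR = (uint32_t)&Image$$ER_CODE_CMSE_VENEER$$Base
     *               & SAU_RBAR_BADDR_Msk;
     *   SAU->RLAR = (((uint32_t)&Image$$ER_CODE_CMSE_VENEER$$Limit - 1)
     *               & SAU_RLAR_LADDR_Msk)
     *               | SAU_RLAR_NSC_Msk | SAU_RLAR_ENABLE_Msk;
     *   SAU->CTRL |= SAU_CTRL_ENABLE_Msk;
     */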
    /*
     * This dummy region ensures that the region that follows starts on a
     * 32-byte boundary, so that it cannot be mistakenly covered by the SAU
     * region configured as Non-Secure Callable.
     */
    ER_CODE_CMSE_VENEER_DUMMY +0 ALIGN 32 EMPTY 0 {}

    /* This empty, zero-length execution region marks the limit address of the
     * last execution region that contains code (including the CMSE veneers).
     */
    CODE_WATERMARK +0 EMPTY 0x0 {
    }
    /* Make sure that the code allocated above does not exceed the available
     * DDR4 space (S_DDR4_SIZE).
     */
    ScatterAssert(ImageLimit(CODE_WATERMARK) <= S_DDR4_START + S_DDR4_SIZE)

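    /* All read-write and zero-initialised data not placed by any other
     * region ends up here.
     */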
    ER_DATA +0 ALIGN 32 {
        .ANY (+ZI +RW)
    }

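    /* ARM_LIB_HEAP and ARM_LIB_STACK are special execution region names: the
     * Arm C library and startup code use the symbols generated for these
     * regions to locate the heap and the initial stack, so no
     * __user_setup_stackheap() retargeting is needed.
     */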
    #if HEAP_SIZE > 0
    ARM_LIB_HEAP +0 ALIGN 8 EMPTY HEAP_SIZE {    ; Reserve empty region for heap
    }
    #endif

    ARM_LIB_STACK +0 ALIGN 32 EMPTY STACK_SIZE {    ; Reserve empty region for stack
    }

    /* This empty, zero-length execution region marks the limit address of the
     * last execution region allocated in this load region.
     */
    SRAM_WATERMARK +0 EMPTY 0x0 {
    }
    /* Make sure that the sections allocated above do not exceed the available
     * DDR4 space (S_DDR4_SIZE).
     */
    ScatterAssert(ImageLimit(SRAM_WATERMARK) <= S_DDR4_START + S_DDR4_SIZE)
}