/*
 * Copyright (c) 2023-2024, Arm Limited. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "sam_drv.h"
#include "sam_reg_map.h"
#include "tfm_hal_device_header.h"

#define ARRAY_LEN(x) (sizeof(x) / sizeof((x)[0]))

/* Index in the sames, samecl, samem or samim register arrays for event_id */
#define SAMEx_IDX(event_id) ((uint32_t)(event_id) >> 0x5UL)

/* Mask for event_id in the sames, samecl, samem or samim register */
#define SAMEx_MASK(event_id) (0x1UL << ((uint32_t)(event_id) & 0x1FUL))

/* Index in the samrrls register array of the response for event_id */
#define SAMRRLS_IDX(event_id) ((uint32_t)(event_id) >> 0x3UL)

/* Offset within the samrrls register of the response for event_id */
#define SAMRRLS_OFF(event_id) (((uint32_t)(event_id) & 0x7UL) << 0x2UL)

/* Mask for the response in the samrrls register for event_id */
#define SAMRRLS_MASK(event_id) (0xFUL << SAMRRLS_OFF(event_id))
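
/*
 * Worked example of the index/mask arithmetic above, using an illustrative
 * event ID value of 37 (not tied to any real event in sam_drv.h):
 *   SAMEx_IDX(37)    == 37 >> 5        == 1    (second 32-bit event register)
 *   SAMEx_MASK(37)   == 1 << (37 & 31) == 0x20 (bit 5 of that register)
 *   SAMRRLS_IDX(37)  == 37 >> 3        == 4    (fifth response register)
 *   SAMRRLS_OFF(37)  == (37 & 7) << 2  == 20   (4-bit field at bits 23:20)
 *   SAMRRLS_MASK(37) == 0xF << 20      == 0x00F00000
 */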

/* Count the number of zero bits in val: start at 32 and subtract one for each
 * set bit, clearing the lowest set bit on every iteration.
 */
static uint32_t zero_count(uint32_t val)
{
    uint32_t res = 32;

    while (val != 0) {
        val &= val - 1;
        res--;
    }

    return res;
}

/* Compute floor(log2(val)); returns 0 for val equal to 0 or 1 */
static uint32_t log2(uint32_t val)
{
    uint32_t res = 0;

    while (val >>= 1) {
        res++;
    }

    return res;
}
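
/*
 * For reference, a few example values for the helpers above, easily checked
 * by hand:
 *   zero_count(0x00000000) == 32, zero_count(0xFFFFFFFF) == 0,
 *   zero_count(0x0000000F) == 28
 *   log2(1) == 0, log2(8) == 3, log2(0x80000000) == 31
 */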

enum sam_error_t sam_init(const struct sam_dev_t *dev)
{
    struct sam_reg_map_t *regs = (struct sam_reg_map_t *)dev->cfg->base;
    volatile uint32_t *sam_cfg_base = regs->samem;

    /* Write the default config */
    for (size_t i = 0; i < SAM_CONFIG_LEN; i++) {
        sam_cfg_base[i] = dev->cfg->default_config[i];
    }

    return SAM_ERROR_NONE;
}

enum sam_error_t sam_enable_event(const struct sam_dev_t *dev,
                                  enum sam_event_id_t event_id)
{
    struct sam_reg_map_t *regs = (struct sam_reg_map_t *)dev->cfg->base;

    if (event_id > SAM_MAX_EVENT_ID) {
        return SAM_ERROR_INVALID_ARGUMENT;
    }

    if (!(regs->samim[SAMEx_IDX(event_id)] & SAMEx_MASK(event_id))) {
        regs->samim[SAMEx_IDX(event_id)] |= SAMEx_MASK(event_id);
        /* Update integrity check value: setting a bit means one fewer zero */
        regs->samicv--;
    }

    return SAM_ERROR_NONE;
}

enum sam_error_t sam_disable_event(const struct sam_dev_t *dev,
                                   enum sam_event_id_t event_id)
{
    struct sam_reg_map_t *regs = (struct sam_reg_map_t *)dev->cfg->base;

    if (event_id > SAM_MAX_EVENT_ID) {
        return SAM_ERROR_INVALID_ARGUMENT;
    }

    if (regs->samim[SAMEx_IDX(event_id)] & SAMEx_MASK(event_id)) {
        regs->samim[SAMEx_IDX(event_id)] &= ~SAMEx_MASK(event_id);
        /* Update integrity check value: clearing a bit means one more zero */
        regs->samicv++;
    }

    return SAM_ERROR_NONE;
}
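
/*
 * Note on the samicv bookkeeping used throughout this driver: every
 * configuration write adjusts the integrity check value by the change in the
 * zero count of the word being written. For example, enabling a previously
 * disabled event flips a single 0 bit to 1 in samim, so samicv is decremented
 * by one; disabling it again flips the bit back and increments samicv by one.
 */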

enum sam_error_t sam_set_event_response(const struct sam_dev_t *dev,
                                        enum sam_event_id_t event_id,
                                        enum sam_response_t response,
                                        bool enable_response)
{
    struct sam_reg_map_t *regs = (struct sam_reg_map_t *)dev->cfg->base;
    uint32_t old_reg_val;
    uint32_t new_reg_val;
    uint32_t rrl_val;

    if (event_id > SAM_MAX_EVENT_ID || response > SAM_MAX_RESPONSE_ACTION ||
        response == SAM_RESPONSE_NONE) {
        return SAM_ERROR_INVALID_ARGUMENT;
    }

    old_reg_val = regs->samrrls[SAMRRLS_IDX(event_id)];

    /* 4-bit field: bit 3 is the enable flag, bits 2:0 encode the response */
    rrl_val = ((uint32_t)enable_response << 3UL) | log2((uint32_t)response);

    new_reg_val = (old_reg_val & ~SAMRRLS_MASK(event_id)) |
                  ((rrl_val << SAMRRLS_OFF(event_id)) & SAMRRLS_MASK(event_id));

    regs->samrrls[SAMRRLS_IDX(event_id)] = new_reg_val;

    /* Update integrity check value with the difference in zero count */
    regs->samicv += zero_count(new_reg_val) - zero_count(old_reg_val);

    return SAM_ERROR_NONE;
}
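
/*
 * Example of the response record encoding above, assuming the sam_response_t
 * values are one-hot bit flags (as the use of log2() here suggests): with
 * enable_response == true and a response value of (1UL << 3),
 * rrl_val == (1 << 3) | log2(8) == 0x8 | 0x3 == 0xB. That nibble is then
 * shifted into the 4-bit slot for event_id within its samrrls register.
 */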

void sam_set_watchdog_counter_initial_value(const struct sam_dev_t *dev,
                                            uint32_t count_value,
                                            enum sam_response_t responses)
{
    struct sam_reg_map_t *regs = (struct sam_reg_map_t *)dev->cfg->base;
    uint32_t prev_zero_count = zero_count(regs->samwdciv);

    uint32_t wdciv_val = (count_value & 0x3FFFFFFUL) |
                         ((((uint32_t)responses >> 2UL) & 0x3FUL) << 26UL);

    regs->samwdciv = wdciv_val;

    /* Update integrity check value with the difference in zero count */
    regs->samicv += zero_count(wdciv_val) - prev_zero_count;
}
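
/*
 * Layout of samwdciv as written above, read directly from the shifts and
 * masks in sam_set_watchdog_counter_initial_value() rather than from a
 * register description: bits [25:0] hold the watchdog counter initial value,
 * and bits [31:26] hold bits [7:2] of the requested response value.
 */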

enum sam_error_t sam_register_event_handler(struct sam_dev_t *dev,
                                            enum sam_event_id_t event_id,
                                            sam_event_handler_t event_handler)
{
    if (event_id > SAM_MAX_EVENT_ID) {
        return SAM_ERROR_INVALID_ARGUMENT;
    }

    dev->event_handlers[event_id] = event_handler;

    return SAM_ERROR_NONE;
}
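
/*
 * A minimal usage sketch of the configuration API above. The device instance
 * SAM_DEV_S, the event ID SAM_EVENT_EXAMPLE, the one-hot response
 * SAM_RESPONSE_EXAMPLE and the callback handler are all illustrative names,
 * not definitions from sam_drv.h:
 *
 *   (void)sam_init(&SAM_DEV_S);
 *   (void)sam_register_event_handler(&SAM_DEV_S, SAM_EVENT_EXAMPLE, handler);
 *   (void)sam_set_event_response(&SAM_DEV_S, SAM_EVENT_EXAMPLE,
 *                                SAM_RESPONSE_EXAMPLE, true);
 *   (void)sam_enable_event(&SAM_DEV_S, SAM_EVENT_EXAMPLE);
 *
 * sam_handle_event() below is then expected to be called from the SAM
 * interrupt handler to dispatch the registered callbacks.
 */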

void sam_handle_event(const struct sam_dev_t *dev)
{
    struct sam_reg_map_t *regs = (struct sam_reg_map_t *)dev->cfg->base;
    uint32_t reg_idx;
    uint32_t event_id;
    uint32_t mask;
    uint32_t sames_val;
    uint32_t samecl_val;

    /* Iterate over each bit position in each of the SAMES registers to check
     * if the corresponding event ID is pending.
     */
    for (reg_idx = 0; reg_idx < ARRAY_LEN(regs->sames); reg_idx++) {
        sames_val = regs->sames[reg_idx];
        samecl_val = 0;

        /* Skip ECC events, which have their own separate interrupt handlers */
        if (reg_idx == SAMEx_IDX(SAM_EVENT_SRAM_PARTIAL_WRITE)) {
            sames_val &= ~(SAMEx_MASK(SAM_EVENT_SRAM_PARTIAL_WRITE) |
                           SAMEx_MASK(SAM_EVENT_VM0_SINGLE_ECC_ERROR) |
                           SAMEx_MASK(SAM_EVENT_VM1_SINGLE_ECC_ERROR) |
                           SAMEx_MASK(SAM_EVENT_VM2_SINGLE_ECC_ERROR) |
                           SAMEx_MASK(SAM_EVENT_VM3_SINGLE_ECC_ERROR));
        }

        /* Check each bit position until all pending events have been handled
         * (when the clear value equals the status value). In most cases there
         * will only be one pending event.
         */
        for (event_id = reg_idx << 5UL, mask = 1;
             event_id <= SAM_MAX_EVENT_ID && samecl_val != sames_val;
             event_id++, mask <<= 1) {
            if (sames_val & mask) {
                if (dev->event_handlers[event_id]) {
                    dev->event_handlers[event_id]();
                }
                samecl_val |= mask;
            }
        }

        /* Clear the status bits of the events that were handled */
        if (samecl_val != 0) {
            regs->samecl[reg_idx] = samecl_val;
        }
    }
}

void sam_handle_partial_write(const struct sam_dev_t *dev)
{
    struct sam_reg_map_t *regs = (struct sam_reg_map_t *)dev->cfg->base;
    volatile uint64_t *vm_ptr;
    size_t i;

    /* Handle any partial writes by reading & writing-back the affected memory
     * address.
     */
    for (i = 0; i < ARRAY_LEN(regs->vmpwca); i++) {
        vm_ptr = (volatile uint64_t *)regs->vmpwca[i];
        if (vm_ptr) {
            *vm_ptr = *vm_ptr;
            regs->vmpwca[i] = 0;
        }
    }

    /* Clear partial write error event */
    regs->samecl[SAMEx_IDX(SAM_EVENT_SRAM_PARTIAL_WRITE)] =
        SAMEx_MASK(SAM_EVENT_SRAM_PARTIAL_WRITE);
}

void sam_handle_single_ecc_error(const struct sam_dev_t *dev)
{
    struct sam_reg_map_t *regs = (struct sam_reg_map_t *)dev->cfg->base;
    volatile uint64_t *vm_ptr;
    size_t i;

    /* Handle any single ECC error events by reading & writing-back the
     * affected memory address.
     */
    for (i = 0; i < ARRAY_LEN(regs->vmsceeca); i++) {
        if (regs->sames[0] & SAMEx_MASK(SAM_EVENT_VM0_SINGLE_ECC_ERROR + i)) {
            vm_ptr = (volatile uint64_t *)regs->vmsceeca[i];
            if (vm_ptr) {
                *vm_ptr = *vm_ptr;
                regs->vmsceeca[i] = 0;
            }
        }
    }

    /* Clear single ECC error events */
    regs->samecl[SAMEx_IDX(SAM_EVENT_VM0_SINGLE_ECC_ERROR)] =
        SAMEx_MASK(SAM_EVENT_VM0_SINGLE_ECC_ERROR) |
        SAMEx_MASK(SAM_EVENT_VM1_SINGLE_ECC_ERROR) |
        SAMEx_MASK(SAM_EVENT_VM2_SINGLE_ECC_ERROR) |
        SAMEx_MASK(SAM_EVENT_VM3_SINGLE_ECC_ERROR);
}