1 /*
2 * Copyright (c) 2004-2015 Cadence Design Systems Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sublicense, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included
13 * in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include <xtensa/config/core.h>
24
25 #if XCHAL_HAVE_MPU
26 #include <xtensa/core-macros.h>
27 #include <xtensa/hal.h>
28 #include <string.h>
29 #include <stdlib.h>
30
31 /*
32 * General notes:
33 * Wherever an address is represented as an unsigned, it has only the 27 most significant bits. This is how
34 * the addresses are represented in the MPU. It has the benefit that we don't need to worry about overflow.
35 *
36 * The asserts in the code are ignored unless an assert handler is set (as it is during testing).
37 *
38 * If an assert handler is set, then the MPU map is checked for correctness after every update.
39 *
40 * On some configs (actually all configs right now), the MPU entries must be aligned to the background map.
 * The constant XCHAL_MPU_ALIGN_REQ indicates whether alignment is required.
42 *
43 * The rules for a valid map are:
44 *
45 * 1) The entries' vStartAddress fields must always be in non-descending order.
46 * 2) The entries' memoryType and accessRights must contain valid values
47 *
48 * If XCHAL_MPU_ALIGN_REQ == 1 then the following additional rules are enforced:
49 * 3) If entry0's Virtual Address Start field is nonzero, then that field must equal one of the
50 * Background Map's Virtual Address Start field values if software ever intends to assert entry0's MPUENB bit.
51 * 4) If entryN's MPUENB bit will ever be negated while at the same time entryN+1's MPUENB bit is asserted,
52 * then entryN+1's Virtual Address Start field must equal one of the Background Map's Virtual Address Start field values.
53 *
 * The internal functions come first, and the external 'xthal_' functions are at the end.
55 *
56 */
57 extern void (*_xthal_assert_handler)();
58 extern void xthal_write_map_raw(const xthal_MPU_entry* fg, unsigned int n);
59 extern void xthal_read_map_raw(const xthal_MPU_entry* fg);
60 extern xthal_MPU_entry _xthal_get_entry(const xthal_MPU_entry* fg, const xthal_MPU_entry* bg,
61 unsigned int addr, int* infgmap);
62
63 #define MPU_ADDRESS_MASK (0xffffffff << XCHAL_MPU_ALIGN_BITS)
64 #define MPU_ALIGNMENT_MASK (0xffffffff - MPU_ADDRESS_MASK)
65
66 #define MPU_VSTART_CORRECTNESS_MASK ((0x1 << (XCHAL_MPU_ALIGN_BITS)) - 1)
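/*
 * Illustrative sketch only (not compiled, and not part of the HAL API): how a full
 * 32-bit byte address relates to the truncated address form described in the general
 * notes above, using the masks defined here. The helper name is hypothetical.
 */
#if 0
static unsigned example_truncate_address(unsigned byte_addr)
{
    /* keep only the bits the MPU stores; the low XCHAL_MPU_ALIGN_BITS are dropped */
    return byte_addr & MPU_ADDRESS_MASK;
}
#endif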
67 // Set this to 1 for more extensive internal checking / 0 for production
68 #define MPU_DEVELOPMENT_MODE 0
69
70 #if XCHAL_MPU_ALIGN_REQ
71 #define XCHAL_MPU_WORST_CASE_ENTRIES_FOR_REGION 3
72 #else
73 #define XCHAL_MPU_WORST_CASE_ENTRIES_FOR_REGION 2
74 #endif
75
76 /*
 * At some point it is faster to commit/invalidate the entire cache rather than going one line at a time.
78 * If a region is bigger than 'CACHE_REGION_THRESHOLD' we operate on the entire cache.
79 */
80 #if XCHAL_DCACHE_LINESIZE
81 #define CACHE_REGION_THRESHOLD (32 * XCHAL_DCACHE_LINESIZE / XCHAL_MPU_ALIGN)
82 #else
83 #define CACHE_REGION_THRESHOLD 0
84 #endif
85
86
87 /*
88 * Normally these functions are no-ops, but the MPU test harness sets an assert handler to detect any inconsistencies in MPU
89 * entries or any other unexpected internal condition.
90 */
91 #if MPU_DEVELOPMENT_MODE
static void my_assert(int arg)
93 {
94 if (_xthal_assert_handler && !arg)
95 _xthal_assert_handler();
96 }
97
static void assert_map_valid()
99 {
100
101 if (_xthal_assert_handler)
102 {
103 xthal_MPU_entry fg[XCHAL_MPU_ENTRIES];
104 xthal_read_map(fg);
105 if (xthal_check_map(fg, XCHAL_MPU_ENTRIES))
106 _xthal_assert_handler();
107 }
108 }
109
static void assert_attributes_equivalent(unsigned addr, const xthal_MPU_entry* initial,
111 const xthal_MPU_entry* fg, const xthal_MPU_entry* bg)
112 {
113
114 xthal_MPU_entry e1 = _xthal_get_entry(initial, bg, addr, 0);
115 xthal_MPU_entry e2 = _xthal_get_entry(fg, bg, addr, 0);
116 my_assert((XTHAL_MPU_ENTRY_GET_ACCESS(e1) == XTHAL_MPU_ENTRY_GET_ACCESS(e2)) && (XTHAL_MPU_ENTRY_GET_MEMORY_TYPE(e1) == XTHAL_MPU_ENTRY_GET_MEMORY_TYPE(e2)));
117 }
118
static void assert_maps_equivalent(const xthal_MPU_entry* initial, const xthal_MPU_entry* fg,
120 const xthal_MPU_entry* bg)
121 {
122 /* this function checks that for every address the MPU entries 'initial' result in the same attributes as the entries in 'fg'.
123 * We only need to check at the addresses that appear in 'initial', 'fg', or 'bg'.
124 */
125 int i;
126 for (i = 0; i < XCHAL_MPU_ENTRIES; i++)
127 {
128 assert_attributes_equivalent(XTHAL_MPU_ENTRY_GET_VSTARTADDR(initial[i]), initial, fg, bg);
129 assert_attributes_equivalent(XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]), initial, fg, bg);
130 }
131 for (i = 0; i < XCHAL_MPU_BACKGROUND_ENTRIES; i++)
132 assert_attributes_equivalent(XTHAL_MPU_ENTRY_GET_VSTARTADDR(bg[i]), initial, fg, bg);
133 }
134 #else
135 #define my_assert(x)
136 #define assert_map_valid(x)
137 #endif
138
139 #if 0
// These functions aren't used, but the definitions are left in place
141 // for possible future use.
142 static inline unsigned read_mpucfg()
143 {
144 unsigned long tmp;
145 __asm__ __volatile__("rsr.mpucfg %0\n\t"
146 : "=a" (tmp));
147 return tmp;
148 }
149
150 static inline unsigned read_mpuenb()
151 {
152 unsigned long tmp;
153 __asm__ __volatile__("rsr.mpuenb %0\n\t"
154 : "=a" (tmp));
155 return tmp;
156 }
157
158 /* This function writes the enable for the MPU entries */
159 static inline void write_mpuenb(unsigned v)
160 {
161 __asm__ __volatile__("wsr.mpuenb %0\n\t"
162 : : "a" (v));
163 }
164
165 #endif
166
static inline void isync()
168 {
169 __asm__ __volatile__("isync\n\t");
170 }
171
/* This function writes the cache disable register, which
 * disables the cache in 512MB regions to save power */
static inline void write_cacheadrdis(unsigned v)
175 {
176 __asm__ __volatile__("wsr.cacheadrdis %0\n\t"
177 : : "a" (v));
178 }
179
180 inline static int is_cacheable(unsigned int mt);
181
182 #if 0
183 static inline void read_map_entry(unsigned en_num, xthal_MPU_entry* en)
184 {
185 unsigned as;
186 unsigned at0;
187 unsigned at1;
188 as = en_num;
189 __asm__ __volatile__("RPTLB0 %0, %1\n\t" : "+a" (at0) : "a" (as));
190 __asm__ __volatile__("RPTLB1 %0, %1\n\t" : "+a" (at1) : "a" (as));
191 en->as = at0;
192 en->at = at1;
193 }
194 #endif
195
inline static int is_cacheable(unsigned int mt)
197 {
198 return (0x180 & mt) || ((mt & 0x18) == 0x10) || ((mt & 0x30) == 0x30);
199 }
200
inline static int is_writeback(unsigned int mt)
202 {
203 return (((0x180 & mt) && (mt & 0x11)) ||
204 ((((mt & 0x18) == 0x10) || ((mt & 0x30) == 0x30)) & 0x1));
205 }
206
inline static int is_device(unsigned int mt)
208 {
209 return ((mt & 0x1f0) == 0);
210 }
211
inline static int is_kernel_readable(int accessRights)
213 {
214 switch (accessRights)
215 {
216 case XTHAL_AR_R:
217 case XTHAL_AR_Rr:
218 case XTHAL_AR_RX:
219 case XTHAL_AR_RXrx:
220 case XTHAL_AR_RW:
221 case XTHAL_AR_RWX:
222 case XTHAL_AR_RWr:
223 case XTHAL_AR_RWrw:
224 case XTHAL_AR_RWrwx:
225 case XTHAL_AR_RWXrx:
226 case XTHAL_AR_RWXrwx:
227 return 1;
228 case XTHAL_AR_NONE:
229 case XTHAL_AR_Ww:
230 return 0;
231 default:
232 return XTHAL_BAD_ACCESS_RIGHTS;
233 }
234 }
235
inline static int is_kernel_writeable(int accessRights)
237 {
238 switch (accessRights)
239 {
240 case XTHAL_AR_RW:
241 case XTHAL_AR_RWX:
242 case XTHAL_AR_RWr:
243 case XTHAL_AR_RWrw:
244 case XTHAL_AR_RWrwx:
245 case XTHAL_AR_RWXrx:
246 case XTHAL_AR_RWXrwx:
247 case XTHAL_AR_Ww:
248 return 1;
249 case XTHAL_AR_NONE:
250 case XTHAL_AR_R:
251 case XTHAL_AR_Rr:
252 case XTHAL_AR_RX:
253 case XTHAL_AR_RXrx:
254 return 0;
255 default:
256 return XTHAL_BAD_ACCESS_RIGHTS;
257 }
258 }
259
inline static int is_kernel_executable(int accessRights)
261 {
262 switch (accessRights)
263 {
264 case XTHAL_AR_RX:
265 case XTHAL_AR_RXrx:
266 case XTHAL_AR_RWX:
267 case XTHAL_AR_RWXrx:
268 case XTHAL_AR_RWXrwx:
269 return 1;
270 case XTHAL_AR_NONE:
271 case XTHAL_AR_Ww:
272 case XTHAL_AR_R:
273 case XTHAL_AR_Rr:
274 case XTHAL_AR_RW:
275 case XTHAL_AR_RWr:
276 case XTHAL_AR_RWrw:
277 case XTHAL_AR_RWrwx:
278 return 0;
279 default:
280 return XTHAL_BAD_ACCESS_RIGHTS;
281 }
282 }
283
inline static int is_user_readable(int accessRights)
285 {
286 switch (accessRights)
287 {
288 case XTHAL_AR_Rr:
289 case XTHAL_AR_RXrx:
290 case XTHAL_AR_RWr:
291 case XTHAL_AR_RWrw:
292 case XTHAL_AR_RWrwx:
293 case XTHAL_AR_RWXrx:
294 case XTHAL_AR_RWXrwx:
295 return 1;
296 case XTHAL_AR_R:
297 case XTHAL_AR_RX:
298 case XTHAL_AR_RW:
299 case XTHAL_AR_RWX:
300 case XTHAL_AR_NONE:
301 case XTHAL_AR_Ww:
302 return 0;
303 default:
304 return XTHAL_BAD_ACCESS_RIGHTS;
305 }
306 }
307
inline static int is_user_writeable(int accessRights)
309 {
310 switch (accessRights)
311 {
312 case XTHAL_AR_Ww:
313 case XTHAL_AR_RWrw:
314 case XTHAL_AR_RWrwx:
315 case XTHAL_AR_RWXrwx:
316 return 1;
317 case XTHAL_AR_NONE:
318 case XTHAL_AR_R:
319 case XTHAL_AR_Rr:
320 case XTHAL_AR_RX:
321 case XTHAL_AR_RXrx:
322 case XTHAL_AR_RW:
323 case XTHAL_AR_RWX:
324 case XTHAL_AR_RWr:
325 case XTHAL_AR_RWXrx:
326 return 0;
327 default:
328 return XTHAL_BAD_ACCESS_RIGHTS;
329 }
330 }
331
inline static int is_user_executable(int accessRights)
333 {
334 switch (accessRights)
335 {
336 case XTHAL_AR_RXrx:
337 case XTHAL_AR_RWrwx:
338 case XTHAL_AR_RWXrx:
339 case XTHAL_AR_RWXrwx:
340 return 1;
341 case XTHAL_AR_RW:
342 case XTHAL_AR_RWX:
343 case XTHAL_AR_RWr:
344 case XTHAL_AR_RWrw:
345 case XTHAL_AR_R:
346 case XTHAL_AR_Rr:
347 case XTHAL_AR_RX:
348 case XTHAL_AR_NONE:
349 case XTHAL_AR_Ww:
350 return 0;
351 default:
352 return XTHAL_BAD_ACCESS_RIGHTS;
353 }
354 }
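/*
 * Hedged usage sketch (not compiled): classifying one access-rights encoding with the
 * predicates above. For XTHAL_AR_RWXrwx every predicate returns 1; for XTHAL_AR_RX only
 * the kernel read and execute predicates return 1. The helper name is hypothetical.
 */
#if 0
static void example_classify_access_rights(void)
{
    int ar = XTHAL_AR_RWXrwx;
    int kernel_can_write = is_kernel_writeable(ar); /* 1 */
    int user_can_exec = is_user_executable(ar);     /* 1 */
    (void) kernel_can_write;
    (void) user_can_exec;
}
#endif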
355
356 /* This function returns the map entry that is used for the address 'addr' (27msb).
357 *
358 */
359 #if defined(__SPLIT__mpu_basic)
360
xthal_MPU_entry _xthal_get_entry(const xthal_MPU_entry* fg, const xthal_MPU_entry* bg,
362 unsigned int addr, int* infgmap)
363 {
364 int i;
365 for (i = XCHAL_MPU_ENTRIES - 1; i >= 0; i--)
366 {
367 if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) <= addr)
368 {
369 if (XTHAL_MPU_ENTRY_GET_VALID(fg[i]))
370 {
371 if (infgmap)
372 *infgmap = 1;
373 return fg[i];
374 }
375 else
376 break;
377 }
378 }
379 for (i = XCHAL_MPU_BACKGROUND_ENTRIES - 1; i >= 0; i--)
380 {
381 if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(bg[i]) <= addr)
382 {
383 if (infgmap)
384 *infgmap = 0;
385 return bg[i];
386 }
387 }
388 return bg[0]; // never reached ... just to get rid of compilation warning
389 }
390
391 /* returns true if the supplied address (27msb) is in the background map. */
int _xthal_in_bgmap(unsigned int address, const xthal_MPU_entry* bg)
393 {
394 int i;
395 for (i = 0; i < XCHAL_MPU_BACKGROUND_ENTRIES; i++)
396 if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(bg[i]) == address)
397 return 1;
398 return 0;
399 }
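/*
 * Hedged usage sketch (not compiled): find the entry that governs an address using the
 * helpers above. Assumes the truncated-address convention from the general notes and the
 * Xthal_mpu_bgmap table defined later in this file; the wrapper name is hypothetical.
 */
#if 0
static void example_lookup_entry(unsigned byte_addr)
{
    xthal_MPU_entry fg[XCHAL_MPU_ENTRIES];
    int infg = 0;
    xthal_MPU_entry e;
    xthal_read_map(fg);
    e = _xthal_get_entry(fg, Xthal_mpu_bgmap, byte_addr & MPU_ADDRESS_MASK, &infg);
    /* infg is 1 if a foreground entry matched, 0 if the background map was used */
    (void) e;
}
#endif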
400
401 #endif
402
403 #if defined(__SPLIT__mpu_attributes)
404
405 /* This function updates the map entry as well as internal duplicate of the map
406 * state in fg. The assumption is that reading map entries could be somewhat
407 * expensive in some situations so we are keeping a copy of the map in memory when
408 * doing extensive map manipulations.
409 */
static void write_map_entry(xthal_MPU_entry* fg, unsigned en_num, xthal_MPU_entry en)
411 {
412 en.at = (en.at & 0xffffffe0) | (en_num & 0x1f);
413 xthal_mpu_set_entry(en);
414 assert_map_valid();
415 fg[en_num] = en;
416 }
417
static void move_map_down(xthal_MPU_entry* fg, int dup, int idx)
419 {
/* moves the map entry list down one (leaving duplicate entries at idx and idx+1). This function assumes that the last
 * entry is invalid ... the caller MUST check this
 */
423 unsigned int i;
424 for (i = dup; i > idx; i--)
425 {
426 write_map_entry(fg, i, fg[i - 1]);
427 }
428 }
429
static void move_map_up(xthal_MPU_entry* fg, int dup, int idx)
431 {
/* moves the map entry list up one (leaving duplicate entries at idx and idx-1), removing the entry at dup
 */
434 int i;
435 for (i = dup; i < idx - 1; i++)
436 {
437 write_map_entry(fg, i, fg[i + 1]);
438 }
439 }
440
static int bubble_free_to_ip(xthal_MPU_entry* fg, int ip, int required)
442 {
443 /* This function shuffles the entries in the MPU to get at least 'required' free entries at
444 * the insertion point 'ip'. This function returns the new insertion point (after all the shuffling).
445 */
446 int i;
447 int rv = ip;
448 if (required < 1)
449 return ip;
450 my_assert(required <= XCHAL_MPU_ENTRIES);
451 /* first we search for duplicate or unused entries at an index less than 'ip'. We start looking at ip-1
452 * (rather than 0) to minimize the number of shuffles required.
453 */
454 for (i = ip - 2; i >= 0 && required;)
455 {
456 if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) == XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i + 1]))
457 {
458 move_map_up(fg, i, ip);
459 rv--;
460 required--;
461 }
462 i--;
463 }
464 // if there are any invalid entries at top of the map, we can remove them to make space
465 while (required)
466 {
467 if (!XTHAL_MPU_ENTRY_GET_VALID(fg[0]))
468 {
469 move_map_up(fg, 0, ip);
470 rv--;
471 required--;
472 }
473 else
474 break;
475 }
476 /* If there are not enough unneeded entries at indexes less than ip, then we search at indexes > ip.
477 * We start the search at ip+1 and move down, again to minimize the number of shuffles required.
478 */
479
480 for (i = ip + 1; i < XCHAL_MPU_ENTRIES && required;)
481 {
482 if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) == XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i - 1]))
483 {
484 move_map_down(fg, i, ip);
485 required--;
486 }
487 else
488 i++;
489 }
490 my_assert(required == 0);
491 return rv;
492 }
493
494
495 /* This function removes 'inaccessible' entries from the MPU map (those that are hidden by previous entries
496 * in the map). It leaves any entries that match background entries in place.
497 */
static void remove_inaccessible_entries(xthal_MPU_entry* fg, const xthal_MPU_entry* bg)
499 {
500 int i;
501 for (i = 1; i < XCHAL_MPU_ENTRIES; i++)
502 {
503 if (((XTHAL_MPU_ENTRY_GET_VALID(fg[i]) == XTHAL_MPU_ENTRY_GET_VALID(fg[i - 1])) && (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) > XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i - 1]))
504 && (XTHAL_MPU_ENTRY_GET_MEMORY_TYPE(fg[i]) == XTHAL_MPU_ENTRY_GET_MEMORY_TYPE(fg[i - 1])) && (XTHAL_MPU_ENTRY_GET_ACCESS(fg[i]) == XTHAL_MPU_ENTRY_GET_ACCESS(fg[i - 1])) &&
505 /* we can only remove the background map entry if either background alignment is not required, or
506 * if the previous entry is enabled.
507 */
508 (!_xthal_in_bgmap(XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]), bg)))
509 || ((!XTHAL_MPU_ENTRY_GET_VALID(fg[i]) && (!XTHAL_MPU_ENTRY_GET_VALID(fg[i - 1])) && (!_xthal_in_bgmap(XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]), bg)))))
510 {
511 write_map_entry(fg, i, fg[i - 1]);
512 }
513 }
514 }
515
516 /* This function takes bitwise or'd combination of access rights and memory type, and extracts
517 * the access rights. It returns the access rights, or -1.
518 */
static int encode_access_rights(int cattr)
520 {
521 cattr = cattr & 0xF;
522 if ((cattr) > 0 && (cattr < 4))
523 return -1;
524 else
525 return cattr;
526 }
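/*
 * Example (illustrative): encode_access_rights(XTHAL_AR_RWXrwx) returns XTHAL_AR_RWXrwx
 * unchanged, since only the low 4 bits are kept; any value whose low 4 bits fall in the
 * reserved range 1..3 yields -1.
 */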
527
528 /*
 * returns the largest value rv, such that for every index < rv,
 * entries[index].vStartAddress <= first.
531 *
532 * Assumes an ordered entry array (even disabled entries must be ordered).
533 * value returned is in the range [0, XCHAL_MPU_ENTRIES].
534 *
535 */
static int find_entry(xthal_MPU_entry* fg, unsigned first)
537 {
538 int i;
539 for (i = XCHAL_MPU_ENTRIES - 1; i >= 0; i--)
540 {
541 if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) <= first)
542 return i + 1;
543 }
544 return 0; // if it is less than all existing entries return 0
545 }
546
547 /*
548 * This function returns 1 if there is an exact match for first and first+size
549 * so that no manipulations are necessary before safing and updating the attributes
 * for [first, first+size). The first and end entries
551 * must be valid, as well as all the entries in between. Otherwise the memory
552 * type might change across the region and we wouldn't be able to safe the caches.
553 *
554 * An alternative would be to require alignment regions in this case, but that seems
555 * more wasteful.
556 */
static int needed_entries_exist(xthal_MPU_entry* fg, unsigned first, unsigned last)
558 {
559 int i;
560 for (i = 0; i < XCHAL_MPU_ENTRIES; i++)
561 {
562 if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) == first)
563 {
564 int j;
565 /* special case ... is last at the end of the address space
566 * ... if so there is no end entry needed.
567 */
568 if (last == 0xFFFFFFFF)
569 {
570 int k;
571 for (k = i; k < XCHAL_MPU_ENTRIES; k++)
572 if (!XTHAL_MPU_ENTRY_GET_VALID(fg[k]))
573 return 0;
574 return 1;
575 }
576 /* otherwise search for the end entry */
577 for (j = i; j < XCHAL_MPU_ENTRIES; j++)
578 if (last == XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[j]))
579 {
580 int k;
581 for (k = i; k <= j; k++)
582 if (!XTHAL_MPU_ENTRY_GET_VALID(fg[k]))
583 return 0;
584 return 1;
585 }
586 return 0;
587 }
588 }
589 return 0;
590 }
591
592 /* This function computes the number of MPU entries that are available for use in creating a new
593 * region.
594 */
static int number_available(xthal_MPU_entry* fg)
596 {
597 int i;
598 int rv = 0;
599 int valid_seen = 0;
600 for (i = 0; i < XCHAL_MPU_ENTRIES; i++)
601 {
602 if (!valid_seen)
603 {
604 if (XTHAL_MPU_ENTRY_GET_VALID(fg[i]))
605 valid_seen = 1;
606 else
607 {
608 rv++;
609 continue;
610 }
611 }
612 else
613 {
614 if (i && (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) == XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i - 1])))
615 rv++;
616 }
617 }
618 return rv;
619 }
620
621 /*
 * This function returns the index of the background map entry that maps the address 'first' if there are no
623 * enabled/applicable foreground map entries.
624 */
static int get_bg_map_index(const xthal_MPU_entry* bg, unsigned first)
626 {
627 int i;
628 for (i = XCHAL_MPU_BACKGROUND_ENTRIES - 1; i >= 0; i--)
629 if (first > XTHAL_MPU_ENTRY_GET_VSTARTADDR(bg[i]))
630 return i;
631 return 0;
632 }
633
inline static unsigned int covert_to_writethru_memtype(unsigned int wb_memtype)
635 {
636 unsigned int prefix = wb_memtype & 0x1f0;
637 if (prefix == 0x10)
638 return wb_memtype & 0xfffffffe;
639 else
640 return wb_memtype & 0xffffffee;
641 }
642
643 /*
 * This function takes the region pointed to by ip, and makes it safe with respect to cache coherency, before
645 * changing the memory type and possibly corrupting the cache. If wb is 0, then that indicates
646 * that we should ignore uncommitted entries. If the inv argument is 0 that indicates that we shouldn't invalidate
647 * the cache before switching to bypass.
648 */
static void safe_region(xthal_MPU_entry* fg, int ip, unsigned end_of_segment, int memoryType, int wb, int inv,
650 unsigned int* post_inv_all)
651 {
652 unsigned length = end_of_segment - XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[ip]); // initially keep length 27msb to avoid possibility of overflow
653 if (!length)
654 return; // if the region is empty, there is no need to safe it
655
656 int cmemType = XTHAL_MPU_ENTRY_GET_MEMORY_TYPE(fg[ip]);
657
658 if (memoryType == cmemType)
659 return; // not changing memory types ... we don't need to do anything
660
661 int mt_is_wb = is_writeback(memoryType);
662 int mt_is_ch = is_cacheable(memoryType);
663
664 // nothing needs to be done in these cases
665 if (mt_is_wb || (!wb && (!inv || mt_is_ch)))
666 return;
667
668 int need_flush = wb && (is_writeback(cmemType) && !is_writeback(memoryType));
669 int need_invalidate = inv && (is_cacheable(cmemType) && !is_cacheable(memoryType));
670
671 void* addr = (void*) XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[ip]);
672
673 int write_by_region = length < CACHE_REGION_THRESHOLD;
674
675 if (need_flush)
676 {
677 XTHAL_MPU_ENTRY_SET_MEMORY_TYPE(fg[ip], covert_to_writethru_memtype(XTHAL_MPU_ENTRY_GET_MEMORY_TYPE(fg[ip])));
// If the AR == NONE, then writing back the cache may generate an exception. Temporarily open up the protections ...
679 // ...
680 if (XTHAL_MPU_ENTRY_GET_ACCESS(fg[ip]) == XTHAL_AR_NONE)
681 XTHAL_MPU_ENTRY_SET_ACCESS(fg[ip], XTHAL_AR_RWXrwx);
682 // bit 0 determines if it wb/wt
683 write_map_entry(fg, ip, fg[ip]);
684 if (!write_by_region)
685 {
/* unfortunately there is no straightforward way to avoid the possibility of doing
687 * multiple xthal_dcache_all_writeback() calls during a region update. The reason for this
688 * is that:
689 *
690 * 1) The writeback must be done before the memory type is changed to non-cacheable before
691 * an invalidate (see below)
692 *
693 * 2) it isn't possible to reorganize the loop so that all the writebacks are done before
694 * any of the invalidates because if part of the region of interest is (initially) mapped
695 * by the background map, then a single foreground entry is reused to 'safe' across
696 * each background map entry that is overlapped.
697 */
698 xthal_dcache_all_writeback();
699 }
700 else if (length)
701 xthal_dcache_region_writeback(addr, length);
702 }
703
704 if (need_invalidate)
705 {
706 XTHAL_MPU_ENTRY_SET_MEMORY_TYPE(fg[ip],
707 XTHAL_ENCODE_MEMORY_TYPE(XCHAL_CA_BYPASS));
708 write_map_entry(fg, ip, fg[ip]);
709 /* only need to call all_invalidate once ... check
710 * if it has already been done.
711 */
712 if (!*post_inv_all)
713 {
714 if (!write_by_region)
715 {
716 *post_inv_all = 1;
717 }
718 else if (length)
719 {
720 xthal_icache_region_invalidate(addr, length);
721 xthal_dcache_region_writeback_inv(addr, length);
722 }
723 }
724 }
725 }
726
static unsigned max(unsigned a, unsigned b, unsigned c)
728 {
729 if (a > b && a > c)
730 return a;
731 else if (b > c)
732 return b;
733 else
734 return c;
735 }
736
737 /* This function returns the next address to commit which will be the greatest of the following:
738 * 1) The start of the region we are creating
739 * 2) The vStartAddress of the previous entry
740 * 3) The background map entry that precedes the current address (last address committed).
741 */
static unsigned next_address_to_commit(xthal_MPU_entry* fg, const xthal_MPU_entry* bg, unsigned first,
743 int current_index)
744 {
745 unsigned current = XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[current_index]);
746 return max(first, XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[current_index - 1]), XTHAL_MPU_ENTRY_GET_VSTARTADDR(bg[get_bg_map_index(bg, current)]));
747 }
748
749 /*
750 * This function does a series of calls to safe_region() to ensure that no data will be corrupted when changing the memory type
751 * of an MPU entry. These calls are made for every entry address in the range[first,end), as well as at any background region boundary
752 * in the range[first,end). In general it is necessary to safe at the background region boundaries, because the memory type could
753 * change at that address.
754 *
755 * This function is written to reuse already needed entries for the background map 'safes' which complicates things somewhat.
756 *
757 * After the calls to safe region are complete, then the entry attributes are updated for every entry in the range [first,end).
758 */
static void safe_and_commit_overlaped_regions(xthal_MPU_entry* fg, const xthal_MPU_entry* bg, unsigned first,
760 unsigned last, int memoryType, int accessRights, int wb, int inv)
761 {
762 int i;
763 unsigned int next;
764 unsigned end_of_segment = last;
765 unsigned post_inv_all = 0;
766 unsigned int cachedisadr;
767 write_cacheadrdis(0);
768 for (i = XCHAL_MPU_ENTRIES - 1; i >= 0; i--)
769 if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) < last)
770 {
771 // first we want to commit the first entry
772 safe_region(fg, i, end_of_segment, memoryType, wb, inv, &post_inv_all);
773 end_of_segment = XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]);
774 do
775 {
776 next = next_address_to_commit(fg, bg, first, i);
777 if (next == XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i - 1]))
778 i--;
779 XTHAL_MPU_ENTRY_SET_VSTARTADDR(fg[i], next);
780 safe_region(fg, i, last, memoryType, wb, inv, &post_inv_all);
781 end_of_segment = XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]);
782 } while (next > first);
783 if (post_inv_all)
784 {
785 xthal_icache_all_invalidate();
786 xthal_dcache_all_writeback_inv();
787 }
788 for (; i < XCHAL_MPU_ENTRIES && XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) < last; i++)
789 {
790 XTHAL_MPU_ENTRY_SET_MEMORY_TYPE(fg[i], memoryType);
791 XTHAL_MPU_ENTRY_SET_ACCESS(fg[i], accessRights);
792 XTHAL_MPU_ENTRY_SET_VALID(fg[i], 1);
793 write_map_entry(fg, i, fg[i]);
794 }
795 break;
796 }
797 cachedisadr = xthal_calc_cacheadrdis(fg, XCHAL_MPU_ENTRIES);
798 write_cacheadrdis(cachedisadr);
799 }
800
static void handle_invalid_pred(xthal_MPU_entry* fg, const xthal_MPU_entry* bg, unsigned first, int ip)
802 {
/* Handle the case where there is an invalid entry immediately preceding the entry we
 * are creating. If the entries' addresses correspond to the same background map entry, then we
 * make the previous entry valid with the same attributes as that background map entry.
 *
 * The case where the immediately preceding invalid entry's address corresponds to a different
 * background map entry is handled by create_aligning_entries_if_required(), so nothing is done here.
 */
/* todo ... optimization opportunity: the following block loops through the background map up to 4 times.
 *
 */
813 if (!ip || XTHAL_MPU_ENTRY_GET_VALID(fg[ip - 1]))
814 return;
815 {
816 int i;
817 unsigned fgipm1_addr = XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[ip - 1]);
818 int first_in_bg_map = 0;
819 int first_bg_map_index = -1;
820 int fgipm1_bg_map_index = -1;
821 #if MPU_DEVELOPMENT_MODE
822 unsigned fgip_addr = XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[ip]);
823 int fgip_bg_map_index = -1;
824 #endif
825 for (i = XCHAL_MPU_BACKGROUND_ENTRIES - 1; i >= 0; i--)
826 {
827 unsigned addr = XTHAL_MPU_ENTRY_GET_VSTARTADDR(bg[i]);
828 if (addr == first)
829 first_in_bg_map = 1;
830 if (addr < fgipm1_addr && fgipm1_bg_map_index == -1)
831 fgipm1_bg_map_index = i;
832 #if MPU_DEVELOPMENT_MODE
833 if (addr < fgip_addr && fgip_bg_map_index == -1)
834 fgip_bg_map_index = i;
835 #endif
836 if (addr < first && first_bg_map_index == -1)
837 first_bg_map_index = i;
838 }
839 if (!first_in_bg_map && (first_bg_map_index == fgipm1_bg_map_index))
840 {
// There should be a subsequent entry that falls in the address range of the same
// background map entry ... if not, we have a problem because the following
// will corrupt the memory map
844 #if MPU_DEVELOPMENT_MODE
845 {
846 my_assert(fgip_bg_map_index == fgipm1_bg_map_index);
847 }
848 #endif
849 xthal_MPU_entry temp = _xthal_get_entry(fg, bg, XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[ip - 1]), 0);
850 XTHAL_MPU_ENTRY_SET_VSTARTADDR(temp, XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[ip - 1]));
851 write_map_entry(fg, ip - 1, temp);
852 }
853 }
854 }
855
/* This function inserts an entry (unless it already exists) with vStartAddress of first. The new entry has
857 * the same accessRights and memoryType as the address first had before the call.
858 *
859 * If 'invalid' is specified, then insert an invalid region if no foreground entry exists for the address 'first'.
860 */
static int insert_entry_if_needed_with_existing_attr(xthal_MPU_entry* fg, const xthal_MPU_entry* bg,
862 unsigned first, int invalid)
863 {
864 int i;
865 int ip;
866 int infg;
867 int found = 0;
868
869 for (i = XCHAL_MPU_ENTRIES - 1; i >= 0; i--)
870 if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) == first)
871 {
872 if (XTHAL_MPU_ENTRY_GET_VALID(fg[i]) || invalid)
873 return XTHAL_SUCCESS;
874 else
875 {
876 found = 1;
877 ip = i;
878 break;
879 }
880 }
881
882 if (!found)
883 {
884 if (!number_available(fg))
885 return XTHAL_OUT_OF_ENTRIES;
886
887 ip = find_entry(fg, first);
888 ip = bubble_free_to_ip(fg, ip, 1);
889 }
890 if (!invalid)
891 handle_invalid_pred(fg, bg, first, ip);
892 xthal_MPU_entry n;
893 memset(&n, 0, sizeof(n));
894 n = _xthal_get_entry(fg, bg, first, &infg);
895
896 if (invalid && !infg) // If the entry mapping is currently in the foreground we can't make
897 // the entry invalid without corrupting the attributes of the following entry.
898 XTHAL_MPU_ENTRY_SET_VALID(n, 0);
899 XTHAL_MPU_ENTRY_SET_VSTARTADDR(n,first);
900 write_map_entry(fg, ip, n);
901 return XTHAL_SUCCESS;
902 }
903
static unsigned int smallest_entry_greater_than_equal(xthal_MPU_entry* fg, unsigned x)
905 {
906 int i;
907 for (i = 0; i < XCHAL_MPU_ENTRIES; i++)
908 if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) >= x)
909 return XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]);
910 return 0;
911 }
912
913 /* This function creates background map aligning entries if required.*/
static unsigned int create_aligning_entries_if_required(xthal_MPU_entry* fg, const xthal_MPU_entry* bg,
915 unsigned x)
916 {
917 #if XCHAL_MPU_ALIGN_REQ
918 int i;
919 int rv;
920 unsigned next_entry_address = 0;
921 unsigned next_entry_valid = 0;
922 int preceding_bg_entry_index_x = get_bg_map_index(bg, x);
923 unsigned preceding_bg_entry_x_addr = XTHAL_MPU_ENTRY_GET_VSTARTADDR(bg[preceding_bg_entry_index_x]);
924 for (i = XCHAL_MPU_ENTRIES - 1; i >= 0; i--)
925 {
926 if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) < x)
927 {
928 if (XTHAL_MPU_ENTRY_GET_VALID(fg[i]))
929 return XTHAL_SUCCESS; // If there is a valid entry immediately before the proposed new entry
930 // ... then no aligning entries are required
931 break;
932 }
933 else
934 {
935 next_entry_address = XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]);
936 next_entry_valid = XTHAL_MPU_ENTRY_GET_VALID(fg[i]);
937 }
938 }
939
940 /*
 * before creating the aligning entry, we may need to create an entry or entries at higher
 * addresses to limit the scope of the aligning entry.
943 */
944 if ((!next_entry_address) || (!next_entry_valid) || (_xthal_in_bgmap(next_entry_address, bg)))
945 {
946 /* in this case, we can just create an invalid entry at the start of the new region because
947 * a valid entry could have an alignment problem. An invalid entry is safe because we know that
948 * the next entry is either invalid, or is on a bg map entry
949 */
950 if ((rv = insert_entry_if_needed_with_existing_attr(fg, bg, x, 1)) != XTHAL_SUCCESS)
951 {
952 return rv;
953 }
954 }
955 else
956 {
957 unsigned next_bg_entry_index;
958 for (next_bg_entry_index = 0; next_bg_entry_index < XCHAL_MPU_BACKGROUND_ENTRIES; next_bg_entry_index++)
959 if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(bg[next_bg_entry_index]) > x)
960 break;
961 if (next_entry_address == XTHAL_MPU_ENTRY_GET_VSTARTADDR(bg[next_bg_entry_index])) // In this case there is no intervening bg entry
// between the new entry x and the next existing entry, so we don't need any limiting entry
963 // (the existing next_entry serves as the limiting entry)
964 { /* intentionally empty */
965 }
966 else
967 {
968 // In this case we need to create a valid region at the background entry that immediately precedes
969 // next_entry_address, and then create an invalid entry at the background entry immediately after
970 // x
971 if ((rv = insert_entry_if_needed_with_existing_attr(fg, bg, XTHAL_MPU_ENTRY_GET_VSTARTADDR(_xthal_get_entry(fg, bg, x, 0)), 0))
972 != XTHAL_SUCCESS)
973 {
974 return rv;
975 }
976 if ((rv = insert_entry_if_needed_with_existing_attr(fg, bg,
977 XTHAL_MPU_ENTRY_GET_VSTARTADDR(_xthal_get_entry(fg, bg, XTHAL_MPU_ENTRY_GET_VSTARTADDR(bg[next_bg_entry_index]), 0)), 1)) != XTHAL_SUCCESS)
978 {
979 return rv;
980 }
981 }
982 }
983
984 /* now we are finally ready to create the aligning entry.*/
985 if (!(x == preceding_bg_entry_x_addr))
986 if ((rv = insert_entry_if_needed_with_existing_attr(fg, bg, preceding_bg_entry_x_addr, 0)) != XTHAL_SUCCESS)
987 {
988 return rv;
989 }
990
991 return XTHAL_SUCCESS;
992
993 #else
994 return XTHAL_SUCCESS;
995 #endif
996 }
997
static unsigned start_initial_region(xthal_MPU_entry* fg, const xthal_MPU_entry* bg, unsigned first,
999 unsigned end)
1000 {
1001 int i;
1002 unsigned addr;
1003 for (i = XCHAL_MPU_BACKGROUND_ENTRIES - 1; i >= 0; i--)
1004 {
1005 addr = XTHAL_MPU_ENTRY_GET_VSTARTADDR(bg[i]);
1006 if (addr <= first)
1007 break;
1008 if (addr < end)
1009 return addr;
1010 }
1011 return first;
1012 }
1013
static int safe_add_region(unsigned first, unsigned last, unsigned accessRights, unsigned memoryType,
1015 unsigned writeback, unsigned invalidate)
1016 {
1017 /* This function sets the memoryType and accessRights on a region of memory. If necessary additional MPU entries
1018 * are created so that the attributes of any memory outside the specified region are not changed.
1019 *
1020 * This function has 2 stages:
1021 * 1) The map is updated one entry at a time to create (if necessary) new entries to mark the beginning and end of the
 * region as well as additional alignment entries if needed. During this stage the map is always correct, and the memoryType
1023 * and accessRights for every address remain the same.
1024 * 2) The entries inside the update region are then safed for cache consistency (if necessary) and then written with
1025 * the new accessRights, and memoryType.
1026 *
1027 * If the function fails (by running out of available map entries) during stage 1 then everything is still consistent and
1028 * it is safe to return an error code.
1029 *
 * If XCHAL_MPU_ALIGN_REQ == 1 then extra entries are created if needed
1031 * to satisfy these alignment conditions:
1032 *
1033 * 1) If entry0's Virtual Address Start field is nonzero, then that field must equal one of the Background Map's
1034 * Virtual Address Start field values if software ever intends to assert entry0's MPUENB bit.
1035 * 2) If entryN's MPUENB bit will ever be negated while at the same time entryN+1's MPUENB bit is
1036 * asserted, then entryN+1's Virtual Address Start field must equal one of the Background Map's Virtual Address Start field values.
1037 *
1038 * Between 0 and 2 available entries will be used by this function. In addition, if XCHAL_MPU_ALIGN_REQ == 1 up to ???
1039 * additional entries will be needed to meet background map alignment requirements.
1040 *
1041 * This function keeps a copy of the current map in 'fg'. This is kept in sync with contents of the MPU at all times.
1042 *
1043 */
1044
1045 int rv;
1046
1047 xthal_MPU_entry fg[XCHAL_MPU_ENTRIES];
1048 #if MPU_DEVELOPMENT_MODE
1049 xthal_MPU_entry on_entry[XCHAL_MPU_ENTRIES];
1050 xthal_read_map(on_entry);
1051 #endif
1052 xthal_read_map(fg);
1053 assert_map_valid();
1054
/* First we check and see if consecutive entries at first and first + size already exist.
 * In this important special case we don't need to do anything but safe and update the entries [first, first+size).
1057 *
1058 */
1059
1060 if (!needed_entries_exist(fg, first, last))
1061 {
1062 unsigned x;
1063 unsigned pbg;
1064
1065 /*
1066 * If we are tight on entries, the first step is to remove any redundant entries in the MPU
 * to ensure that there is room for the new entries we need.
1068 *
1069 * We need to call it here ... once we have started transforming the map it is too late
1070 * (the process involves creating inaccessible entries that could potentially get removed).
1071 */
1072 if (number_available(fg) < XCHAL_MPU_WORST_CASE_ENTRIES_FOR_REGION)
1073 remove_inaccessible_entries(fg, Xthal_mpu_bgmap);
1074 #if MPU_DEVELOPMENT_MODE
1075 assert_maps_equivalent(on_entry, fg, Xthal_mpu_bgmap);
1076 #endif
// First we create foreground entries that 'duplicate' background entries to aid in
1078 // maintaining proper alignment.
1079 if ((rv = create_aligning_entries_if_required(fg, Xthal_mpu_bgmap, first)) != XTHAL_SUCCESS)
1080 return rv;
1081
// Next we write the terminating entry for our region
// 5 cases:
// 1) end is at the end of the address space ... then we don't need to do anything ... takes 0 entries
// 2) There is an existing entry at end ... another nop ... 0 entries
// 3) end is greater than any existing entry ... in this case we just create a new invalid entry at end to mark
// the end of the region. No problem with alignment ... this takes 1 entry
// 4) otherwise, if there is a background map boundary between end and x (the smallest existing entry that is
// greater than end), then we first create an equivalent foreground map entry for the background map entry that immediately
// precedes x, and then we write an invalid entry for end. Takes 2 entries
// 5) otherwise x is in the same background map entry as end; in this case we write a new foreground entry with the existing
// attributes at end
1093
1094 if (last == 0xFFFFFFFF)
1095 { /* the end is the end of the address space ... do nothing */
1096 }
1097 else
1098 {
1099 x = smallest_entry_greater_than_equal(fg, last);
1100 if (last == x)
1101 { /* another nop */
1102 }
1103 else if (last > x)
1104 { /* there is no entry that has a start after the new region ends
1105 ... we handle this by creating an invalid entry at the end point */
1106 if ((rv = insert_entry_if_needed_with_existing_attr(fg, Xthal_mpu_bgmap, last, 1)) != XTHAL_SUCCESS)
1107 {
1108 #if MPU_DEVELOPMENT_MODE
1109 assert_maps_equivalent(on_entry, fg, Xthal_mpu_bgmap);
1110 #endif
1111 return rv;
1112 }
1113 #if MPU_DEVELOPMENT_MODE
1114 assert_maps_equivalent(on_entry, fg, Xthal_mpu_bgmap);
1115 #endif
1116 }
1117 else
1118 {
1119 pbg = XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[get_bg_map_index(Xthal_mpu_bgmap, x)]);
/* so there is an existing entry we must deal with. We next need to find out
 * whether there is an existing background entry in between the end of
 * the new region and the beginning of the next.
1123 */
1124 if ((pbg != x) && (pbg > last))
1125 {
1126 /* okay ... there is an intervening background map entry. We need
1127 * to handle this by inserting an aligning entry (if the architecture requires it)
 * and then writing an invalid entry at end.
1129 */
1130 if (XCHAL_MPU_ALIGN_REQ)
1131 {
1132 if ((rv = insert_entry_if_needed_with_existing_attr(fg, Xthal_mpu_bgmap, pbg, 0)) != XTHAL_SUCCESS)
1133 {
1134 #if MPU_DEVELOPMENT_MODE
1135 assert_maps_equivalent(on_entry, fg, Xthal_mpu_bgmap);
1136 #endif
1137 return rv;
1138 }
1139 #if MPU_DEVELOPMENT_MODE
1140 assert_maps_equivalent(on_entry, fg, Xthal_mpu_bgmap);
1141 #endif
1142 }
1143 if ((rv = insert_entry_if_needed_with_existing_attr(fg, Xthal_mpu_bgmap, last, 1)) != XTHAL_SUCCESS)
1144 {
1145 #if MPU_DEVELOPMENT_MODE
1146 assert_maps_equivalent(on_entry, fg, Xthal_mpu_bgmap);
1147 #endif
1148 return rv;
1149 }
1150 #if MPU_DEVELOPMENT_MODE
1151 assert_maps_equivalent(on_entry, fg, Xthal_mpu_bgmap);
1152 #endif
1153 }
1154 else
/* ok, so there is no background map entry in between end and x; in this case
 * we just need to create a new entry at end with the existing attributes.
1157 */
1158 if ((rv = insert_entry_if_needed_with_existing_attr(fg, Xthal_mpu_bgmap, last, 1)) != XTHAL_SUCCESS)
1159 {
1160 #if MPU_DEVELOPMENT_MODE
1161 assert_maps_equivalent(on_entry, fg, Xthal_mpu_bgmap);
1162 #endif
1163 return rv;
1164 }
1165 #if MPU_DEVELOPMENT_MODE
1166 assert_maps_equivalent(on_entry, fg, Xthal_mpu_bgmap);
1167 #endif
1168 }
1169 }
1170
/* last, but not least, we need to insert an entry at the starting address for our new region */
1172 if ((rv = insert_entry_if_needed_with_existing_attr(fg, Xthal_mpu_bgmap, start_initial_region(fg, Xthal_mpu_bgmap, first, last), 0))
1173 != XTHAL_SUCCESS)
1174 {
1175 #if MPU_DEVELOPMENT_MODE
1176 assert_maps_equivalent(on_entry, fg, Xthal_mpu_bgmap);
1177 #endif
1178 return rv;
1179 }
1180 #if MPU_DEVELOPMENT_MODE
1181 assert_maps_equivalent(on_entry, fg, Xthal_mpu_bgmap);
1182 #endif
1183 }
1184 // up to this point, the attributes of every byte in the address space should be the same as when this function
1185 // was called.
1186 safe_and_commit_overlaped_regions(fg, Xthal_mpu_bgmap, first, last, memoryType, accessRights, writeback, invalidate);
1187
1188 assert_map_valid();
1189 return XTHAL_SUCCESS;
1190 }
1191
// checks whether x (a full 32-bit address) meets the MPU alignment requirement
static unsigned int mpu_aligned(unsigned x)
1194 {
1195 return !(x & MPU_ALIGNMENT_MASK);
1196 }
1197
static unsigned int mpu_align(unsigned int x, unsigned int roundUp)
1199 {
1200 if (roundUp)
1201 return (x + MPU_ALIGNMENT_MASK) & MPU_ADDRESS_MASK;
1202 else
1203 return (x & MPU_ADDRESS_MASK);
1204 }
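/*
 * Worked example (illustrative, assuming XCHAL_MPU_ALIGN_BITS == 12 so that
 * MPU_ADDRESS_MASK == 0xfffff000): mpu_align(0x20000123, 1) == 0x20001000 (round up)
 * and mpu_align(0x20000123, 0) == 0x20000000 (round down); mpu_aligned(0x20001000)
 * is true while mpu_aligned(0x20000123) is false.
 */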
1205
1206 #endif
1207
1208 #if defined(__SPLIT__mpu_check)
static int bad_accessRights(unsigned ar)
1210 {
1211 if (ar == 0 || (ar >= 4 && ar <= 15))
1212 return 0;
1213 else
1214 return 1;
1215 }
1216
1217 /* this function checks if the supplied map 'fg' is a valid MPU map using 3 criteria:
 * 1) if an entry is valid, then that entry's accessRights must be defined (0 or 4-15).
1219 * 2) The map entries' 'vStartAddress's must be in increasing order.
1220 * 3) If the architecture requires background map alignment then:
1221 * a) If entry0's 'vStartAddress' field is nonzero, then that field must equal
1222 * one of the Background Map's 'vStartAddress' field values if the entry 0's valid bit is set.
1223 * b) If entryN's 'valid' bit is 0 and entry[N+1]'s 'valid' bit is 1, then
1224 * entry[N+1]'s 'vStartAddress' field must equal one of the Background Map's 'vStartAddress' field values.
1225 *
1226 * This function returns XTHAL_SUCCESS if the map satisfies the condition, otherwise it returns
1227 * XTHAL_BAD_ACCESS_RIGHTS, XTHAL_OUT_OF_ORDER_MAP, or XTHAL_MAP_NOT_ALIGNED.
1228 *
1229 */
static int check_map(const xthal_MPU_entry* fg, unsigned int n, const xthal_MPU_entry* bg)
1231 {
1232 int i;
1233 unsigned current = 0;
1234 if (!n)
1235 return XTHAL_SUCCESS;
1236 if (n > XCHAL_MPU_ENTRIES)
1237 return XTHAL_OUT_OF_ENTRIES;
1238 for (i = 0; i < n; i++)
1239 {
1240 if (XTHAL_MPU_ENTRY_GET_VALID(fg[i]) && bad_accessRights(XTHAL_MPU_ENTRY_GET_ACCESS(fg[i])))
1241 return XTHAL_BAD_ACCESS_RIGHTS;
1242 if ((XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) < current))
1243 return XTHAL_OUT_OF_ORDER_MAP;
1244 if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) & MPU_VSTART_CORRECTNESS_MASK)
1245 return XTHAL_MAP_NOT_ALIGNED;
1246 current = XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]);
1247 }
1248 if (XCHAL_MPU_ALIGN_REQ && XTHAL_MPU_ENTRY_GET_VALID(fg[0]) && XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[0])
1249 && !_xthal_in_bgmap(XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[0]), bg))
1250 return XTHAL_MAP_NOT_ALIGNED;
1251 for (i = 0; i < n- 1; i++)
1252 if (XCHAL_MPU_ALIGN_REQ && !XTHAL_MPU_ENTRY_GET_VALID(fg[i]) && XTHAL_MPU_ENTRY_GET_VALID(fg[i + 1])
1253 && !_xthal_in_bgmap(XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i + 1]), bg))
1254 return XTHAL_MAP_NOT_ALIGNED;
1255 return XTHAL_SUCCESS;
1256 }
1257
1258
1259
1260 /*
1261 * this function checks that the bit-wise or-ed XTHAL_MEM_... bits in x correspond to a valid
1262 * MPU memoryType. If x is valid, then 0 is returned, otherwise XTHAL_BAD_MEMORY_TYPE is
1263 * returned.
1264 */
static int check_memory_type(unsigned x)
1266 {
1267 unsigned system_cache_type = _XTHAL_MEM_CACHE_MASK(x);
1268 unsigned processor_cache_type = (((x) & _XTHAL_LOCAL_CACHE_BITS) >> 4);
1269 if ((system_cache_type > XTHAL_MEM_NON_CACHEABLE) || (processor_cache_type > XTHAL_MEM_NON_CACHEABLE))
1270 return XTHAL_BAD_MEMORY_TYPE;
1271 int processor_cache_type_set = 1;
1272 if (!processor_cache_type)
1273 {
1274 processor_cache_type = system_cache_type << 4;
1275 processor_cache_type_set = 0;
1276 }
1277 unsigned device = _XTHAL_MEM_IS_DEVICE(x);
1278 unsigned system_noncacheable = _XTHAL_IS_SYSTEM_NONCACHEABLE(x);
1279
1280 if (device | system_noncacheable)
1281 {
1282 if ((system_cache_type || processor_cache_type_set) && device)
1283 return XTHAL_BAD_MEMORY_TYPE;
1284 if (processor_cache_type_set)
1285 return XTHAL_BAD_MEMORY_TYPE; // if memory is device or non cacheable, then processor cache type should not be set
1286 if (system_noncacheable && (x & XTHAL_MEM_INTERRUPTIBLE))
1287 return XTHAL_BAD_MEMORY_TYPE;
1288 {
1289 unsigned z = x & XTHAL_MEM_SYSTEM_SHAREABLE;
1290 if ((z == XTHAL_MEM_INNER_SHAREABLE) || (z == XTHAL_MEM_OUTER_SHAREABLE))
1291 return XTHAL_BAD_MEMORY_TYPE;
1292 }
1293 }
1294 else
1295 {
1296 if ((x & XTHAL_MEM_SYSTEM_SHAREABLE) == XTHAL_MEM_SYSTEM_SHAREABLE)
1297 return XTHAL_BAD_MEMORY_TYPE;
1298 if ((x & (XTHAL_MEM_BUFFERABLE | XTHAL_MEM_INTERRUPTIBLE)))
1299 return XTHAL_BAD_MEMORY_TYPE;
1300 }
1301
1302 return 0;
1303 }
1304 #endif
1305
1306 #endif // is MPU
1307
1308 #if defined(__SPLIT__mpu_basic)
1309 /*
 * These functions accept encoded access rights, and return 1 if the supplied access rights have the property specified by the function name.
1311 */
extern int xthal_is_kernel_readable(int accessRights)
1313 {
1314 #if XCHAL_HAVE_MPU
1315 return is_kernel_readable(accessRights);
1316 #else
1317 return XTHAL_UNSUPPORTED;
1318 #endif
1319 }
1320
extern int xthal_is_kernel_writeable(int accessRights)
1322 {
1323 #if XCHAL_HAVE_MPU
1324 return is_kernel_writeable(accessRights);
1325 #else
1326 return XTHAL_UNSUPPORTED;
1327 #endif
1328 }
1329
extern int xthal_is_kernel_executable(int accessRights)
1331 {
1332 #if XCHAL_HAVE_MPU
1333 return is_kernel_executable(accessRights);
1334 #else
1335 return XTHAL_UNSUPPORTED;
1336 #endif
1337 }
1338
extern int xthal_is_user_readable(int accessRights)
1340 {
1341 #if XCHAL_HAVE_MPU
1342 return is_user_readable(accessRights);
1343 #else
1344 return XTHAL_UNSUPPORTED;
1345 #endif
1346 }
1347
extern int xthal_is_user_writeable(int accessRights)
1349 {
1350 #if XCHAL_HAVE_MPU
1351 return is_user_writeable(accessRights);
1352 #else
1353 return XTHAL_UNSUPPORTED;
1354 #endif
1355 }
1356
extern int xthal_is_user_executable(int accessRights)
1358 {
1359 #if XCHAL_HAVE_MPU
1360 return is_user_executable(accessRights);
1361 #else
1362 return XTHAL_UNSUPPORTED;
1363 #endif
1364 }
1365
1366 /*
1367 * These functions accept either an encoded or unencoded memory type, and
1368 * return 1 if the supplied memory type has property specified by the
1369 * function name.
1370 */
int xthal_is_cacheable(unsigned int mt)
1372 {
1373 #if XCHAL_HAVE_MPU
1374 return is_cacheable(mt);
1375 #else
1376 return XTHAL_UNSUPPORTED;
1377 #endif
1378 }
1379
int xthal_is_writeback(unsigned int mt)
1381 {
1382 #if XCHAL_HAVE_MPU
1383 return is_writeback(mt);
1384 #else
1385 return XTHAL_UNSUPPORTED;
1386 #endif
1387 }
1388
int xthal_is_device(unsigned int mt)
1390 {
1391 #if XCHAL_HAVE_MPU
1392 return is_device(mt);
1393 #else
1394 return XTHAL_UNSUPPORTED;
1395 #endif
1396 }
1397 #endif
1398
1399 /*
1400 * This function converts a bit-wise combination of the XTHAL_MEM_.. constants
1401 * to the corresponding MPU memory type (9-bits).
1402 *
1403 * If none of the XTHAL_MEM_.. bits are present in the argument, then
1404 * bits 4-12 (9-bits) are returned ... this supports using an already encoded
1405 * memoryType (perhaps obtained from an xthal_MPU_entry structure) as input
1406 * to xthal_set_region_attribute().
1407 *
1408 * This function first checks that the supplied constants are a valid and
1409 * supported combination. If not, it returns XTHAL_BAD_MEMORY_TYPE.
1410 */
1411 #if defined(__SPLIT__mpu_check)
int xthal_encode_memory_type(unsigned int x)
1413 {
1414 #if XCHAL_HAVE_MPU
1415 const unsigned int MemoryTypeMask = 0x1ff0;
1416 const unsigned int MemoryFlagMask = 0xffffe000;
1417 /*
 * Encodes the memory type bits supplied as a bit-wise OR of flags (e.g. XCHAL_CA_PROCESSOR_CACHE_WRITEALLOC | XCHAL_CA_PROCESSOR_CACHE_WRITEBACK)
1419 */
1420 unsigned memoryFlags = x & MemoryFlagMask;
1421 if (!memoryFlags)
1422 return (x & MemoryTypeMask) >> XTHAL_AR_WIDTH;
1423 else
1424 {
1425 int chk = check_memory_type(memoryFlags);
1426 if (chk < 0)
1427 return chk;
1428 else
1429 return XTHAL_ENCODE_MEMORY_TYPE(memoryFlags);
1430 }
1431 #else
1432 return XTHAL_UNSUPPORTED;
1433 #endif
1434 }
1435 #endif
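/*
 * Hedged usage sketch (not compiled): converting XTHAL_MEM_... flags into the 9-bit MPU
 * memory type. The specific flag shown (XTHAL_MEM_WRITEBACK) is assumed to be available
 * from <xtensa/hal.h>; a negative return value indicates an unsupported combination.
 */
#if 0
static void example_encode_memory_type(void)
{
    int mt = xthal_encode_memory_type(XTHAL_MEM_WRITEBACK);
    if (mt < 0)
    {
        /* invalid or unsupported combination of XTHAL_MEM_... flags */
    }
    (void) mt;
}
#endif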
1436
1437 #if defined(__SPLIT__mpu_rmap)
1438
1439 /*
 * Copies the current MPU entry list into 'fg_map' which
 * must point to available memory of at least
 * sizeof(xthal_MPU_entry) * XCHAL_MPU_ENTRIES.
 *
 * This function returns XTHAL_SUCCESS,
 * XTHAL_INVALID, or
 * XTHAL_UNSUPPORTED.
1447 */
int xthal_read_map(xthal_MPU_entry* fg_map)
1449 {
1450 #if XCHAL_HAVE_MPU
1451 unsigned i;
1452 if (!fg_map)
1453 return XTHAL_INVALID;
1454 xthal_read_map_raw(fg_map);
1455 return XTHAL_SUCCESS;
1456 #else
1457 return XTHAL_UNSUPPORTED;
1458 #endif
1459 }
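/*
 * Hedged usage sketch (not compiled): snapshot the current foreground map. The helper
 * name is hypothetical.
 */
#if 0
static void example_read_map(void)
{
    xthal_MPU_entry map[XCHAL_MPU_ENTRIES];
    if (xthal_read_map(map) == XTHAL_SUCCESS)
    {
        /* map[0..XCHAL_MPU_ENTRIES-1] now mirrors the hardware entries */
    }
}
#endif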
1460
1461 #if XCHAL_HAVE_MPU
1462 #undef XCHAL_MPU_BGMAP
1463 #define XCHAL_MPU_BGMAP(s,vstart,vend,rights,mtype,x...) XTHAL_MPU_ENTRY(vstart,1,rights,mtype),
1464 const xthal_MPU_entry Xthal_mpu_bgmap[] = { XCHAL_MPU_BACKGROUND_MAP(0) };
1465 #endif
1466
1467
1468 /*
 * Copies the MPU background map into 'bg_map' which must point
 * to available memory of at least
 * sizeof(xthal_MPU_entry) * XCHAL_MPU_BACKGROUND_ENTRIES.
 *
 * This function returns XTHAL_SUCCESS,
 * XTHAL_INVALID, or
 * XTHAL_UNSUPPORTED.
1476 */
int xthal_read_background_map(xthal_MPU_entry* bg_map)
1478 {
1479 #if XCHAL_HAVE_MPU
1480 if (!bg_map)
1481 return XTHAL_INVALID;
1482 memcpy(bg_map, Xthal_mpu_bgmap, sizeof(Xthal_mpu_bgmap));
1483 return XTHAL_SUCCESS;
1484 #else
1485 return XTHAL_UNSUPPORTED;
1486 #endif
1487 }
1488 #endif
1489 /*
1490 * Writes the map pointed to by 'entries' to the MPU. Before updating
1491 * the map, it commits any uncommitted
1492 * cache writes, and invalidates the cache if necessary.
1493 *
1494 * This function does not check for the correctness of the map. Generally
1495 * xthal_check_map() should be called first to check the map.
1496 *
1497 * If n == 0 then the existing map is cleared, and no new map is written
1498 * (useful for returning to reset state)
1499 *
1500 * If (n > 0 && n < XCHAL_MPU_ENTRIES) then a new map is written with
1501 * (XCHAL_MPU_ENTRIES-n) padding entries added to ensure a properly ordered
1502 * map. The resulting foreground map will be equivalent to the map vector
1503 * fg, but the position of the padding entries should not be relied upon.
1504 *
1505 * If n == XCHAL_MPU_ENTRIES then the complete map as specified by fg is
1506 * written.
1507 *
1508 * xthal_write_map() disables the MPU foreground map during the MPU
1509 * update and relies on the background map.
1510 *
1511 * As a result any interrupt that does not meet the following conditions
1512 * must be disabled before calling xthal_write_map():
1513 * 1) All code and data needed for the interrupt must be
1514 * mapped by the background map with sufficient access rights.
1515 * 2) The interrupt code must not access the MPU.
1516 *
1517 */
1518 #if defined(__SPLIT__mpu_wmap)
void xthal_write_map(const xthal_MPU_entry* fg, unsigned int n)
1520 {
1521 #if XCHAL_HAVE_MPU
1522 unsigned int cacheadrdis = xthal_calc_cacheadrdis(fg, n);
1523 xthal_dcache_all_writeback_inv();
1524 xthal_icache_all_invalidate();
1525 xthal_write_map_raw(fg, n);
1526 write_cacheadrdis(cacheadrdis);
isync(); // ensure the MPU map and CACHEADRDIS updates take effect before returning
1528 #endif
1529 }
1530 #endif
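/*
 * Hedged usage sketch (not compiled): validate a candidate map and then install it.
 * Interrupts that depend on foreground entries should be masked around the call, as
 * described above; how that is done is platform specific and left out here. The helper
 * name is hypothetical.
 */
#if 0
static void example_install_map(const xthal_MPU_entry* new_map)
{
    if (xthal_check_map(new_map, XCHAL_MPU_ENTRIES) == XTHAL_SUCCESS)
    {
        xthal_write_map(new_map, XCHAL_MPU_ENTRIES);
    }
}
#endif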
1531
1532 #if defined(__SPLIT__mpu_check)
1533 /*
1534 * Checks if entry vector 'fg' of length 'n' is a valid MPU access map.
1535 * Returns:
1536 * XTHAL_SUCCESS if valid,
1537 * XTHAL_OUT_OF_ENTRIES
1538 * XTHAL_MAP_NOT_ALIGNED,
1539 * XTHAL_BAD_ACCESS_RIGHTS,
1540 * XTHAL_OUT_OF_ORDER_MAP, or
1541 * XTHAL_UNSUPPORTED if config doesn't have an MPU.
1542 */
int xthal_check_map(const xthal_MPU_entry* fg, unsigned int n)
1544 {
1545 #if XCHAL_HAVE_MPU
1546 return check_map(fg, XCHAL_MPU_ENTRIES, Xthal_mpu_bgmap);
1547 #else
1548 return XTHAL_UNSUPPORTED;
1549 #endif
1550 }
1551 #endif
1552
1553 #if defined(__SPLIT__mpu_basic)
1554 /*
1555 * Returns the MPU entry that maps 'vaddr'. If 'infgmap' is non-NULL then it is
1556 * set to 1 if 'vaddr' is mapped by the foreground map, or 0 if 'vaddr'
1557 * is mapped by the background map.
1558 */
extern xthal_MPU_entry xthal_get_entry_for_address(void* paddr, int* infgmap)
1560 {
1561 #if XCHAL_HAVE_MPU
1562 xthal_MPU_entry e;
1563 unsigned int p;
1564 __asm__ __volatile__("PPTLB %0, %1\n\t" : "=a" (p) : "a" (paddr));
1565 if ((p & 0x80000000))
1566 {
1567 if (infgmap)
1568 *infgmap = 1;
1569 e.at = (p & 0x1fffff);
1570 __asm__ __volatile__("RPTLB0 %0, %1\n\t" : "=a" (e.as) : "a" (p & 0x1f));
1571 return e;
1572 }
1573 else
1574 {
1575 int i;
1576 if (infgmap)
1577 *infgmap = 0;
1578 for (i = XCHAL_MPU_BACKGROUND_ENTRIES - 1; i > 0; i--)
1579 {
1580 if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(Xthal_mpu_bgmap[i]) <= (unsigned) paddr)
1581 {
1582 return Xthal_mpu_bgmap[i];
1583 }
1584 } // in background map
1585 return Xthal_mpu_bgmap[0];
1586 }
1587 #else
1588 xthal_MPU_entry e;
1589 return e;
1590 #endif
1591 }
1592 #endif
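/*
 * Illustrative sketch (not part of the HAL): querying which entry maps a
 * given address and pulling its fields apart with the XTHAL_MPU_ENTRY_GET_*
 * accessors used elsewhere in this file. The address and variable names are
 * assumptions for the example only.
 *
 *   void* addr = (void*) 0x60000000;
 *   int in_foreground;
 *   xthal_MPU_entry e = xthal_get_entry_for_address(addr, &in_foreground);
 *   unsigned rights  = XTHAL_MPU_ENTRY_GET_ACCESS(e);
 *   unsigned memtype = XTHAL_MPU_ENTRY_GET_MEMORY_TYPE(e);
 *   // 'in_foreground' is 1 if a foreground entry matched, 0 if the
 *   // background map supplied the entry.
 */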
/*
 * This function is intended as an MPU specific version of
 * xthal_set_region_attribute(). xthal_set_region_attribute() calls
 * this function for MPU configurations.
 *
 * This function sets the attributes for the region [vaddr, vaddr+size)
 * in the MPU.
 *
 * Depending on the state of the MPU this function will require from
 * 0 to 3 unused MPU entries.
 *
 * This function typically will move, add, and subtract entries from
 * the MPU map during execution, so the resulting map may be quite
 * different from the map that existed when the function was called.
 *
 * This function does make the following guarantees:
 *    1) The MPU access map remains in a valid state at all times
 *       during its execution.
 *    2) At all points during (and after) completion the memoryType
 *       and accessRights remain the same for all addresses
 *       that are not in the range [vaddr, vaddr+size).
 *    3) If XTHAL_SUCCESS is returned, then the range
 *       [vaddr, vaddr+size) will have the accessRights and memoryType
 *       specified.
 *
 * The accessRights parameter should be either a 4-bit value corresponding
 * to an MPU access mode (as defined by the XTHAL_AR_.. constants), or
 * XTHAL_MPU_USE_EXISTING_ACCESS_RIGHTS.
 *
 * The memoryType parameter should be either a bit-wise or-ing of XTHAL_MEM_..
 * constants that represent a valid MPU memoryType, a 9-bit MPU memoryType
 * value, or XTHAL_MPU_USE_EXISTING_MEMORY_TYPE.
 *
 * In addition to the error codes that xthal_set_region_attribute()
 * returns, this function can also return: XTHAL_BAD_ACCESS_RIGHTS
 * (if the access rights bits map to an unsupported combination), or
 * XTHAL_OUT_OF_ENTRIES (if there are not enough unused MPU entries).
 *
 * If this function is called with an invalid MPU map, then this function
 * will return one of the codes that is returned by xthal_check_map().
 *
 * The flag XTHAL_CAFLAG_EXPAND is not supported.
 *
 */
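/*
 * Illustrative sketch (not part of the HAL): marking a 64KB buffer as
 * non-cacheable, read-write data. The buffer, its size, and the choice of
 * XTHAL_AR_RWrw / XTHAL_MEM_NON_CACHEABLE are assumptions for the example
 * only; real code should check the return value for the error codes
 * listed above.
 *
 *   extern char dma_buffer[0x10000];   // hypothetical, suitably aligned
 *
 *   int rc = xthal_mpu_set_region_attribute(dma_buffer, sizeof(dma_buffer),
 *                                           XTHAL_AR_RWrw,
 *                                           XTHAL_MEM_NON_CACHEABLE,
 *                                           0);
 *   if (rc != XTHAL_SUCCESS)
 *   {
 *       // e.g. XTHAL_OUT_OF_ENTRIES or one of the xthal_check_map() codes
 *   }
 */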
#if defined(__SPLIT__mpu_attributes)
int xthal_mpu_set_region_attribute(void* vaddr, unsigned size, int accessRights, int memoryType, unsigned flags)
{
#if XCHAL_HAVE_MPU
    unsigned int first;
    unsigned int last;
    int rv;

    if (flags & XTHAL_CAFLAG_EXPAND)
        return XTHAL_UNSUPPORTED;
    if (size == 0)
        return XTHAL_ZERO_SIZED_REGION;
    first = (unsigned) vaddr;
    last = first + size;
    if (last != 0xFFFFFFFF)
        last--;
    if (first >= last)
        return XTHAL_INVALID_ADDRESS_RANGE; // Wraps around

    if (accessRights & XTHAL_MPU_USE_EXISTING_ACCESS_RIGHTS)
    {
        accessRights = XTHAL_MPU_ENTRY_GET_ACCESS(xthal_get_entry_for_address(vaddr, 0));
    }
    else
    {
        accessRights = encode_access_rights(accessRights);
        if (accessRights < 0)
            return XTHAL_BAD_ACCESS_RIGHTS;
    }
    if (memoryType & XTHAL_MPU_USE_EXISTING_MEMORY_TYPE)
    {
        memoryType = XTHAL_MPU_ENTRY_GET_MEMORY_TYPE(xthal_get_entry_for_address(vaddr, 0));
    }
    else
    {
        if (memoryType & 0xffffe000) // Tests if any of the XTHAL_MEM_.. flag bits are present
            memoryType = xthal_encode_memory_type(memoryType);
        else
            if (memoryType & 0xfffffe00) // Bits 9..12 set here indicate that the memoryType
                                         // was improperly shifted, so we flag it as an error
                return XTHAL_BAD_MEMORY_TYPE;
        if (memoryType < 0)
            return XTHAL_BAD_MEMORY_TYPE;
    }
    if (flags & XTHAL_CAFLAG_EXACT)
        if (!mpu_aligned(first) || !mpu_aligned(last + 1))
            return XTHAL_INEXACT;

    first = mpu_align(first, (flags & XTHAL_CAFLAG_NO_PARTIAL));
    if (last != 0xffffffff)
    {
        last = mpu_align(last + 1, !(flags & XTHAL_CAFLAG_NO_PARTIAL));
        if (first >= last)
            return ((flags & XTHAL_CAFLAG_NO_PARTIAL) ? XTHAL_ZERO_SIZED_REGION : 0);
    }
    rv = safe_add_region(first, last, accessRights, memoryType, !(flags & XTHAL_CAFLAG_NO_AUTO_WB),
            !(flags & XTHAL_CAFLAG_NO_AUTO_INV));
    isync();
    return rv;
#else
    return XTHAL_UNSUPPORTED;
#endif
}
#endif


#if defined(__SPLIT__mpu_cachedis)

inline static unsigned int max2(unsigned int a, unsigned int b)
{
    if (a > b)
        return a;
    else
        return b;
}

inline static unsigned int mask_cachedis(unsigned int current, int first_region, int last_region)
{
    unsigned int x;
    x = ((1 << (last_region - first_region + 1)) - 1) << first_region;
    current &= ~x;
    return current;
}
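/*
 * For example (illustrative only): mask_cachedis(0xff, 2, 4) clears bits 2..4
 * and returns 0xe3, i.e. regions 2, 3, and 4 are recorded as containing
 * cacheable memory and so must not be disabled in CACHEADRDIS.
 */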

/*
 * xthal_calc_cacheadrdis() computes the value that should be written
 * to the CACHEADRDIS register. The return value has bits 0-7 set as follows:
 *    bit n: is zero if any part of the region [512MB * n, 512MB * (n+1)) is cacheable.
 *           is one if NO part of the region [512MB * n, 512MB * (n+1)) is cacheable.
 *
 * This function loops through both the foreground and background maps to
 * find cacheable areas. Once one cacheable area is found in a 512MB region,
 * we skip ahead to the next 512MB region.
 */
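/*
 * For example (illustrative only): if the only cacheable memory type in the
 * combined foreground/background map falls in region 0 (addresses below
 * 0x20000000), the computed value would be 0xfe: bit 0 clear (caching
 * allowed for region 0) and bits 1-7 set (caching disabled for regions 1-7).
 */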
unsigned int xthal_calc_cacheadrdis(const xthal_MPU_entry* fg, unsigned int num_entries)
{
#if XCHAL_HAVE_MPU
    unsigned int cachedis = 0xff;
    int fg_index = num_entries - 1;
    int bg_index = XCHAL_MPU_BACKGROUND_ENTRIES - 1;
    int working_region = 7;
    int ending_region;
    unsigned int vaddr = 0xffffffff;
    while (bg_index >= 0 || fg_index >= 0)
    {
        if ((fg_index >= 0 && XTHAL_MPU_ENTRY_GET_VALID(fg[fg_index])))
        {
            vaddr = XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[fg_index]);
            ending_region = vaddr >> 29;
            if (ending_region <= working_region)
            {
                unsigned int mt = XTHAL_MPU_ENTRY_GET_MEMORY_TYPE(fg[fg_index]);
                if (is_cacheable(mt))
                {
                    cachedis = mask_cachedis(cachedis, ending_region,
                            working_region);
                    /* Optimize since we have found one cacheable entry in the region ... no need to look for more */
                    if (ending_region == 0)
                        return cachedis;
                    else
                        working_region = ending_region - 1;
                }
                else
                    if (vaddr & 0x1fffffff) // If vaddr is not on a 512MB boundary, stay in this
                                            // region; otherwise move down to the next region
                        working_region = ending_region;
                    else
                        working_region = ending_region - 1;
            }
        }
        else if ((bg_index >= 0)
                && ((fg_index <= 0)
                        || XTHAL_MPU_ENTRY_GET_VALID(fg[fg_index-1])) && vaddr)
        {
            unsigned int caddr;
            unsigned int low_addr = (
                    (fg_index >= 0) ?
                            (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[fg_index])) :
                            0);
            /* First skip any background entries that start after the address of interest */
            while ((caddr = XTHAL_MPU_ENTRY_GET_VSTARTADDR(Xthal_mpu_bgmap[bg_index])) >= vaddr)
                bg_index--;
            do
            {
                caddr = max2(XTHAL_MPU_ENTRY_GET_VSTARTADDR(Xthal_mpu_bgmap[bg_index]),
                        low_addr);
                ending_region = caddr >> 29;
                if (ending_region <= working_region)
                {
                    unsigned int mt = XTHAL_MPU_ENTRY_GET_MEMORY_TYPE(
                            Xthal_mpu_bgmap[bg_index]);
                    if (is_cacheable(mt))
                    {
                        cachedis = mask_cachedis(cachedis, ending_region,
                                working_region);
                        /* Optimize since we have found one cacheable entry in the region ...
                         * no need to look for more */
                        if (ending_region == 0)
                            return cachedis; // we are done
                        else
                            working_region = ending_region - 1;
                    }
                    else
                        if (caddr & 0x1fffffff)
                            working_region = ending_region;
                        else
                            working_region = ending_region - 1;
                }
                bg_index--;
            } while (caddr > low_addr);
            vaddr = caddr;
        }
        fg_index--;
        if (!vaddr)
            break;
    }
    return cachedis;
#else
    return 0;
#endif
}
#endif

#if defined(__SPLIT__mpu_basic)
void (*_xthal_assert_handler)();
/* Undocumented internal testing function */
extern void _xthal_set_assert_handler(void (*handler)())
{
#if XCHAL_HAVE_MPU
    _xthal_assert_handler = handler;
#endif
}
#endif