/*
 * Copyright (c) 2004-2014 Tensilica Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <xtensa/config/core.h>

#if XCHAL_HAVE_XEA2 && (!XCHAL_HAVE_MPU)
/*
 * C stubs to issue the TLB-related instructions (with dsync/isync where needed).
 */
static inline void write_dtlb_entry(unsigned vpn_way, unsigned ppn_ca) {
	__asm__ __volatile__("wdtlb  %1, %0; dsync\n\t"
			: : "r" (vpn_way), "r" (ppn_ca) );
}

static inline void write_itlb_entry(unsigned vpn_way, unsigned ppn_ca) {
	__asm__ __volatile__("witlb  %1, %0; isync\n\t"
			: : "r" (vpn_way), "r" (ppn_ca) );
}

static inline unsigned read_dtlb1_entry(unsigned addr) {
	unsigned long tmp;
	__asm__ __volatile__("rdtlb1  %0, %1\n\t"
			: "=a" (tmp)
			: "a" (addr));
	return tmp;
}

static inline unsigned read_itlb1_entry(unsigned addr) {
	unsigned long tmp;
	__asm__ __volatile__("ritlb1  %0, %1\n\t"
			: "=a" (tmp)
			: "a" (addr));
	return tmp;
}

static inline unsigned probe_dtlb(unsigned addr) {
	unsigned long tmp;
	__asm__ __volatile__("pdtlb  %0, %1\n\t"
			: "=a" (tmp)
			: "a" (addr));
	return tmp;
}

static inline unsigned probe_itlb(unsigned addr) {
	unsigned long tmp;
	__asm__ __volatile__("pitlb  %0, %1\n\t"
			: "=a" (tmp)
			: "a" (addr));
	return tmp;
}

static inline void invalidate_dtlb_entry(unsigned addr) {
	__asm__ __volatile__("idtlb  %0; dsync \n\t"
			: : "a" (addr));
}

static inline void invalidate_itlb_entry(unsigned addr) {
	__asm__ __volatile__("iitlb  %0 ; isync\n\t"
			: : "a" (addr));
}

static inline unsigned read_dtlbcfg() {
	unsigned long tmp;
	__asm__ __volatile__("rsr.dtlbcfg %0\n\t"
			: "=a" (tmp));
	return tmp;
}

static inline unsigned read_itlbcfg() {
	unsigned long tmp;
	__asm__ __volatile__("rsr.itlbcfg %0\n\t"
			: "=a" (tmp));
	return tmp;
}

#endif

/*
 *  xthal_set_region_translation_raw is a quick and simple function
 *  to set both the physical address <paddr> and cache attribute <cattr> for
 *  the 512MB region at <vaddr>.
 *
 *  Parameters:
 *  void* vaddr		512MB-aligned pointer to the start of the virtual address region
 *  void* paddr		512MB-aligned pointer to the start of the physical address region
 *  unsigned cattr	4-bit value encoding the caching properties and rights (MMU only)
 *
 *  returns 0 (XTHAL_SUCCESS) if successful
 *  returns non-zero (XTHAL_UNSUPPORTED) on failure
 *
 *  This function has the following limitations:
 *
 *  1) Requires either the Region Translation Option or a v3 MMU running in the default mode (with spanning way)
 *  2) It does no error checking.
 *  3) Deals with one 512MB region (vaddr and paddr are required to be 512MB aligned, although that is not explicitly checked)
 *  4) It requires the caller to do any cache flushing that is needed
 *  5) Doesn't support mnemonically setting the 'rights' (rwx, rw, ...) bits on the MMU
 *  6) It is illegal to change the mapping of the region containing the current PC (not checked)
 *
 */
int xthal_set_region_translation_raw(void *vaddr, void *paddr, unsigned cattr) {
#if XCHAL_HAVE_MPU
	return XTHAL_UNSUPPORTED;
#else
#if XCHAL_HAVE_XEA2
#if XCHAL_HAVE_XLT_CACHEATTR || (XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
# if XCHAL_HAVE_XLT_CACHEATTR
	unsigned vpn_way = (unsigned)vaddr;
# else
	unsigned vpn_way = ((unsigned) vaddr & 0xFFFFFFF0) + XCHAL_SPANNING_WAY;
# endif
	unsigned ppn_ca = ((unsigned) paddr & 0xFFFFFFF0) + (cattr & 0xF);
	write_dtlb_entry(vpn_way, ppn_ca);
	write_itlb_entry(vpn_way, ppn_ca);
	return XTHAL_SUCCESS;
#else
	return XTHAL_UNSUPPORTED;
#endif
#else
	return XTHAL_UNSUPPORTED;
#endif
#endif
}

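/*
 * A minimal usage sketch (illustration only, not part of the HAL): map the
 * 512MB virtual region at 0x60000000 onto the physical region at 0x20000000
 * with the bypass (uncached) attribute.  The addresses and attribute are
 * assumptions chosen for the example; the caller remains responsible for any
 * cache maintenance and must not remap the region containing the current PC.
 *
 *	int rc = xthal_set_region_translation_raw((void*) 0x60000000,
 *						  (void*) 0x20000000,
 *						  XCHAL_CA_BYPASS);
 *	if (rc != XTHAL_SUCCESS) {
 *		// no Region Translation option / spanning-way v3 MMU in this config
 *	}
 */
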
/*
 * xthal_v2p() takes a virtual address as input and, if that virtual address is mapped to a physical address
 * by the MMU, returns:
 * 		a) the corresponding physical address
 * 		b) the TLB way that is used to translate the address
 * 		c) the cache attribute of the translation
 *
 * 	Parameters:
 * 	void* 		vaddr		A pointer representing the virtual address (there are no alignment requirements for this address)
 * 	void**		paddr		This value can be 0, or can point to a pointer variable which will be updated to contain the physical address
 * 	unsigned*	way			This value can be 0, or can point to an unsigned variable which will be updated to contain the TLB way.
 * 	unsigned*   cattr		This value can be 0, or can point to an unsigned variable which will be updated to contain the cache attr.
 * 	                        For MPU configurations bits 0..3 hold the access rights and bits 4..8 hold the encoded memory type
 *
 *  Returns 	0 (XTHAL_SUCCESS) 				if successful
 * 				XTHAL_NO_MAPPING				if there is no current mapping for the virtual address
 * 				XTHAL_UNSUPPORTED            	if unsupported
 *
 * 	Limitations:
 * 					Assumes that architecture variable DVARWAY56 is "Variable"
 * 					Uses the D-TLB for the translation ... the assumption is that the I-TLB has the same mappings
 */
int xthal_v2p(void* vaddr, void** paddr, unsigned *way, unsigned* cattr) {
#if XCHAL_HAVE_XEA2
#if XCHAL_HAVE_MPU
  if (paddr)
    *paddr = vaddr;
  if (way)
    *way = 0;
  if (cattr)
  {
      struct xthal_MPU_entry x = xthal_get_entry_for_address(vaddr, 0);
      *cattr = XTHAL_MPU_ENTRY_GET_ACCESS(x) | XTHAL_MPU_ENTRY_GET_MEMORY_TYPE(x) << XTHAL_AR_WIDTH;
  }
  return XTHAL_SUCCESS;
#else
	unsigned long probe = probe_dtlb((unsigned) vaddr);
#if !XCHAL_HAVE_PTP_MMU
	if (!(0x1 & probe))
		return XTHAL_NO_MAPPING;
	if (way)
		*way = 1;
	if (paddr || cattr) {
		unsigned long temp;
		temp = read_dtlb1_entry(probe);
		unsigned ppn = 0xe0000000 & temp;
		unsigned att = 0xf & temp;
		if (paddr)
			*paddr = ((void*) (ppn + (((unsigned) vaddr) & 0x1fffffff)));
		if (cattr)
			*cattr = att;
	}
#else
	{
		unsigned iway;
		if (!(0x10 & probe))
			return XTHAL_NO_MAPPING;
		iway = 0xf & probe;
		if (way)
			*way = iway;
		if (paddr || cattr) {
			unsigned temp;
			unsigned ppn = 0;
			unsigned ppn1;
			unsigned dtlbcfg = read_dtlbcfg();
			temp = read_dtlb1_entry(probe);
			unsigned att = 0xf & temp;
			if (cattr)
				*cattr = att;
			if (paddr)
				switch (iway) // following code derived from fig 4-40 of the ISA, "MMU Option Data (at) Format for RxTLB1"
				{ /* 4k pages */
				case 0:
				case 1:
				case 2:
				case 3:
				case 7:
				case 8:
				case 9:
					ppn = 0xfffff000; // 4k pages
					break;
				case 4: {
					switch ((dtlbcfg & (0x3 << 16)) >> 16) // bits 16 & 17
					{
					case 0: // 1MB pages
						ppn = 0xfff00000;
						break;
					case 1: // 4MB pages
						ppn = 0xffc00000;
						break;
					case 2: // 16MB pages
						ppn = 0xff000000;
						break;
					case 3: // 64MB pages
						ppn = 0xfc000000;
						break;
					default:
						return XTHAL_UNSUPPORTED;
					}
				}
					break;
				case 5:
					if ((dtlbcfg & (1 << 20)))
						ppn = 0xf8000000; // 128MB pages
					else
						ppn = 0xf0000000; // 256MB pages
					break;
				case 6:
					if ((dtlbcfg & (1 << 24)))
						ppn = 0xe0000000; // 512MB pages
					else
						ppn = 0xf0000000; // 256MB pages
					break;
				default:
					return XTHAL_UNSUPPORTED;
					break;
				}
			ppn1 = ppn & temp;
			*paddr = ((void*) (ppn1 + (((unsigned) vaddr) & (~ppn))));
		}
	}
#endif
	return XTHAL_SUCCESS;
#endif
#else
	return XTHAL_UNSUPPORTED;
#endif
}

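/*
 * A minimal usage sketch (illustration only, not part of the HAL): translate
 * an arbitrary virtual address.  Any of the three output pointers may be 0 if
 * that particular result is not needed.  The address is an assumption chosen
 * for the example.
 *
 *	void *pa;
 *	unsigned way, attr;
 *	if (xthal_v2p((void*) 0x60001234, &pa, &way, &attr) == XTHAL_SUCCESS) {
 *		// pa holds the physical address, way the D-TLB way used,
 *		// attr the CA field (or, for MPU configs, access rights and memory type)
 *	}
 */
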
/* these constants borrowed from xthal_set_region_attribute */
# if XCHAL_HAVE_PTP_MMU
#  define CA_BYPASS		XCHAL_CA_BYPASS
#  define CA_WRITETHRU		XCHAL_CA_WRITETHRU
#  define CA_WRITEBACK		XCHAL_CA_WRITEBACK
#  define CA_WRITEBACK_NOALLOC	XCHAL_CA_WRITEBACK_NOALLOC
#  define CA_ILLEGAL		XCHAL_CA_ILLEGAL
# else
/*  Hardcode these, because they get remapped when caches or writeback not configured:  */
#  define CA_BYPASS		2
#  define CA_WRITETHRU		1
#  define CA_WRITEBACK		4
#  define CA_WRITEBACK_NOALLOC	5
#  define CA_ILLEGAL		15
# endif

/* internal function that returns 1 if the supplied attr indicates the
 * cache is in writeback mode.
 */
static inline int is_writeback(unsigned attr) {
#if XCHAL_HAVE_XLT_CACHEATTR
	return attr == CA_WRITEBACK || attr == CA_WRITEBACK_NOALLOC;
#endif
#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
	return (attr | 0x3) == CA_WRITEBACK;
#endif
	return -1; /* unsupported */
}

/*
 *  xthal_set_region_translation()
 *
 *  Establishes a new mapping (with the supplied cache attributes)
 *  between a virtual address region and a physical address region.
 *
 *  This function is only supported with the following processor configurations:
 *  				a) Region Translation
 *  				b) v3 MMU with a spanning way running in the default mode
 *
 *  If the specified memory range exactly covers a series
 *  of consecutive 512 MB regions, the address mapping and cache
 *  attributes of these regions are updated.
 *
 *  If this is not the case, e.g. if either or both the
 *  start and end of the range only partially cover a 512 MB
 *  region, one of three results is possible:
 *
 *	1.  By default, the cache attribute of all regions
 *	    covered, even just partially, is changed to
 *	    the requested attribute.
 *
 *	2.  If the XTHAL_CAFLAG_EXACT flag is specified,
 *	    a non-zero error code is returned.
 *
 *	3.  If the XTHAL_CAFLAG_NO_PARTIAL flag is specified
 *	    (but not the EXACT flag), only regions fully
 *	    covered by the specified range are updated with
 *	    the requested attribute.
 *
 *  CACHE HANDLING
 *
 *  This function automatically writes back dirty data before remapping a
 *  virtual address region.
 *
 *  This writeback is done safely, i.e. by first switching to writethrough
 *  mode, and then invoking xthal_dcache_all_writeback(). Such a sequence is
 *  necessary to ensure there is no longer any dirty data in the memory region by the time
 *  this function returns, even in the presence of interrupts, speculation, etc.
 *  This automatic writeback can be disabled using the XTHAL_CAFLAG_NO_AUTO_WB flag.
 *
 *  This function also invalidates the caches after remapping a region because the
 *  cache could contain (now invalid) data from the previous mapping.
 *  This automatic invalidate can be disabled using the XTHAL_CAFLAG_NO_AUTO_INV flag.
 *
 *  Parameters:
 *	vaddr	starting virtual address of the region of memory
 *
 *	paddr	starting physical address for the mapping (this must be 512MB aligned
 *		relative to vaddr, such that ((vaddr ^ paddr) & 0x1FFFFFFF) == 0)
 *
 *	size	number of bytes in the region of memory
 *		(see the discussion of region coverage above)
 *
 *	cattr	cache attribute (encoded);
 *		typically taken from compile-time HAL constants
 *		XCHAL_CA_{BYPASS, WRITETHRU, WRITEBACK[_NOALLOC], ILLEGAL}
 *		(defined in <xtensa/config/core.h>);
 *		in XEA1, this corresponds to the value of a nibble
 *		in the CACHEATTR register;
 *		in XEA2, this corresponds to the value of the
 *		cache attribute (CA) field of each TLB entry
 *
 *	flags	bitwise combination of flags XTHAL_CAFLAG_*
 *
 *			XTHAL_CAFLAG_EXACT - If this flag is present,
 *			the mapping will only be done if the specified
 *			region exactly matches one or more 512MB regions; otherwise
 *			XTHAL_INEXACT is returned (and no mapping is done).
 *
 *			XTHAL_CAFLAG_NO_PARTIAL - If this flag is specified, then
 *			only regions that are completely covered by the specified range
 *			are affected.  If this flag is specified, and no regions are completely
 *			covered by the range, then no regions are affected and XTHAL_NO_REGIONS_COVERED
 *			is returned.
 *
 *  Returns:
 *	XTHAL_SUCCESS			successful, or size is zero
 *
 *	XTHAL_NO_REGIONS_COVERED	XTHAL_CAFLAG_NO_PARTIAL flag specified and address range
 *					is valid with a non-zero size, however no 512 MB region
 *					is completely covered by the range
 *
 *	XTHAL_INEXACT			XTHAL_CAFLAG_EXACT flag specified, and address range does
 *					not exactly cover a series of 512 MB regions
 *
 *	XTHAL_INVALID_ADDRESS		invalid address range specified (wraps around the end of memory)
 *
 *	XTHAL_ADDRESS_MISALIGNED	virtual and physical addresses are not aligned (512MB)
 *
 *	XTHAL_UNSUPPORTED		function not supported in this processor configuration
 */
int xthal_set_region_translation(void* vaddr, void* paddr, unsigned size,
		unsigned cattr, unsigned flags) {
#if XCHAL_HAVE_XEA2 && !XCHAL_HAVE_MPU
#if XCHAL_HAVE_XLT_CACHEATTR || (XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
	const unsigned CA_MASK = 0xF;
	const unsigned addr_mask = 0x1fffffff;
	const unsigned addr_shift = 29;
	unsigned vaddr_a = (unsigned) vaddr;
	unsigned paddr_a = (unsigned) paddr;
	unsigned end_vaddr;
	unsigned end_paddr;
	unsigned start_va_reg;
	unsigned end_va_reg;
	unsigned start_pa_reg;
	unsigned icache_attr = 0;
	int rv;
	int i;
	if (size == 0)
		return XTHAL_SUCCESS;
	if ((vaddr_a & addr_mask) ^ (paddr_a & addr_mask))
		return XTHAL_ADDRESS_MISALIGNED;
	icache_attr = cattr & CA_MASK;
#if (XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
	// if using the mmu in spanning way mode then 'and in' the R, RX, RW, RWX bits
	if ((cattr & 0x40000000) && (icache_attr < 12))
		icache_attr = icache_attr & ((cattr & 0xF0) >> 4);
#endif
	end_vaddr = vaddr_a + size - 1;
	end_paddr = paddr_a + size - 1;

	if ((end_vaddr < vaddr_a) || (end_paddr < paddr_a))
		return XTHAL_INVALID_ADDRESS;
	start_va_reg = vaddr_a >> addr_shift;
	end_va_reg = end_vaddr >> addr_shift;
	start_pa_reg = paddr_a >> addr_shift;
	if ((flags & XTHAL_CAFLAG_EXACT)
			&& ((size & addr_mask) || (vaddr_a & addr_mask)
					|| (paddr_a & addr_mask)))
		return XTHAL_INEXACT;
	if (flags & XTHAL_CAFLAG_NO_PARTIAL) {
		if (vaddr_a & addr_mask) {
			start_va_reg++;
			start_pa_reg++;
		}
		if ((end_vaddr & addr_mask) != addr_mask)
			end_va_reg--;
	}
	if (end_va_reg < start_va_reg)
		return XTHAL_NO_REGIONS_COVERED;
	/*
	 * Now we need to take care of any uncommitted cache writes in the affected regions:
	 * 1) first determine if any regions are in writeback mode
	 * 2) change those regions to writethrough
	 * 3) force the writeback of the d-cache by calling xthal_dcache_all_writeback()
	 */
#if ((XCHAL_DCACHE_SIZE > 0) && XCHAL_DCACHE_IS_WRITEBACK)
	if (!(flags & XTHAL_CAFLAG_NO_AUTO_WB)) {
		unsigned old_cache_attr = xthal_get_cacheattr();
		unsigned cachewrtr = old_cache_attr;
		unsigned need_safe_writeback = 0;
		for (i = start_va_reg; i <= end_va_reg; i++) {
			unsigned sh = i << 2;
			unsigned old_attr = (old_cache_attr >> sh) & CA_MASK;
			if (is_writeback(old_attr)) {
				need_safe_writeback = 1;
				cachewrtr = (cachewrtr & ~(CA_MASK << sh))
						| (CA_WRITETHRU << sh);
			}
		}

		if (need_safe_writeback) {
			xthal_set_cacheattr(cachewrtr); /* set to writethru first, to safely writeback any dirty data */
			xthal_dcache_all_writeback(); /* much quicker than scanning entire 512MB region(s) */
		}
	}
#endif
	/* Now we set the affected region translations */
	for (i = start_va_reg; i <= end_va_reg; i++) {
		if ((rv = xthal_set_region_translation_raw(
				(void*) ((start_va_reg++) << addr_shift),
				(void*) ((start_pa_reg++) << addr_shift), icache_attr)))
			return rv;
	}

	/*
	 * Now we need to invalidate the cache in the affected regions. For now invalidate the entire cache,
	 * but investigate if there are faster alternatives on some architectures.
	 */
	if (!(flags & XTHAL_CAFLAG_NO_AUTO_INV)) {
# if XCHAL_DCACHE_SIZE > 0
		xthal_dcache_all_writeback_inv(); /* some areas in memory (outside the intended region) may have uncommitted
		 data so we need the writeback_inv(). */
#endif
#if	XCHAL_ICACHE_SIZE > 0
		xthal_icache_all_invalidate();
#endif
	}
	return XTHAL_SUCCESS;
#else
	return XTHAL_UNSUPPORTED;
#endif
#else
	return XTHAL_UNSUPPORTED;
#endif
}

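/*
 * A minimal usage sketch (illustration only, not part of the HAL): remap 1GB
 * of virtual space at 0x40000000 onto physical address 0 as writeback
 * cacheable, failing rather than rounding if the range does not exactly cover
 * whole 512MB regions.  The addresses and size are assumptions chosen for the
 * example.
 *
 *	int rc = xthal_set_region_translation((void*) 0x40000000,
 *					      (void*) 0x00000000,
 *					      0x40000000,	// 1GB: two 512MB regions
 *					      XCHAL_CA_WRITEBACK,
 *					      XTHAL_CAFLAG_EXACT);
 *	if (rc != XTHAL_SUCCESS) {
 *		// e.g. XTHAL_INEXACT, XTHAL_ADDRESS_MISALIGNED, XTHAL_UNSUPPORTED
 *	}
 */
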
/* xthal_invalidate_region()
 * invalidates the TLB entries (instruction and data) for the specified region.
 *
 * This function is only supported on processor configurations
 * with a v3 MMU with a spanning way.
 *
 * Parameter
 * vaddr - virtual address of the region to invalidate (must be 512MB aligned)
 *
 * returns:
 * XTHAL_SUCCESS 			- Success
 * XTHAL_INVALID_ADDRESS 		- vaddr is not 512MB aligned
 * XTHAL_UNSUPPORTED 			- Unsupported on this configuration
 *
 */
int xthal_invalidate_region(void* vaddr) {
#if XCHAL_HAVE_XEA2 && !XCHAL_HAVE_MPU
#if (XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
	unsigned addr = (unsigned) vaddr;
	if (addr & 0x1fffffff)
		return XTHAL_INVALID_ADDRESS;
	addr += XCHAL_SPANNING_WAY;
	invalidate_dtlb_entry(addr);
	invalidate_itlb_entry(addr);
	return XTHAL_SUCCESS;
#else
	return XTHAL_UNSUPPORTED;
#endif
#else
	return XTHAL_UNSUPPORTED;
#endif
}
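
/*
 * A minimal usage sketch (illustration only, not part of the HAL): drop the
 * spanning-way mapping for the 512MB region at 0x60000000.  The address is an
 * assumption chosen for the example and must be 512MB aligned, otherwise
 * XTHAL_INVALID_ADDRESS is returned.
 *
 *	int rc = xthal_invalidate_region((void*) 0x60000000);
 */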