1 /*  attribute.c - Cache attribute (memory access mode) related functions  */
2 
3 /* $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/hal/attribute.c#1 $ */
4 
5 /*
6  * Copyright (c) 2004-2009 Tensilica Inc.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining
9  * a copy of this software and associated documentation files (the
10  * "Software"), to deal in the Software without restriction, including
11  * without limitation the rights to use, copy, modify, merge, publish,
12  * distribute, sublicense, and/or sell copies of the Software, and to
13  * permit persons to whom the Software is furnished to do so, subject to
14  * the following conditions:
15  *
16  * The above copyright notice and this permission notice shall be included
17  * in all copies or substantial portions of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
22  * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
23  * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  */
27 
28 #include <xtensa/config/core.h>
29 
30 
31 
32 /*
33  *  Set the "cache attribute" (encoded memory access modes)
34  *  of the region of memory specified by <vaddr> and <size>.
35  *
36  *  This function is only supported on processor configurations
37  *  with region protection (or XEA1).  It has no effect on
38  *  a processor configured with an MMU (with autorefill).
39  *
40  *  SPECIFYING THE MEMORY REGION
41  *  The full (4 GB) address space may be specified with an
42  *  address of zero and a size of 0xFFFFFFFF (or -1);
43  *  in fact whenever <vaddr>+<size> equal 0xFFFFFFFF, <size>
44  *  is interpreted as one byte greater than that specified.
45  *
46  *  If the specified memory range exactly covers a series
47  *  of consecutive 512 MB regions, the cache attributes of
48  *  these regions are updated with the requested attribute.
49  *  If this is not the case, e.g. if either or both the
50  *  start and end of the range only partially cover a 512 MB
 51  *  region, one of three results is possible:
52  *
53  *	1.  By default, the cache attribute of all regions
54  *	    covered, even just partially, is changed to
55  *	    the requested attribute.
56  *
57  *	2.  If the XTHAL_CAFLAG_EXACT flag is specified,
58  *	    a non-zero error code is returned.
59  *
60  *	3.  If the XTHAL_CAFLAG_NO_PARTIAL flag is specified
61  *	    (but not the EXACT flag), only regions fully
62  *	    covered by the specified range are updated with
63  *	    the requested attribute.
64  *
65  *  WRITEBACK CACHE HANDLING
66  *  This function automatically writes back dirty data when
67  *  switching a region from writeback mode to a non-writeback mode.
68  *  This writeback is done safely, ie. by first switching to writethrough
69  *  mode, then invoking xthal_dcache_all_writeback(), then switching to
70  *  the selected <cattr> mode.  Such a sequence is necessary to ensure
71  *  there is no longer any dirty data in the memory region by the time
72  *  this function returns, even in the presence of interrupts, speculation, etc.
73  *  This avoids memory coherency problems when switching from writeback
74  *  to bypass mode (in bypass mode, loads go directly to memory, ignoring
75  *  any dirty data in the cache; also, such dirty data can still be castout
76  *  due to seemingly unrelated stores).
77  *  This automatic write-back can be disabled using the XTHAL_CAFLAG_NO_AUTO_WB flag.
78  *
79  *  CACHE DISABLE THEN ENABLE HANDLING
80  *  To avoid cache coherency issues when the cache is disabled, then
 81  * memory is modified, then the cache is re-enabled (thus making
82  *  visible stale cache entries), this function automatically
83  *  invalidates the cache when any region switches to bypass mode.
84  *  For efficiency, the entire cache is invalidated -- this is done
85  *  using writeback-invalidate operations to ensure coherency even
86  *  when other regions still have write-back caches enabled.
87  *  This automatic invalidate can be disabled using the XTHAL_CAFLAG_NO_AUTO_INV flag.
88  *
89  *  Parameters:
90  *	vaddr	starting virtual address of region of memory
91  *
92  *	size	number of bytes in region of memory
93  *		(see above, SPECIFYING THE MEMORY REGION)
94  *
95  *	cattr	cache attribute (encoded);
96  *		typically taken from compile-time HAL constants
97  *		XCHAL_CA_{BYPASS[BUF], WRITETHRU, WRITEBACK[_NOALLOC], ILLEGAL}
98  *		(defined in <xtensa/config/core.h>);
99  *		in XEA1, this corresponds to the value of a nibble
100  *		in the CACHEATTR register;
101  *		in XEA2, this corresponds to the value of the
102  *		cache attribute (CA) field of each TLB entry
103  *
104  *		On MPU configurations, the cattr is composed of accessRights
105  *		and memoryType.  The accessRights occupy bits 0..3 and are
106  *		typically taken from the XTHAL_AR constants.  The memory type
107  *		is specified by either a bitwise or-ing of the XTHAL_MEM_...
108  *		constants or if none of the XTHAL_MEM_... constants are
109  *		specified, bits 4..12 are used for the memory type (that
 110  *		allows a cattr obtained by xthal_v2p() to be passed directly).
111  *
112  *		In addition on MPU configurations if the
113  *		XTHAL_MPU_USE_EXISTING_MEMORY_TYPE bit is set then the existing
114  *		memoryType at the first address in the region is used for the
115  *		memoryType of the new region.
116  *
117  *		Likewise, if the XTHAL_MPU_USE_EXISTING_ACCESS_RIGHTS bit is set
118  *		in cattr, then the existing accessRights at the first address
119  *		in the region are used for the accessRights of the new region.
120  *
121  *	flags	bitwise combination of flags XTHAL_CAFLAG_*
122  *		(see xtensa/hal.h for brief description of each flag);
123  *		(see also various descriptions above);
124  *
125  *		The XTHAL_CAFLAG_EXPAND flag prevents attribute changes
126  *		to regions whose current cache attribute already provide
127  *		greater access than the requested attribute.
128  *		This ensures access to each region can only "expand",
129  *		and thus continue to work correctly in most instances,
130  *		possibly at the expense of performance.  This helps
131  *		make this flag safer to use in a variety of situations.
132  *		For the purposes of this flag, cache attributes are
133  *		ordered (in "expansion" order, from least to greatest
134  *		access) as follows:
135  *			XCHAL_CA_ILLEGAL	no access allowed
136  *			(various special and reserved attributes)
137  *			XCHAL_CA_WRITEBACK	writeback cached
138  *			XCHAL_CA_WRITEBACK_NOALLOC writeback no-write-alloc
139  *			XCHAL_CA_WRITETHRU	writethrough cached
140  *			XCHAL_CA_BYPASSBUF	bypass with write buffering
141  *			XCHAL_CA_BYPASS		bypass (uncached)
142  *		This is consistent with requirements of certain
143  *		devices that no caches be used, or in certain cases
144  *		that writethrough caching is allowed but not writeback.
145  *		Thus, bypass mode is assumed to work for most/all types
146  *		of devices and memories (albeit at reduced performance
147  *		compared to cached modes), and is ordered as providing
148  *		greatest access (to most devices).
149  *		Thus, this XTHAL_CAFLAG_EXPAND flag has no effect when
150  *		requesting the XCHAL_CA_BYPASS attribute (one can always
151  *		expand to bypass mode).  And at the other extreme,
152  *		no action is ever taken by this function when specifying
153  *		both the XTHAL_CAFLAG_EXPAND flag and the XCHAL_CA_ILLEGAL
154  *		cache attribute.
155  *
156  *		The XTHAL_CAFLAG_EXPAND is not supported on MPU configurations.
157  *
158  *  Returns:
159  *	0	successful, or size is zero
160  *	-1	XTHAL_CAFLAG_NO_PARTIAL flag specified and address range
161  *		is valid with a non-zero size, however no 512 MB region (or page)
162  *		is completely covered by the range
163  *	-2	XTHAL_CAFLAG_EXACT flag specified, and address range does
164  *		not exactly specify a 512 MB region (or page)
165  *	-3	invalid address range specified (wraps around the end of memory)
166  *	-4	function not supported in this processor configuration
167  */
xthal_set_region_attribute(void * vaddr,unsigned size,unsigned cattr,unsigned flags)168 int  xthal_set_region_attribute( void *vaddr, unsigned size, unsigned cattr, unsigned flags )
169 {
170 #if XCHAL_HAVE_MPU
171     if (cattr & 0xffffe000) // check if XTHAL mem flags were supplied
172         // in this case just pass cattr as the memType paramenter
173        return xthal_mpu_set_region_attribute(vaddr, size, cattr, cattr, flags);
174     else
175        // otherwise we take the bits 0-3 for accessRights and bits 4-13 as the memoryType
176        return xthal_mpu_set_region_attribute(vaddr, size, cattr & 0xf, (cattr & 0x1ff0) >> 4, flags);
177 #elif XCHAL_HAVE_PTP_MMU && !XCHAL_HAVE_SPANNING_WAY
178     return -4;		/* full MMU not supported */
179 #else
180 /*  These cache attribute encodings are valid for XEA1 and region protection only:  */
181 # if XCHAL_HAVE_PTP_MMU
182 #  define CA_BYPASS		XCHAL_CA_BYPASS
183 # ifdef XCHAL_CA_BYPASSBUF
184 #  define CA_BYPASSBUF		XCHAL_CA_BYPASSBUF
185 # else
186 #  define CA_BYPASSBUF	XCHAL_CA_BYPASS
187 # endif
188 #  define CA_WRITETHRU		XCHAL_CA_WRITETHRU
189 #  define CA_WRITEBACK		XCHAL_CA_WRITEBACK
190 #  define CA_WRITEBACK_NOALLOC	XCHAL_CA_WRITEBACK_NOALLOC
191 #  define CA_ILLEGAL		XCHAL_CA_ILLEGAL
192 # else
193 /*  Hardcode these, because they get remapped when caches or writeback not configured:  */
194 #  define CA_BYPASS		2
195 #  define CA_BYPASSBUF		6
196 #  define CA_WRITETHRU		1
197 #  define CA_WRITEBACK		4
198 #  define CA_WRITEBACK_NOALLOC	5
199 #  define CA_ILLEGAL		15
200 # endif
201 # define CA_MASK	0xF	/*((1L<<XCHAL_CA_BITS)-1)*/	/* mask of cache attribute bits */
202 # define IS_CACHED(attr) ((attr == CA_BYPASS) ||  (attr == CA_BYPASSBUF))
203 
204     unsigned start_region, start_offset, end_vaddr, end_region, end_offset;
205     unsigned cacheattr, cachewrtr, i, disabled_cache = 0;
206 
207     if (size == 0)
208 	return 0;
209     end_vaddr = (unsigned)vaddr + size - 1;
210     if (end_vaddr < (unsigned)vaddr)
211 	return -3;		/* address overflow/wraparound error */
212     if (end_vaddr == 0xFFFFFFFE /*&& (unsigned)vaddr == 0*/ )
213 	end_vaddr = 0xFFFFFFFF;	/* allow specifying 4 GB */
214     start_region = ((unsigned)vaddr >> 29);
215     start_offset = ((unsigned)vaddr & 0x1FFFFFFF);
216     end_region = (end_vaddr >> 29);
217     end_offset = ((end_vaddr+1) & 0x1FFFFFFF);
218     if (flags & XTHAL_CAFLAG_EXACT) {
219 	if (start_offset != 0 || end_offset != 0)
220 	    return -2;		/* not an exact-sized range */
221     } else if (flags & XTHAL_CAFLAG_NO_PARTIAL) {
222 	if (start_offset != 0)
223 	    start_region++;
224 	if (end_offset != 0)
225 	    end_region--;
226 	if (start_region > end_region)
227 	    return -1;		/* nothing fully covered by specified range */
228     }
229     cacheattr = cachewrtr = xthal_get_cacheattr();
230     cattr &= CA_MASK;
231 # if XCHAL_ICACHE_SIZE == 0 && XCHAL_DCACHE_SIZE == 0
232     if (cattr == CA_WRITETHRU || cattr == CA_WRITEBACK || cattr == CA_WRITEBACK_NOALLOC)
233 	cattr = CA_BYPASS;	/* no caches configured, only do protection */
234 # elif XCHAL_DCACHE_IS_WRITEBACK == 0
235     if (cattr == CA_WRITEBACK || cattr == CA_WRITEBACK_NOALLOC)
236 	cattr = CA_WRITETHRU;	/* no writeback configured for data cache */
237 # endif
238     for (i = start_region; i <= end_region; i++) {
239 	unsigned sh = (i << 2);		/* bit offset of nibble for region i */
240 	unsigned oldattr = ((cacheattr >> sh) & CA_MASK);
241 	unsigned newattr = cattr;
242 	if (flags & XTHAL_CAFLAG_EXPAND) {
243 	    /*  This array determines whether a cache attribute can be changed
244 	     *  from <a> to <b> with the EXPAND flag; an attribute's "pri"
245 	     *  value (from this array) can only monotonically increase:  */
246 	    const static signed char _Xthal_ca_pri[16] = {[CA_ILLEGAL] = -1,
247 			[CA_WRITEBACK] = 3, [CA_WRITEBACK_NOALLOC] = 3, [CA_WRITETHRU] = 4, [CA_BYPASSBUF] = 8, [CA_BYPASS] = 9 };
248 	    if (_Xthal_ca_pri[newattr] < _Xthal_ca_pri[oldattr])
249 		newattr = oldattr;	/* avoid going to lesser access */
250 	}
251 	if (IS_CACHED(newattr) && !IS_CACHED(oldattr))
252 	    disabled_cache = 1;		/* we're disabling the cache for some region */
253 # if XCHAL_DCACHE_IS_WRITEBACK
254 	{
255 	unsigned tmpattr = newattr;
256 	if ((oldattr == CA_WRITEBACK || oldattr == CA_WRITEBACK_NOALLOC)
257 	     && newattr != CA_WRITEBACK && newattr != CA_WRITEBACK_NOALLOC)	/* leaving writeback mode? */
258 	    tmpattr = CA_WRITETHRU;				/* leave it safely! */
259 	cachewrtr = ((cachewrtr & ~(CA_MASK << sh)) | (tmpattr << sh));
260 	}
261 # endif
262 	cacheattr = ((cacheattr & ~(CA_MASK << sh)) | (newattr << sh));
263     }
264 # if XCHAL_DCACHE_IS_WRITEBACK
265     if (cacheattr != cachewrtr		/* need to leave writeback safely? */
266 	&& (flags & XTHAL_CAFLAG_NO_AUTO_WB) == 0) {
267 	xthal_set_cacheattr(cachewrtr);	/* set to writethru first, to safely writeback any dirty data */
268 	xthal_dcache_all_writeback();	/* much quicker than scanning entire 512MB region(s) */
269     }
270 # endif
271     xthal_set_cacheattr(cacheattr);
272     /*  After disabling the cache, invalidate cache entries
273      *  to avoid coherency issues when later re-enabling it:  */
274     if (disabled_cache && (flags & XTHAL_CAFLAG_NO_AUTO_INV) == 0) {
275 	xthal_dcache_all_writeback_inv();	/* we might touch regions of memory still enabled write-back,
276 						   so must use writeback-invalidate, not just invalidate */
277 	xthal_icache_all_invalidate();
278     }
279     return( 0 );
280 #endif /* !(XCHAL_HAVE_PTP_MMU && !XCHAL_HAVE_SPANNING_WAY) */
281 }
282 
283