/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2017 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/
27
/*
 * Interface to the Level 2 Cache (L2C) control, measurement, and debugging
 * facilities.
 */
32
#ifndef __CVMX_L2C_H__
#define __CVMX_L2C_H__

#include <uapi/asm/bitfield.h>
37
/*
 * Deprecated aliases for the L2 cache geometry accessors; new code
 * should call the cvmx_l2c_get_*() functions directly.
 */
#define CVMX_L2_ASSOC	 cvmx_l2c_get_num_assoc()	/* Deprecated macro */
#define CVMX_L2_SET_BITS cvmx_l2c_get_set_bits()	/* Deprecated macro */
#define CVMX_L2_SETS	 cvmx_l2c_get_num_sets()	/* Deprecated macro */

/* Based on 128 byte cache line size */
#define CVMX_L2C_IDX_ADDR_SHIFT	7
/* Mask selecting the set-index bits of an address (set count is a power of 2) */
#define CVMX_L2C_IDX_MASK	(cvmx_l2c_get_num_sets() - 1)

/* Defines for index aliasing computations */
#define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT	(CVMX_L2C_IDX_ADDR_SHIFT + \
					 cvmx_l2c_get_set_bits())
#define CVMX_L2C_ALIAS_MASK (CVMX_L2C_IDX_MASK << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT)
#define CVMX_L2C_MEMBANK_SELECT_SIZE	4096

/* Number of L2C Tag-and-data sections (TADs) that are connected to LMC. */
#define CVMX_L2C_TADS	1
54
55 union cvmx_l2c_tag {
56 uint64_t u64;
57 struct {
58 __BITFIELD_FIELD(uint64_t reserved:28,
59 __BITFIELD_FIELD(uint64_t V:1,
60 __BITFIELD_FIELD(uint64_t D:1,
61 __BITFIELD_FIELD(uint64_t L:1,
62 __BITFIELD_FIELD(uint64_t U:1,
63 __BITFIELD_FIELD(uint64_t addr:32,
64 ;))))))
65 } s;
66 };
67
/* L2C Performance Counter events (pre-Octeon2 models). */
enum cvmx_l2c_event {
	CVMX_L2C_EVENT_CYCLES		=  0,
	CVMX_L2C_EVENT_INSTRUCTION_MISS	=  1,
	CVMX_L2C_EVENT_INSTRUCTION_HIT	=  2,
	CVMX_L2C_EVENT_DATA_MISS	=  3,
	CVMX_L2C_EVENT_DATA_HIT		=  4,
	CVMX_L2C_EVENT_MISS		=  5,
	CVMX_L2C_EVENT_HIT		=  6,
	CVMX_L2C_EVENT_VICTIM_HIT	=  7,
	CVMX_L2C_EVENT_INDEX_CONFLICT	=  8,
	CVMX_L2C_EVENT_TAG_PROBE	=  9,
	CVMX_L2C_EVENT_TAG_UPDATE	= 10,
	CVMX_L2C_EVENT_TAG_COMPLETE	= 11,
	CVMX_L2C_EVENT_TAG_DIRTY	= 12,
	CVMX_L2C_EVENT_DATA_STORE_NOP	= 13,
	CVMX_L2C_EVENT_DATA_STORE_READ	= 14,
	CVMX_L2C_EVENT_DATA_STORE_WRITE	= 15,
	CVMX_L2C_EVENT_FILL_DATA_VALID	= 16,
	CVMX_L2C_EVENT_WRITE_REQUEST	= 17,
	CVMX_L2C_EVENT_READ_REQUEST	= 18,
	CVMX_L2C_EVENT_WRITE_DATA_VALID	= 19,
	CVMX_L2C_EVENT_XMC_NOP		= 20,
	CVMX_L2C_EVENT_XMC_LDT		= 21,
	CVMX_L2C_EVENT_XMC_LDI		= 22,
	CVMX_L2C_EVENT_XMC_LDD		= 23,
	CVMX_L2C_EVENT_XMC_STF		= 24,
	CVMX_L2C_EVENT_XMC_STT		= 25,
	CVMX_L2C_EVENT_XMC_STP		= 26,
	CVMX_L2C_EVENT_XMC_STC		= 27,
	CVMX_L2C_EVENT_XMC_DWB		= 28,
	CVMX_L2C_EVENT_XMC_PL2		= 29,
	CVMX_L2C_EVENT_XMC_PSL1		= 30,
	CVMX_L2C_EVENT_XMC_IOBLD	= 31,
	CVMX_L2C_EVENT_XMC_IOBST	= 32,
	CVMX_L2C_EVENT_XMC_IOBDMA	= 33,
	CVMX_L2C_EVENT_XMC_IOBRSP	= 34,
	CVMX_L2C_EVENT_XMC_BUS_VALID	= 35,
	CVMX_L2C_EVENT_XMC_MEM_DATA	= 36,
	CVMX_L2C_EVENT_XMC_REFL_DATA	= 37,
	CVMX_L2C_EVENT_XMC_IOBRSP_DATA	= 38,
	CVMX_L2C_EVENT_RSC_NOP		= 39,
	CVMX_L2C_EVENT_RSC_STDN		= 40,
	CVMX_L2C_EVENT_RSC_FILL		= 41,
	CVMX_L2C_EVENT_RSC_REFL		= 42,
	CVMX_L2C_EVENT_RSC_STIN		= 43,
	CVMX_L2C_EVENT_RSC_SCIN		= 44,
	CVMX_L2C_EVENT_RSC_SCFL		= 45,
	CVMX_L2C_EVENT_RSC_SCDN		= 46,
	CVMX_L2C_EVENT_RSC_DATA_VALID	= 47,
	CVMX_L2C_EVENT_RSC_VALID_FILL	= 48,
	CVMX_L2C_EVENT_RSC_VALID_STRSP	= 49,
	CVMX_L2C_EVENT_RSC_VALID_REFL	= 50,
	CVMX_L2C_EVENT_LRF_REQ		= 51,
	CVMX_L2C_EVENT_DT_RD_ALLOC	= 52,
	CVMX_L2C_EVENT_DT_WR_INVAL	= 53,
	CVMX_L2C_EVENT_MAX		/* Number of defined events */
};
126
/* L2C Performance Counter events for Octeon2. */
enum cvmx_l2c_tad_event {
	CVMX_L2C_TAD_EVENT_NONE		= 0,
	CVMX_L2C_TAD_EVENT_TAG_HIT	= 1,
	CVMX_L2C_TAD_EVENT_TAG_MISS	= 2,
	CVMX_L2C_TAD_EVENT_TAG_NOALLOC	= 3,
	CVMX_L2C_TAD_EVENT_TAG_VICTIM	= 4,
	CVMX_L2C_TAD_EVENT_SC_FAIL	= 5,
	CVMX_L2C_TAD_EVENT_SC_PASS	= 6,
	CVMX_L2C_TAD_EVENT_LFB_VALID	= 7,
	CVMX_L2C_TAD_EVENT_LFB_WAIT_LFB	= 8,
	CVMX_L2C_TAD_EVENT_LFB_WAIT_VAB	= 9,
	/* Per-quad events occupy discontiguous encodings (16 apart) */
	CVMX_L2C_TAD_EVENT_QUAD0_INDEX	= 128,
	CVMX_L2C_TAD_EVENT_QUAD0_READ	= 129,
	CVMX_L2C_TAD_EVENT_QUAD0_BANK	= 130,
	CVMX_L2C_TAD_EVENT_QUAD0_WDAT	= 131,
	CVMX_L2C_TAD_EVENT_QUAD1_INDEX	= 144,
	CVMX_L2C_TAD_EVENT_QUAD1_READ	= 145,
	CVMX_L2C_TAD_EVENT_QUAD1_BANK	= 146,
	CVMX_L2C_TAD_EVENT_QUAD1_WDAT	= 147,
	CVMX_L2C_TAD_EVENT_QUAD2_INDEX	= 160,
	CVMX_L2C_TAD_EVENT_QUAD2_READ	= 161,
	CVMX_L2C_TAD_EVENT_QUAD2_BANK	= 162,
	CVMX_L2C_TAD_EVENT_QUAD2_WDAT	= 163,
	CVMX_L2C_TAD_EVENT_QUAD3_INDEX	= 176,
	CVMX_L2C_TAD_EVENT_QUAD3_READ	= 177,
	CVMX_L2C_TAD_EVENT_QUAD3_BANK	= 178,
	CVMX_L2C_TAD_EVENT_QUAD3_WDAT	= 179,
	CVMX_L2C_TAD_EVENT_MAX		/* One past the last defined event */
};
157
/**
 * Configure one of the four L2 Cache performance counters to capture event
 * occurrences.
 *
 * @counter:	    The counter to configure. Range 0..3.
 * @event:	    The type of L2 Cache event occurrence to count.
 * @clear_on_read:  When asserted, any read of the performance counter
 *		    clears the counter.
 *
 * @note The routine does not clear the counter.
 */
void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
			  uint32_t clear_on_read);

/**
 * Read the given L2 Cache performance counter. The counter must be configured
 * before reading, but this routine does not enforce this requirement.
 *
 * @counter:  The counter to read. Range 0..3.
 *
 * Returns The current counter value.
 */
uint64_t cvmx_l2c_read_perf(uint32_t counter);
181
/**
 * Return the L2 Cache way partitioning for a given core.
 *
 * @core:  The core processor of interest.
 *
 * Returns The mask specifying the partitioning. 0 bits in mask indicates
 *	   the cache 'ways' that a core can evict from.
 *	   -1 on error
 */
int cvmx_l2c_get_core_way_partition(uint32_t core);

/**
 * Partitions the L2 cache for a core
 *
 * @core: The core that the partitioning applies to.
 * @mask: The partitioning of the ways expressed as a binary
 *	  mask. A 0 bit allows the core to evict cache lines from
 *	  a way, while a 1 bit blocks the core from evicting any
 *	  lines from that way. There must be at least one allowed
 *	  way (0 bit) in the mask.
 *
 * @note If any ways are blocked for all cores and the HW blocks, then
 *	 those ways will never have any cache lines evicted from them.
 *	 All cores and the hardware blocks are free to read from all
 *	 ways regardless of the partitioning.
 */
int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask);

/**
 * Return the L2 Cache way partitioning for the hw blocks.
 *
 * Returns The mask specifying the reserved way. 0 bits in mask indicates
 *	   the cache 'ways' that a core can evict from.
 *	   -1 on error
 */
int cvmx_l2c_get_hw_way_partition(void);

/**
 * Partitions the L2 cache for the hardware blocks.
 *
 * @mask: The partitioning of the ways expressed as a binary
 *	  mask. A 0 bit allows the core to evict cache lines from
 *	  a way, while a 1 bit blocks the core from evicting any
 *	  lines from that way. There must be at least one allowed
 *	  way (0 bit) in the mask.
 *
 * @note If any ways are blocked for all cores and the HW blocks, then
 *	 those ways will never have any cache lines evicted from them.
 *	 All cores and the hardware blocks are free to read from all
 *	 ways regardless of the partitioning.
 */
int cvmx_l2c_set_hw_way_partition(uint32_t mask);
236
237
/**
 * Locks a line in the L2 cache at the specified physical address
 *
 * @addr:  physical address of line to lock
 *
 * Returns 0 on success,
 *	   1 if line not locked.
 */
int cvmx_l2c_lock_line(uint64_t addr);

/**
 * Locks a specified memory region in the L2 cache.
 *
 * Note that if not all lines can be locked, that means that all
 * but one of the ways (associations) available to the locking
 * core are locked. Having only 1 association available for
 * normal caching may have a significant adverse affect on performance.
 * Care should be taken to ensure that enough of the L2 cache is left
 * unlocked to allow for normal caching of DRAM.
 *
 * @start:  Physical address of the start of the region to lock
 * @len:    Length (in bytes) of region to lock
 *
 * Returns Number of requested lines that where not locked.
 *	   0 on success (all locked)
 */
int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len);

/**
 * Unlock and flush a cache line from the L2 cache.
 * IMPORTANT: Must only be run by one core at a time due to use
 * of L2C debug features.
 * Note that this function will flush a matching but unlocked cache line.
 * (If address is not in L2, no lines are flushed.)
 *
 * @address:  Physical address to unlock
 *
 * Returns 0: line not unlocked
 *	   1: line unlocked
 */
int cvmx_l2c_unlock_line(uint64_t address);

/**
 * Unlocks a region of memory that is locked in the L2 cache
 *
 * @start:  start physical address
 * @len:    length (in bytes) to unlock
 *
 * Returns Number of locked lines that the call unlocked
 */
int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len);
289
290 /**
291 * Read the L2 controller tag for a given location in L2
292 *
293 * @association:
294 * Which association to read line from
295 * @index: Which way to read from.
296 *
297 * Returns l2c tag structure for line requested.
298 */
299 union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index);
300
301 /* Wrapper providing a deprecated old function name */
302 static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association,
303 uint32_t index)
304 __attribute__((deprecated));
cvmx_get_l2c_tag(uint32_t association,uint32_t index)305 static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association,
306 uint32_t index)
307 {
308 return cvmx_l2c_get_tag(association, index);
309 }
310
311
/**
 * Returns the cache index for a given physical address
 *
 * @addr:  physical address
 *
 * Returns L2 cache index
 */
uint32_t cvmx_l2c_address_to_index(uint64_t addr);

/**
 * Flushes (and unlocks) the entire L2 cache.
 * IMPORTANT: Must only be run by one core at a time due to use
 * of L2C debug features.
 */
void cvmx_l2c_flush(void);

/**
 * Get the total size of the L2 cache.
 *
 * Returns Returns the size of the L2 cache in bytes,
 *	   -1 on error (unrecognized model)
 */
int cvmx_l2c_get_cache_size_bytes(void);
334
/**
 * Return the number of sets in the L2 Cache
 *
 * Returns the number of sets
 */
int cvmx_l2c_get_num_sets(void);

/**
 * Return log base 2 of the number of sets in the L2 cache
 *
 * Returns log2 of the set count
 */
int cvmx_l2c_get_set_bits(void);

/**
 * Return the number of associations in the L2 Cache
 *
 * Returns the number of associations (ways)
 */
int cvmx_l2c_get_num_assoc(void);
353
/**
 * Flush a line from the L2 cache
 * This should only be called from one core at a time, as this routine
 * sets the core to the 'debug' core in order to flush the line.
 *
 * @assoc:  Association (or way) to flush
 * @index:  Index to flush
 */
void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index);
363
#endif /* __CVMX_L2C_H__ */