// SPDX-License-Identifier: GPL-2.0
/* SandyBridge-EP/IvyTown uncore support */
#include "uncore.h"

/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID			0x40
#define SNBEP_GIDNIDMAP			0x54

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)
#define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
					 SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00
#define SNBEP_PMON_CTL_RST		(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)
#define SNBEP_PMON_CTL_EN		(1 << 22)
#define SNBEP_PMON_CTL_INVERT		(1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK	0xff000000
#define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
#define SNBEP_CBO_MSR_OFFSET			0x20

#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000

#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}

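/*
 * Editorial note (hedged example, not part of the original driver):
 * events that need a Cbox filter are routed to the filter MSR above via
 * the extra-reg table further down.  With the "filter_opc" format
 * attribute defined below, such a request from userspace might look
 * like:
 *
 *	perf stat -e 'uncore_cbox_0/event=0x35,umask=0x01,filter_opc=0x182/'
 *
 * The opcode value 0x182 is illustrative; the authoritative event and
 * opcode encodings live in Intel's uncore performance monitoring
 * documentation, not in this file.
 */
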
/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd

/* IVBEP event control */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))

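/*
 * __BITS_VALUE() extracts the i-th n-bit field of x.  For example,
 * __BITS_VALUE(ref, 2, 6) returns bits [17:12] of 'ref'; the per-field
 * reference counts packed into the shared-register atomics below use
 * exactly this layout.
 */
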
/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0			0x709
#define HSWEP_U_MSR_PMON_CTL0			0x705
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704

#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL		0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
#define HSWEP_CBO_MSR_OFFSET			0x10

#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL		0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715

/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
						SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET			0xc
#define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
					 KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
#define KNL_UCLK_MSR_PMON_CTL0			0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
#define KNL_PMON_FIXED_CTL_EN			0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_CBO_PMON_CTL_TID_EN | \
				 SNBEP_PMON_CTL_INVERT | \
				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID			0xc0
#define SKX_GIDNIDMAP			0xd4

/*
 * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSR
 * that BIOS programmed. MSR has package scope.
 * |  Bit  |  Default  |  Description
 * | [63]  |    00h    | VALID - When set, indicates the CPU bus
 *                       numbers have been initialized. (RO)
 * |[62:48]|    ---    | Reserved
 * |[47:40]|    00h    | BUS_NUM_5 — Return the bus number BIOS assigned
 *                       CPUBUSNO(5). (RO)
 * |[39:32]|    00h    | BUS_NUM_4 — Return the bus number BIOS assigned
 *                       CPUBUSNO(4). (RO)
 * |[31:24]|    00h    | BUS_NUM_3 — Return the bus number BIOS assigned
 *                       CPUBUSNO(3). (RO)
 * |[23:16]|    00h    | BUS_NUM_2 — Return the bus number BIOS assigned
 *                       CPUBUSNO(2). (RO)
 * |[15:8] |    00h    | BUS_NUM_1 — Return the bus number BIOS assigned
 *                       CPUBUSNO(1). (RO)
 * | [7:0] |    00h    | BUS_NUM_0 — Return the bus number BIOS assigned
 *                       CPUBUSNO(0). (RO)
 */
#define SKX_MSR_CPU_BUS_NUMBER		0x300
#define SKX_MSR_CPU_BUS_VALID_BIT	(1ULL << 63)
#define BUS_NUM_STRIDE			8

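/*
 * Editorial sketch (derived only from the layout documented above):
 * decoding the bus number for CPUBUSNO(idx) from a CPU_BUS_NUMBER MSR
 * value is a byte extract, meaningful only once the valid bit is set:
 *
 *	if (msr & SKX_MSR_CPU_BUS_VALID_BIT)
 *		bus = (msr >> (idx * BUS_NUM_STRIDE)) & 0xff;
 */
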
/* SKX CHA */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0		0xa48
#define SKX_IIO0_MSR_PMON_CTR0		0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
#define SKX_IIO_MSR_OFFSET		0x20

#define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
#define SKX_PMON_CTL_CH_MASK		(0xff << 4)
#define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SKX_PMON_CTL_TRESH_MASK)
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
					 SKX_PMON_CTL_CH_MASK | \
					 SKX_PMON_CTL_FC_MASK)

/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0		0xa5b
#define SKX_IRP0_MSR_PMON_CTR0		0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
#define SKX_IRP_MSR_OFFSET		0x20

/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0		0x350
#define SKX_UPI_PCI_PMON_CTR0		0x318
#define SKX_UPI_PCI_PMON_BOX_CTL	0x378
#define SKX_UPI_CTL_UMASK_EXT		0xffefff

/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0		0x228
#define SKX_M2M_PCI_PMON_CTR0		0x200
#define SKX_M2M_PCI_PMON_BOX_CTL	0x258

/* SNR Ubox */
#define SNR_U_MSR_PMON_CTR0			0x1f98
#define SNR_U_MSR_PMON_CTL0			0x1f91
#define SNR_U_MSR_PMON_UCLK_FIXED_CTL		0x1f93
#define SNR_U_MSR_PMON_UCLK_FIXED_CTR		0x1f94

/* SNR CHA */
#define SNR_CHA_RAW_EVENT_MASK_EXT		0x3ffffff
#define SNR_CHA_MSR_PMON_CTL0			0x1c01
#define SNR_CHA_MSR_PMON_CTR0			0x1c08
#define SNR_CHA_MSR_PMON_BOX_CTL		0x1c00
#define SNR_C0_MSR_PMON_BOX_FILTER0		0x1c05

/* SNR IIO */
#define SNR_IIO_MSR_PMON_CTL0			0x1e08
#define SNR_IIO_MSR_PMON_CTR0			0x1e01
#define SNR_IIO_MSR_PMON_BOX_CTL		0x1e00
#define SNR_IIO_MSR_OFFSET			0x10
#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT		0x7ffff

/* SNR IRP */
#define SNR_IRP0_MSR_PMON_CTL0			0x1ea8
#define SNR_IRP0_MSR_PMON_CTR0			0x1ea1
#define SNR_IRP0_MSR_PMON_BOX_CTL		0x1ea0
#define SNR_IRP_MSR_OFFSET			0x10

/* SNR M2PCIE */
#define SNR_M2PCIE_MSR_PMON_CTL0		0x1e58
#define SNR_M2PCIE_MSR_PMON_CTR0		0x1e51
#define SNR_M2PCIE_MSR_PMON_BOX_CTL		0x1e50
#define SNR_M2PCIE_MSR_OFFSET			0x10

/* SNR PCU */
#define SNR_PCU_MSR_PMON_CTL0			0x1ef1
#define SNR_PCU_MSR_PMON_CTR0			0x1ef8
#define SNR_PCU_MSR_PMON_BOX_CTL		0x1ef0
#define SNR_PCU_MSR_PMON_BOX_FILTER		0x1efc

/* SNR M2M */
#define SNR_M2M_PCI_PMON_CTL0			0x468
#define SNR_M2M_PCI_PMON_CTR0			0x440
#define SNR_M2M_PCI_PMON_BOX_CTL		0x438
#define SNR_M2M_PCI_PMON_UMASK_EXT		0xff

/* SNR PCIE3 */
#define SNR_PCIE3_PCI_PMON_CTL0			0x508
#define SNR_PCIE3_PCI_PMON_CTR0			0x4e8
#define SNR_PCIE3_PCI_PMON_BOX_CTL		0x4e0

/* SNR IMC */
#define SNR_IMC_MMIO_PMON_FIXED_CTL		0x54
#define SNR_IMC_MMIO_PMON_FIXED_CTR		0x38
#define SNR_IMC_MMIO_PMON_CTL0			0x40
#define SNR_IMC_MMIO_PMON_CTR0			0x8
#define SNR_IMC_MMIO_PMON_BOX_CTL		0x22800
#define SNR_IMC_MMIO_OFFSET			0x4000
#define SNR_IMC_MMIO_SIZE			0x4000
#define SNR_IMC_MMIO_BASE_OFFSET		0xd0
#define SNR_IMC_MMIO_BASE_MASK			0x1FFFFFFF
#define SNR_IMC_MMIO_MEM0_OFFSET		0xd8
#define SNR_IMC_MMIO_MEM0_MASK			0x7FF

/* ICX CHA */
#define ICX_C34_MSR_PMON_CTR0			0xb68
#define ICX_C34_MSR_PMON_CTL0			0xb61
#define ICX_C34_MSR_PMON_BOX_CTL		0xb60
#define ICX_C34_MSR_PMON_BOX_FILTER0		0xb65

/* ICX IIO */
#define ICX_IIO_MSR_PMON_CTL0			0xa58
#define ICX_IIO_MSR_PMON_CTR0			0xa51
#define ICX_IIO_MSR_PMON_BOX_CTL		0xa50

/* ICX IRP */
#define ICX_IRP0_MSR_PMON_CTL0			0xa4d
#define ICX_IRP0_MSR_PMON_CTR0			0xa4b
#define ICX_IRP0_MSR_PMON_BOX_CTL		0xa4a

/* ICX M2PCIE */
#define ICX_M2PCIE_MSR_PMON_CTL0		0xa46
#define ICX_M2PCIE_MSR_PMON_CTR0		0xa41
#define ICX_M2PCIE_MSR_PMON_BOX_CTL		0xa40

/* ICX UPI */
#define ICX_UPI_PCI_PMON_CTL0			0x350
#define ICX_UPI_PCI_PMON_CTR0			0x320
#define ICX_UPI_PCI_PMON_BOX_CTL		0x318
#define ICX_UPI_CTL_UMASK_EXT			0xffffff

/* ICX M3UPI */
#define ICX_M3UPI_PCI_PMON_CTL0			0xd8
#define ICX_M3UPI_PCI_PMON_CTR0			0xa8
#define ICX_M3UPI_PCI_PMON_BOX_CTL		0xa0

/* ICX IMC */
#define ICX_NUMBER_IMC_CHN			2
#define ICX_IMC_MEM_STRIDE			0x4

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");

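/*
 * Each DEFINE_UNCORE_FORMAT_ATTR() above becomes a sysfs "format" file
 * that tells perf which config bits a field occupies.  For instance,
 * with event at "config:0-7" and umask at "config:8-15", the IMC read
 * event defined later in this file can be requested as (illustrative
 * command line):
 *
 *	perf stat -e 'uncore_imc_0/event=0x04,umask=0x03/'
 */
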
static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}

static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
}

static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}
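
/*
 * Box control protocol, common to the MSR and PCI flavours above:
 * init_box resets control/counter state and arms the freeze mechanism
 * (SNBEP_PMON_BOX_CTL_INT), disable_box freezes every counter in the
 * box by setting SNBEP_PMON_BOX_CTL_FRZ, and enable_box unfreezes them
 * by clearing that bit.
 */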

static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
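
/*
 * The .scale strings above convert raw CAS counts to MiB: each CAS
 * command moves one 64-byte cache line, and 64 / 2^20 = 6.103515625e-5.
 */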

static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};

static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};

#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter

static struct intel_uncore_ops snbep_uncore_pci_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event	= snbep_uncore_pci_enable_event,
};

static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};

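/*
 * Each entry below matches on the event/umask bits named by
 * .config_mask and, on a hit, contributes .idx as a bitmask of the Cbox
 * filter fields the event needs (see snbep_cbox_filter_mask() for the
 * bit-to-field mapping).
 */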
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};

static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i;

	if (uncore_box_is_fake(box))
		return;

	for (i = 0; i < 5; i++) {
		if (reg1->alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	reg1->alloc = 0;
}
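
/*
 * The shared filter register is managed per field: each of up to five
 * fields gets a 6-bit reference count packed into er->ref (extracted
 * with __BITS_VALUE(ref, i, 6)).  A field can be shared only when every
 * user programs identical filter bits; otherwise the event falls back
 * to the empty constraint.  SNB-EP itself defines four fields; the loop
 * allows a fifth for parts that reuse this helper with a wider filter.
 */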

static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}

static u64 snbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x4)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

	return mask;
}

static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}

static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};

static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 config = reg1->config;

	if (new_idx > reg1->idx)
		config <<= 8 * (new_idx - reg1->idx);
	else
		config >>= 8 * (reg1->idx - new_idx);

	if (modify) {
		hwc->config += new_idx - reg1->idx;
		reg1->config = config;
		reg1->idx = new_idx;
	}
	return config;
}
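
/*
 * Worked example for snbep_pcu_alter_er() above: the four PCU band
 * filters live in consecutive bytes of the filter register, and events
 * 0xb..0xe select bands 0..3 (see snbep_pcu_hw_config() below).  Moving
 * an event from band idx to new_idx therefore shifts its filter byte by
 * 8 * (new_idx - idx) bits and bumps the event select by the same index
 * delta.
 */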

static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}

static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	atomic_sub(1 << (reg1->idx * 8), &er->ref);
	reg1->alloc = 0;
}

static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
	}
	return 0;
}
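
/*
 * Illustrative userspace request for a band counter (the band threshold
 * value here is a made-up example; consult the hardware documentation
 * for real encodings):
 *
 *	perf stat -e 'uncore_pcu/event=0xb,filter_band0=0x20/'
 */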

static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};

void snbep_uncore_cpu_init(void)
{
	if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = snbep_msr_uncores;
}

enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
	HSWEP_PCI_PCU_3,
};

static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
		reg1->idx = 0;
		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
		reg1->config = event->attr.config1;
		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
		reg2->config = event->attr.config2;
	}
	return 0;
}

static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int die = box->dieid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];

		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
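
/*
 * The QPI match/mask registers live on a companion PCI function, not on
 * the counting device itself; the companion is looked up per die in
 * uncore_extra_pci_dev[] and programmed above before the counter is
 * enabled.
 */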

static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group

static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};

static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};

#define NODE_ID_MASK	0x7

/*
 * build pci bus to socket mapping
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
		if (err)
			break;
		nodeid = config & NODE_ID_MASK;
		/* get the Node ID mapping */
		err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
		if (err)
			break;

		segment = pci_domain_nr(ubox_dev->bus);
		raw_spin_lock(&pci2phy_map_lock);
		map = __find_pci2phy_map(segment);
		if (!map) {
			raw_spin_unlock(&pci2phy_map_lock);
			err = -ENOMEM;
			break;
		}

		/*
		 * every three bits in the Node ID mapping register maps
		 * to a particular node.
		 */
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				map->pbus_to_physid[bus] = i;
				break;
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			if (reverse) {
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			} else {
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}
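
/*
 * Worked example (register values invented): if CPUNODEID yields
 * nodeid == 2 and GIDNIDMAP holds ...100'010'000 in binary, the 3-bit
 * groups read 0, 2, 4, ...; nodeid 2 matches group 1, so every device
 * on this bus is assigned physical package id 1.
 */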

int snbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = snbep_pci_uncores;
	uncore_pci_driver = &snbep_uncore_pci_driver;
	return 0;
}
/* end of Sandy Bridge-EP uncore support */

/* IvyTown uncore support */
static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	if (msr)
		wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
}

static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}

#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,		\
	.format_group	= &ivbep_uncore_format_group

static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static const struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static const struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};

static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
1603 	.ops		= &ivbep_uncore_msr_ops,
1604 	.format_group	= &ivbep_uncore_ubox_format_group,
1605 };
1606 
1607 static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
1608 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1609 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1610 	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1611 	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1612 	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1613 	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
1614 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1615 	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
1616 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1617 	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
1618 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1619 	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
1620 	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1621 	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1622 	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1623 	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1624 	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1625 	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1626 	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1627 	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1628 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1629 	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1630 	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1631 	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1632 	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1633 	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1634 	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1635 	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1636 	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1637 	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1638 	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1639 	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1640 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1641 	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1642 	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1643 	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1644 	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1645 	EVENT_EXTRA_END
1646 };
1647 
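/*
 * Translate the extra_reg idx bits accumulated by ivbep_cbox_hw_config()
 * into the matching CBOX filter-register mask.  E.g. fields == 0x5
 * (TID | STATE) yields FILTER_TID | FILTER_STATE, while the 0x10 bit
 * pulls in the OPC, NC, C6 and ISOC fields as a group.
 */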
1648 static u64 ivbep_cbox_filter_mask(int fields)
1649 {
1650 	u64 mask = 0;
1651 
1652 	if (fields & 0x1)
1653 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1654 	if (fields & 0x2)
1655 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1656 	if (fields & 0x4)
1657 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1658 	if (fields & 0x8)
1659 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1660 	if (fields & 0x10) {
1661 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1662 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1663 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1664 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1665 	}
1666 
1667 	return mask;
1668 }
1669 
1670 static struct event_constraint *
1671 ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1672 {
1673 	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
1674 }
1675 
1676 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1677 {
1678 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1679 	struct extra_reg *er;
1680 	int idx = 0;
1681 
1682 	for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1683 		if (er->event != (event->hw.config & er->config_mask))
1684 			continue;
1685 		idx |= er->idx;
1686 	}
1687 
1688 	if (idx) {
1689 		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1690 			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1691 		reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1692 		reg1->idx = idx;
1693 	}
1694 	return 0;
1695 }
1696 
1697 static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1698 {
1699 	struct hw_perf_event *hwc = &event->hw;
1700 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1701 
1702 	if (reg1->idx != EXTRA_REG_NONE) {
1703 		u64 filter = uncore_shared_reg_config(box, 0);
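		/*
		 * The 64-bit filter value is split across two 32-bit MSRs;
		 * on IvyTown the second filter register sits 6 MSRs above
		 * the first, hence reg + 6 below (Haswell-EP uses + 1).
		 */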
1704 		wrmsrl(reg1->reg, filter & 0xffffffff);
1705 		wrmsrl(reg1->reg + 6, filter >> 32);
1706 	}
1707 
1708 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1709 }
1710 
1711 static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
1712 	.init_box		= ivbep_uncore_msr_init_box,
1713 	.disable_box		= snbep_uncore_msr_disable_box,
1714 	.enable_box		= snbep_uncore_msr_enable_box,
1715 	.disable_event		= snbep_uncore_msr_disable_event,
1716 	.enable_event		= ivbep_cbox_enable_event,
1717 	.read_counter		= uncore_msr_read_counter,
1718 	.hw_config		= ivbep_cbox_hw_config,
1719 	.get_constraint		= ivbep_cbox_get_constraint,
1720 	.put_constraint		= snbep_cbox_put_constraint,
1721 };
1722 
1723 static struct intel_uncore_type ivbep_uncore_cbox = {
1724 	.name			= "cbox",
1725 	.num_counters		= 4,
1726 	.num_boxes		= 15,
1727 	.perf_ctr_bits		= 44,
1728 	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
1729 	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
1730 	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
1731 	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
1732 	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
1733 	.num_shared_regs	= 1,
1734 	.constraints		= snbep_uncore_cbox_constraints,
1735 	.ops			= &ivbep_uncore_cbox_ops,
1736 	.format_group		= &ivbep_uncore_cbox_format_group,
1737 };
1738 
1739 static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
1740 	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1741 	.hw_config		= snbep_pcu_hw_config,
1742 	.get_constraint		= snbep_pcu_get_constraint,
1743 	.put_constraint		= snbep_pcu_put_constraint,
1744 };
1745 
1746 static struct intel_uncore_type ivbep_uncore_pcu = {
1747 	.name			= "pcu",
1748 	.num_counters		= 4,
1749 	.num_boxes		= 1,
1750 	.perf_ctr_bits		= 48,
1751 	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
1752 	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
1753 	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
1754 	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
1755 	.num_shared_regs	= 1,
1756 	.ops			= &ivbep_uncore_pcu_ops,
1757 	.format_group		= &ivbep_uncore_pcu_format_group,
1758 };
1759 
1760 static struct intel_uncore_type *ivbep_msr_uncores[] = {
1761 	&ivbep_uncore_ubox,
1762 	&ivbep_uncore_cbox,
1763 	&ivbep_uncore_pcu,
1764 	NULL,
1765 };
1766 
1767 void ivbep_uncore_cpu_init(void)
1768 {
1769 	if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1770 		ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1771 	uncore_msr_uncores = ivbep_msr_uncores;
1772 }
1773 
1774 static struct intel_uncore_type ivbep_uncore_ha = {
1775 	.name		= "ha",
1776 	.num_counters   = 4,
1777 	.num_boxes	= 2,
1778 	.perf_ctr_bits	= 48,
1779 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1780 };
1781 
1782 static struct intel_uncore_type ivbep_uncore_imc = {
1783 	.name		= "imc",
1784 	.num_counters   = 4,
1785 	.num_boxes	= 8,
1786 	.perf_ctr_bits	= 48,
1787 	.fixed_ctr_bits	= 48,
1788 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1789 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1790 	.event_descs	= snbep_uncore_imc_events,
1791 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1792 };
1793 
1794 /* registers in IRP boxes are not properly aligned */
1795 static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
1796 static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1797 
1798 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1799 {
1800 	struct pci_dev *pdev = box->pci_dev;
1801 	struct hw_perf_event *hwc = &event->hw;
1802 
1803 	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1804 			       hwc->config | SNBEP_PMON_CTL_EN);
1805 }
1806 
1807 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1808 {
1809 	struct pci_dev *pdev = box->pci_dev;
1810 	struct hw_perf_event *hwc = &event->hw;
1811 
1812 	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1813 }
1814 
1815 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1816 {
1817 	struct pci_dev *pdev = box->pci_dev;
1818 	struct hw_perf_event *hwc = &event->hw;
1819 	u64 count = 0;
1820 
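	/*
	 * Assemble the 64-bit count from two 32-bit config-space reads;
	 * the per-index lookup table above handles the odd register layout.
	 */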
1821 	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1822 	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1823 
1824 	return count;
1825 }
1826 
1827 static struct intel_uncore_ops ivbep_uncore_irp_ops = {
1828 	.init_box	= ivbep_uncore_pci_init_box,
1829 	.disable_box	= snbep_uncore_pci_disable_box,
1830 	.enable_box	= snbep_uncore_pci_enable_box,
1831 	.disable_event	= ivbep_uncore_irp_disable_event,
1832 	.enable_event	= ivbep_uncore_irp_enable_event,
1833 	.read_counter	= ivbep_uncore_irp_read_counter,
1834 };
1835 
1836 static struct intel_uncore_type ivbep_uncore_irp = {
1837 	.name			= "irp",
1838 	.num_counters		= 4,
1839 	.num_boxes		= 1,
1840 	.perf_ctr_bits		= 48,
1841 	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
1842 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
1843 	.ops			= &ivbep_uncore_irp_ops,
1844 	.format_group		= &ivbep_uncore_format_group,
1845 };
1846 
1847 static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
1848 	.init_box	= ivbep_uncore_pci_init_box,
1849 	.disable_box	= snbep_uncore_pci_disable_box,
1850 	.enable_box	= snbep_uncore_pci_enable_box,
1851 	.disable_event	= snbep_uncore_pci_disable_event,
1852 	.enable_event	= snbep_qpi_enable_event,
1853 	.read_counter	= snbep_uncore_pci_read_counter,
1854 	.hw_config	= snbep_qpi_hw_config,
1855 	.get_constraint	= uncore_get_constraint,
1856 	.put_constraint	= uncore_put_constraint,
1857 };
1858 
1859 static struct intel_uncore_type ivbep_uncore_qpi = {
1860 	.name			= "qpi",
1861 	.num_counters		= 4,
1862 	.num_boxes		= 3,
1863 	.perf_ctr_bits		= 48,
1864 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
1865 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
1866 	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
1867 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
1868 	.num_shared_regs	= 1,
1869 	.ops			= &ivbep_uncore_qpi_ops,
1870 	.format_group		= &ivbep_uncore_qpi_format_group,
1871 };
1872 
1873 static struct intel_uncore_type ivbep_uncore_r2pcie = {
1874 	.name		= "r2pcie",
1875 	.num_counters   = 4,
1876 	.num_boxes	= 1,
1877 	.perf_ctr_bits	= 44,
1878 	.constraints	= snbep_uncore_r2pcie_constraints,
1879 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1880 };
1881 
1882 static struct intel_uncore_type ivbep_uncore_r3qpi = {
1883 	.name		= "r3qpi",
1884 	.num_counters   = 3,
1885 	.num_boxes	= 2,
1886 	.perf_ctr_bits	= 44,
1887 	.constraints	= snbep_uncore_r3qpi_constraints,
1888 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1889 };
1890 
1891 enum {
1892 	IVBEP_PCI_UNCORE_HA,
1893 	IVBEP_PCI_UNCORE_IMC,
1894 	IVBEP_PCI_UNCORE_IRP,
1895 	IVBEP_PCI_UNCORE_QPI,
1896 	IVBEP_PCI_UNCORE_R2PCIE,
1897 	IVBEP_PCI_UNCORE_R3QPI,
1898 };
1899 
1900 static struct intel_uncore_type *ivbep_pci_uncores[] = {
1901 	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
1902 	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
1903 	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
1904 	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
1905 	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
1906 	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
1907 	NULL,
1908 };
1909 
1910 static const struct pci_device_id ivbep_uncore_pci_ids[] = {
1911 	{ /* Home Agent 0 */
1912 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1913 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
1914 	},
1915 	{ /* Home Agent 1 */
1916 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1917 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
1918 	},
1919 	{ /* MC0 Channel 0 */
1920 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1921 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
1922 	},
1923 	{ /* MC0 Channel 1 */
1924 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1925 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
1926 	},
1927 	{ /* MC0 Channel 3 */
1928 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
1929 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
1930 	},
1931 	{ /* MC0 Channel 4 */
1932 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
1933 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
1934 	},
1935 	{ /* MC1 Channel 0 */
1936 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
1937 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
1938 	},
1939 	{ /* MC1 Channel 1 */
1940 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
1941 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
1942 	},
1943 	{ /* MC1 Channel 3 */
1944 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
1945 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
1946 	},
1947 	{ /* MC1 Channel 4 */
1948 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
1949 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
1950 	},
1951 	{ /* IRP */
1952 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
1953 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
1954 	},
1955 	{ /* QPI0 Port 0 */
1956 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
1957 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
1958 	},
1959 	{ /* QPI0 Port 1 */
1960 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
1961 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
1962 	},
1963 	{ /* QPI1 Port 2 */
1964 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
1965 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
1966 	},
1967 	{ /* R2PCIe */
1968 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
1969 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
1970 	},
1971 	{ /* R3QPI0 Link 0 */
1972 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
1973 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
1974 	},
1975 	{ /* R3QPI0 Link 1 */
1976 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
1977 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
1978 	},
1979 	{ /* R3QPI1 Link 2 */
1980 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
1981 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
1982 	},
1983 	{ /* QPI Port 0 filter  */
1984 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
1985 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1986 						   SNBEP_PCI_QPI_PORT0_FILTER),
1987 	},
1988 	{ /* QPI Port 1 filter  */
1989 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
1990 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1991 						   SNBEP_PCI_QPI_PORT1_FILTER),
1992 	},
1993 	{ /* end: all zeroes */ }
1994 };
1995 
1996 static struct pci_driver ivbep_uncore_pci_driver = {
1997 	.name		= "ivbep_uncore",
1998 	.id_table	= ivbep_uncore_pci_ids,
1999 };
2000 
2001 int ivbep_uncore_pci_init(void)
2002 {
2003 	int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
2004 	if (ret)
2005 		return ret;
2006 	uncore_pci_uncores = ivbep_pci_uncores;
2007 	uncore_pci_driver = &ivbep_uncore_pci_driver;
2008 	return 0;
2009 }
2010 /* end of IvyTown uncore support */
2011 
2012 /* KNL uncore support */
2013 static struct attribute *knl_uncore_ubox_formats_attr[] = {
2014 	&format_attr_event.attr,
2015 	&format_attr_umask.attr,
2016 	&format_attr_edge.attr,
2017 	&format_attr_tid_en.attr,
2018 	&format_attr_inv.attr,
2019 	&format_attr_thresh5.attr,
2020 	NULL,
2021 };
2022 
2023 static const struct attribute_group knl_uncore_ubox_format_group = {
2024 	.name = "format",
2025 	.attrs = knl_uncore_ubox_formats_attr,
2026 };
2027 
2028 static struct intel_uncore_type knl_uncore_ubox = {
2029 	.name			= "ubox",
2030 	.num_counters		= 2,
2031 	.num_boxes		= 1,
2032 	.perf_ctr_bits		= 48,
2033 	.fixed_ctr_bits		= 48,
2034 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
2035 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
2036 	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
2037 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2038 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2039 	.ops			= &snbep_uncore_msr_ops,
2040 	.format_group		= &knl_uncore_ubox_format_group,
2041 };
2042 
2043 static struct attribute *knl_uncore_cha_formats_attr[] = {
2044 	&format_attr_event.attr,
2045 	&format_attr_umask.attr,
2046 	&format_attr_qor.attr,
2047 	&format_attr_edge.attr,
2048 	&format_attr_tid_en.attr,
2049 	&format_attr_inv.attr,
2050 	&format_attr_thresh8.attr,
2051 	&format_attr_filter_tid4.attr,
2052 	&format_attr_filter_link3.attr,
2053 	&format_attr_filter_state4.attr,
2054 	&format_attr_filter_local.attr,
2055 	&format_attr_filter_all_op.attr,
2056 	&format_attr_filter_nnm.attr,
2057 	&format_attr_filter_opc3.attr,
2058 	&format_attr_filter_nc.attr,
2059 	&format_attr_filter_isoc.attr,
2060 	NULL,
2061 };
2062 
2063 static const struct attribute_group knl_uncore_cha_format_group = {
2064 	.name = "format",
2065 	.attrs = knl_uncore_cha_formats_attr,
2066 };
2067 
2068 static struct event_constraint knl_uncore_cha_constraints[] = {
2069 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2070 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
2071 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2072 	EVENT_CONSTRAINT_END
2073 };
2074 
2075 static struct extra_reg knl_uncore_cha_extra_regs[] = {
2076 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2077 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2078 	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
2079 	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
2080 	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
2081 	EVENT_EXTRA_END
2082 };
2083 
2084 static u64 knl_cha_filter_mask(int fields)
2085 {
2086 	u64 mask = 0;
2087 
2088 	if (fields & 0x1)
2089 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2090 	if (fields & 0x2)
2091 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2092 	if (fields & 0x4)
2093 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2094 	return mask;
2095 }
2096 
2097 static struct event_constraint *
2098 knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2099 {
2100 	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
2101 }
2102 
2103 static int knl_cha_hw_config(struct intel_uncore_box *box,
2104 			     struct perf_event *event)
2105 {
2106 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2107 	struct extra_reg *er;
2108 	int idx = 0;
2109 
2110 	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
2111 		if (er->event != (event->hw.config & er->config_mask))
2112 			continue;
2113 		idx |= er->idx;
2114 	}
2115 
2116 	if (idx) {
2117 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2118 			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
2119 		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
2120 
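		/*
		 * Always set the node filter bits so that local, remote and
		 * non-NUMA-coherent (NNC) accesses all match by default.
		 */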
2121 		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
2122 		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
2123 		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
2124 		reg1->idx = idx;
2125 	}
2126 	return 0;
2127 }
2128 
2129 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2130 				    struct perf_event *event);
2131 
2132 static struct intel_uncore_ops knl_uncore_cha_ops = {
2133 	.init_box		= snbep_uncore_msr_init_box,
2134 	.disable_box		= snbep_uncore_msr_disable_box,
2135 	.enable_box		= snbep_uncore_msr_enable_box,
2136 	.disable_event		= snbep_uncore_msr_disable_event,
2137 	.enable_event		= hswep_cbox_enable_event,
2138 	.read_counter		= uncore_msr_read_counter,
2139 	.hw_config		= knl_cha_hw_config,
2140 	.get_constraint		= knl_cha_get_constraint,
2141 	.put_constraint		= snbep_cbox_put_constraint,
2142 };
2143 
2144 static struct intel_uncore_type knl_uncore_cha = {
2145 	.name			= "cha",
2146 	.num_counters		= 4,
2147 	.num_boxes		= 38,
2148 	.perf_ctr_bits		= 48,
2149 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
2150 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
2151 	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
2152 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
2153 	.msr_offset		= KNL_CHA_MSR_OFFSET,
2154 	.num_shared_regs	= 1,
2155 	.constraints		= knl_uncore_cha_constraints,
2156 	.ops			= &knl_uncore_cha_ops,
2157 	.format_group		= &knl_uncore_cha_format_group,
2158 };
2159 
2160 static struct attribute *knl_uncore_pcu_formats_attr[] = {
2161 	&format_attr_event2.attr,
2162 	&format_attr_use_occ_ctr.attr,
2163 	&format_attr_occ_sel.attr,
2164 	&format_attr_edge.attr,
2165 	&format_attr_tid_en.attr,
2166 	&format_attr_inv.attr,
2167 	&format_attr_thresh6.attr,
2168 	&format_attr_occ_invert.attr,
2169 	&format_attr_occ_edge_det.attr,
2170 	NULL,
2171 };
2172 
2173 static const struct attribute_group knl_uncore_pcu_format_group = {
2174 	.name = "format",
2175 	.attrs = knl_uncore_pcu_formats_attr,
2176 };
2177 
2178 static struct intel_uncore_type knl_uncore_pcu = {
2179 	.name			= "pcu",
2180 	.num_counters		= 4,
2181 	.num_boxes		= 1,
2182 	.perf_ctr_bits		= 48,
2183 	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
2184 	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
2185 	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
2186 	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
2187 	.ops			= &snbep_uncore_msr_ops,
2188 	.format_group		= &knl_uncore_pcu_format_group,
2189 };
2190 
2191 static struct intel_uncore_type *knl_msr_uncores[] = {
2192 	&knl_uncore_ubox,
2193 	&knl_uncore_cha,
2194 	&knl_uncore_pcu,
2195 	NULL,
2196 };
2197 
2198 void knl_uncore_cpu_init(void)
2199 {
2200 	uncore_msr_uncores = knl_msr_uncores;
2201 }
2202 
2203 static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
2204 {
2205 	struct pci_dev *pdev = box->pci_dev;
2206 	int box_ctl = uncore_pci_box_ctl(box);
2207 
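	/*
	 * Writing 0 clears the box control register, including the freeze
	 * bits, which is what (re-)enables counting here.
	 */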
2208 	pci_write_config_dword(pdev, box_ctl, 0);
2209 }
2210 
2211 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2212 					struct perf_event *event)
2213 {
2214 	struct pci_dev *pdev = box->pci_dev;
2215 	struct hw_perf_event *hwc = &event->hw;
2216 
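	/*
	 * The fixed counter has its own enable bit; all other counters use
	 * the usual SNBEP-style per-event enable.
	 */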
2217 	if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2218 							== UNCORE_FIXED_EVENT)
2219 		pci_write_config_dword(pdev, hwc->config_base,
2220 				       hwc->config | KNL_PMON_FIXED_CTL_EN);
2221 	else
2222 		pci_write_config_dword(pdev, hwc->config_base,
2223 				       hwc->config | SNBEP_PMON_CTL_EN);
2224 }
2225 
2226 static struct intel_uncore_ops knl_uncore_imc_ops = {
2227 	.init_box	= snbep_uncore_pci_init_box,
2228 	.disable_box	= snbep_uncore_pci_disable_box,
2229 	.enable_box	= knl_uncore_imc_enable_box,
2230 	.read_counter	= snbep_uncore_pci_read_counter,
2231 	.enable_event	= knl_uncore_imc_enable_event,
2232 	.disable_event	= snbep_uncore_pci_disable_event,
2233 };
2234 
2235 static struct intel_uncore_type knl_uncore_imc_uclk = {
2236 	.name			= "imc_uclk",
2237 	.num_counters		= 4,
2238 	.num_boxes		= 2,
2239 	.perf_ctr_bits		= 48,
2240 	.fixed_ctr_bits		= 48,
2241 	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
2242 	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
2243 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2244 	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2245 	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2246 	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
2247 	.ops			= &knl_uncore_imc_ops,
2248 	.format_group		= &snbep_uncore_format_group,
2249 };
2250 
2251 static struct intel_uncore_type knl_uncore_imc_dclk = {
2252 	.name			= "imc",
2253 	.num_counters		= 4,
2254 	.num_boxes		= 6,
2255 	.perf_ctr_bits		= 48,
2256 	.fixed_ctr_bits		= 48,
2257 	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
2258 	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
2259 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2260 	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
2261 	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
2262 	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
2263 	.ops			= &knl_uncore_imc_ops,
2264 	.format_group		= &snbep_uncore_format_group,
2265 };
2266 
2267 static struct intel_uncore_type knl_uncore_edc_uclk = {
2268 	.name			= "edc_uclk",
2269 	.num_counters		= 4,
2270 	.num_boxes		= 8,
2271 	.perf_ctr_bits		= 48,
2272 	.fixed_ctr_bits		= 48,
2273 	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
2274 	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
2275 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2276 	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2277 	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2278 	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
2279 	.ops			= &knl_uncore_imc_ops,
2280 	.format_group		= &snbep_uncore_format_group,
2281 };
2282 
2283 static struct intel_uncore_type knl_uncore_edc_eclk = {
2284 	.name			= "edc_eclk",
2285 	.num_counters		= 4,
2286 	.num_boxes		= 8,
2287 	.perf_ctr_bits		= 48,
2288 	.fixed_ctr_bits		= 48,
2289 	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
2290 	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
2291 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2292 	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
2293 	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
2294 	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
2295 	.ops			= &knl_uncore_imc_ops,
2296 	.format_group		= &snbep_uncore_format_group,
2297 };
2298 
2299 static struct event_constraint knl_uncore_m2pcie_constraints[] = {
2300 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
2301 	EVENT_CONSTRAINT_END
2302 };
2303 
2304 static struct intel_uncore_type knl_uncore_m2pcie = {
2305 	.name		= "m2pcie",
2306 	.num_counters   = 4,
2307 	.num_boxes	= 1,
2308 	.perf_ctr_bits	= 48,
2309 	.constraints	= knl_uncore_m2pcie_constraints,
2310 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2311 };
2312 
2313 static struct attribute *knl_uncore_irp_formats_attr[] = {
2314 	&format_attr_event.attr,
2315 	&format_attr_umask.attr,
2316 	&format_attr_qor.attr,
2317 	&format_attr_edge.attr,
2318 	&format_attr_inv.attr,
2319 	&format_attr_thresh8.attr,
2320 	NULL,
2321 };
2322 
2323 static const struct attribute_group knl_uncore_irp_format_group = {
2324 	.name = "format",
2325 	.attrs = knl_uncore_irp_formats_attr,
2326 };
2327 
2328 static struct intel_uncore_type knl_uncore_irp = {
2329 	.name			= "irp",
2330 	.num_counters		= 2,
2331 	.num_boxes		= 1,
2332 	.perf_ctr_bits		= 48,
2333 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
2334 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
2335 	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
2336 	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
2337 	.ops			= &snbep_uncore_pci_ops,
2338 	.format_group		= &knl_uncore_irp_format_group,
2339 };
2340 
2341 enum {
2342 	KNL_PCI_UNCORE_MC_UCLK,
2343 	KNL_PCI_UNCORE_MC_DCLK,
2344 	KNL_PCI_UNCORE_EDC_UCLK,
2345 	KNL_PCI_UNCORE_EDC_ECLK,
2346 	KNL_PCI_UNCORE_M2PCIE,
2347 	KNL_PCI_UNCORE_IRP,
2348 };
2349 
2350 static struct intel_uncore_type *knl_pci_uncores[] = {
2351 	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
2352 	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
2353 	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
2354 	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
2355 	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
2356 	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
2357 	NULL,
2358 };
2359 
2360 /*
2361  * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
2362  * device type. Prior to KNL, each instance of a PMU device type had a unique
2363  * device ID.
2364  *
2365  *	PCI Device ID	Uncore PMU Devices
2366  *	----------------------------------
2367  *	0x7841		MC0 UClk, MC1 UClk
2368  *	0x7843		MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2369  *			MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2370  *	0x7833		EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2371  *			EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2372  *	0x7835		EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2373  *			EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2374  *	0x7817		M2PCIe
2375  *	0x7814		IRP
2376  */
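/*
 * Because the IDs above are shared, the table below disambiguates the
 * instances by PCI device/function number via UNCORE_PCI_DEV_FULL_DATA()
 * (e.g. MC0 UClk is device 10, function 0) rather than by device ID alone.
 */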
2377 
2378 static const struct pci_device_id knl_uncore_pci_ids[] = {
2379 	{ /* MC0 UClk */
2380 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2381 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
2382 	},
2383 	{ /* MC1 UClk */
2384 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2385 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
2386 	},
2387 	{ /* MC0 DClk CH 0 */
2388 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2389 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
2390 	},
2391 	{ /* MC0 DClk CH 1 */
2392 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2393 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
2394 	},
2395 	{ /* MC0 DClk CH 2 */
2396 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2397 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
2398 	},
2399 	{ /* MC1 DClk CH 0 */
2400 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2401 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
2402 	},
2403 	{ /* MC1 DClk CH 1 */
2404 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2405 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
2406 	},
2407 	{ /* MC1 DClk CH 2 */
2408 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2409 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
2410 	},
2411 	{ /* EDC0 UClk */
2412 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2413 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
2414 	},
2415 	{ /* EDC1 UClk */
2416 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2417 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
2418 	},
2419 	{ /* EDC2 UClk */
2420 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2421 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
2422 	},
2423 	{ /* EDC3 UClk */
2424 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2425 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
2426 	},
2427 	{ /* EDC4 UClk */
2428 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2429 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
2430 	},
2431 	{ /* EDC5 UClk */
2432 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2433 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
2434 	},
2435 	{ /* EDC6 UClk */
2436 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2437 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
2438 	},
2439 	{ /* EDC7 UClk */
2440 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2441 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
2442 	},
2443 	{ /* EDC0 EClk */
2444 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2445 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
2446 	},
2447 	{ /* EDC1 EClk */
2448 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2449 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
2450 	},
2451 	{ /* EDC2 EClk */
2452 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2453 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
2454 	},
2455 	{ /* EDC3 EClk */
2456 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2457 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
2458 	},
2459 	{ /* EDC4 EClk */
2460 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2461 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
2462 	},
2463 	{ /* EDC5 EClk */
2464 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2465 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
2466 	},
2467 	{ /* EDC6 EClk */
2468 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2469 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
2470 	},
2471 	{ /* EDC7 EClk */
2472 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2473 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
2474 	},
2475 	{ /* M2PCIe */
2476 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
2477 		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
2478 	},
2479 	{ /* IRP */
2480 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
2481 		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
2482 	},
2483 	{ /* end: all zeroes */ }
2484 };
2485 
2486 static struct pci_driver knl_uncore_pci_driver = {
2487 	.name		= "knl_uncore",
2488 	.id_table	= knl_uncore_pci_ids,
2489 };
2490 
2491 int knl_uncore_pci_init(void)
2492 {
2493 	int ret;
2494 
2495 	/* All KNL PCI based PMON units are on the same PCI bus except IRP */
2496 	ret = snb_pci2phy_map_init(0x7814); /* IRP */
2497 	if (ret)
2498 		return ret;
2499 	ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2500 	if (ret)
2501 		return ret;
2502 	uncore_pci_uncores = knl_pci_uncores;
2503 	uncore_pci_driver = &knl_uncore_pci_driver;
2504 	return 0;
2505 }
2506 
2507 /* end of KNL uncore support */
2508 
2509 /* Haswell-EP uncore support */
2510 static struct attribute *hswep_uncore_ubox_formats_attr[] = {
2511 	&format_attr_event.attr,
2512 	&format_attr_umask.attr,
2513 	&format_attr_edge.attr,
2514 	&format_attr_inv.attr,
2515 	&format_attr_thresh5.attr,
2516 	&format_attr_filter_tid2.attr,
2517 	&format_attr_filter_cid.attr,
2518 	NULL,
2519 };
2520 
2521 static const struct attribute_group hswep_uncore_ubox_format_group = {
2522 	.name = "format",
2523 	.attrs = hswep_uncore_ubox_formats_attr,
2524 };
2525 
2526 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2527 {
2528 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2529 	reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2530 	reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2531 	reg1->idx = 0;
2532 	return 0;
2533 }
2534 
2535 static struct intel_uncore_ops hswep_uncore_ubox_ops = {
2536 	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2537 	.hw_config		= hswep_ubox_hw_config,
2538 	.get_constraint		= uncore_get_constraint,
2539 	.put_constraint		= uncore_put_constraint,
2540 };
2541 
2542 static struct intel_uncore_type hswep_uncore_ubox = {
2543 	.name			= "ubox",
2544 	.num_counters		= 2,
2545 	.num_boxes		= 1,
2546 	.perf_ctr_bits		= 44,
2547 	.fixed_ctr_bits		= 48,
2548 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
2549 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
2550 	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
2551 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2552 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2553 	.num_shared_regs	= 1,
2554 	.ops			= &hswep_uncore_ubox_ops,
2555 	.format_group		= &hswep_uncore_ubox_format_group,
2556 };
2557 
2558 static struct attribute *hswep_uncore_cbox_formats_attr[] = {
2559 	&format_attr_event.attr,
2560 	&format_attr_umask.attr,
2561 	&format_attr_edge.attr,
2562 	&format_attr_tid_en.attr,
2563 	&format_attr_thresh8.attr,
2564 	&format_attr_filter_tid3.attr,
2565 	&format_attr_filter_link2.attr,
2566 	&format_attr_filter_state3.attr,
2567 	&format_attr_filter_nid2.attr,
2568 	&format_attr_filter_opc2.attr,
2569 	&format_attr_filter_nc.attr,
2570 	&format_attr_filter_c6.attr,
2571 	&format_attr_filter_isoc.attr,
2572 	NULL,
2573 };
2574 
2575 static const struct attribute_group hswep_uncore_cbox_format_group = {
2576 	.name = "format",
2577 	.attrs = hswep_uncore_cbox_formats_attr,
2578 };
2579 
2580 static struct event_constraint hswep_uncore_cbox_constraints[] = {
2581 	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
2582 	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
2583 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2584 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2585 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
2586 	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
2587 	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
2588 	EVENT_CONSTRAINT_END
2589 };
2590 
2591 static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
2592 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2593 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2594 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
2595 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
2596 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
2597 	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
2598 	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
2599 	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
2600 	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
2601 	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
2602 	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
2603 	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
2604 	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
2605 	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
2606 	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
2607 	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
2608 	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
2609 	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
2610 	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
2611 	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
2612 	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
2613 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
2614 	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
2615 	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
2616 	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
2617 	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
2618 	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
2619 	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
2620 	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
2621 	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
2622 	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
2623 	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
2624 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
2625 	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
2626 	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
2627 	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
2628 	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
2629 	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
2630 	EVENT_EXTRA_END
2631 };
2632 
2633 static u64 hswep_cbox_filter_mask(int fields)
2634 {
2635 	u64 mask = 0;
2636 	if (fields & 0x1)
2637 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2638 	if (fields & 0x2)
2639 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2640 	if (fields & 0x4)
2641 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2642 	if (fields & 0x8)
2643 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2644 	if (fields & 0x10) {
2645 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2646 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2647 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2648 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2649 	}
2650 	return mask;
2651 }
2652 
2653 static struct event_constraint *
2654 hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2655 {
2656 	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
2657 }
2658 
2659 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2660 {
2661 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2662 	struct extra_reg *er;
2663 	int idx = 0;
2664 
2665 	for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2666 		if (er->event != (event->hw.config & er->config_mask))
2667 			continue;
2668 		idx |= er->idx;
2669 	}
2670 
2671 	if (idx) {
2672 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2673 			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2674 		reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2675 		reg1->idx = idx;
2676 	}
2677 	return 0;
2678 }
2679 
2680 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2681 				  struct perf_event *event)
2682 {
2683 	struct hw_perf_event *hwc = &event->hw;
2684 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2685 
2686 	if (reg1->idx != EXTRA_REG_NONE) {
2687 		u64 filter = uncore_shared_reg_config(box, 0);
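		/*
		 * Unlike IvyTown (FILTER + 6), the high half of the filter
		 * lives in the very next MSR on Haswell-EP.
		 */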
2688 		wrmsrl(reg1->reg, filter & 0xffffffff);
2689 		wrmsrl(reg1->reg + 1, filter >> 32);
2690 	}
2691 
2692 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
2693 }
2694 
2695 static struct intel_uncore_ops hswep_uncore_cbox_ops = {
2696 	.init_box		= snbep_uncore_msr_init_box,
2697 	.disable_box		= snbep_uncore_msr_disable_box,
2698 	.enable_box		= snbep_uncore_msr_enable_box,
2699 	.disable_event		= snbep_uncore_msr_disable_event,
2700 	.enable_event		= hswep_cbox_enable_event,
2701 	.read_counter		= uncore_msr_read_counter,
2702 	.hw_config		= hswep_cbox_hw_config,
2703 	.get_constraint		= hswep_cbox_get_constraint,
2704 	.put_constraint		= snbep_cbox_put_constraint,
2705 };
2706 
2707 static struct intel_uncore_type hswep_uncore_cbox = {
2708 	.name			= "cbox",
2709 	.num_counters		= 4,
2710 	.num_boxes		= 18,
2711 	.perf_ctr_bits		= 48,
2712 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
2713 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
2714 	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
2715 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
2716 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
2717 	.num_shared_regs	= 1,
2718 	.constraints		= hswep_uncore_cbox_constraints,
2719 	.ops			= &hswep_uncore_cbox_ops,
2720 	.format_group		= &hswep_uncore_cbox_format_group,
2721 };
2722 
2723 /*
2724  * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2725  */
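/*
 * (A single write of the composite SNBEP_PMON_BOX_CTL_INT value has been
 * seen to #GP, presumably because not every control bit is implemented on
 * every part, so the bits are OR-ed in one wrmsrl() at a time.)
 */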
2726 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2727 {
2728 	unsigned msr = uncore_msr_box_ctl(box);
2729 
2730 	if (msr) {
2731 		u64 init = SNBEP_PMON_BOX_CTL_INT;
2732 		u64 flags = 0;
2733 		int i;
2734 
2735 		for_each_set_bit(i, (unsigned long *)&init, 64) {
2736 			flags |= (1ULL << i);
2737 			wrmsrl(msr, flags);
2738 		}
2739 	}
2740 }
2741 
2742 static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
2743 	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2744 	.init_box		= hswep_uncore_sbox_msr_init_box
2745 };
2746 
2747 static struct attribute *hswep_uncore_sbox_formats_attr[] = {
2748 	&format_attr_event.attr,
2749 	&format_attr_umask.attr,
2750 	&format_attr_edge.attr,
2751 	&format_attr_tid_en.attr,
2752 	&format_attr_inv.attr,
2753 	&format_attr_thresh8.attr,
2754 	NULL,
2755 };
2756 
2757 static const struct attribute_group hswep_uncore_sbox_format_group = {
2758 	.name = "format",
2759 	.attrs = hswep_uncore_sbox_formats_attr,
2760 };
2761 
2762 static struct intel_uncore_type hswep_uncore_sbox = {
2763 	.name			= "sbox",
2764 	.num_counters		= 4,
2765 	.num_boxes		= 4,
2766 	.perf_ctr_bits		= 44,
2767 	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
2768 	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
2769 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
2770 	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
2771 	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
2772 	.ops			= &hswep_uncore_sbox_msr_ops,
2773 	.format_group		= &hswep_uncore_sbox_format_group,
2774 };
2775 
2776 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2777 {
2778 	struct hw_perf_event *hwc = &event->hw;
2779 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2780 	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2781 
2782 	if (ev_sel >= 0xb && ev_sel <= 0xe) {
2783 		reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2784 		reg1->idx = ev_sel - 0xb;
2785 		reg1->config = event->attr.config1 & (0xff << reg1->idx);
2786 	}
2787 	return 0;
2788 }
2789 
2790 static struct intel_uncore_ops hswep_uncore_pcu_ops = {
2791 	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2792 	.hw_config		= hswep_pcu_hw_config,
2793 	.get_constraint		= snbep_pcu_get_constraint,
2794 	.put_constraint		= snbep_pcu_put_constraint,
2795 };
2796 
2797 static struct intel_uncore_type hswep_uncore_pcu = {
2798 	.name			= "pcu",
2799 	.num_counters		= 4,
2800 	.num_boxes		= 1,
2801 	.perf_ctr_bits		= 48,
2802 	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
2803 	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
2804 	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
2805 	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
2806 	.num_shared_regs	= 1,
2807 	.ops			= &hswep_uncore_pcu_ops,
2808 	.format_group		= &snbep_uncore_pcu_format_group,
2809 };
2810 
2811 static struct intel_uncore_type *hswep_msr_uncores[] = {
2812 	&hswep_uncore_ubox,
2813 	&hswep_uncore_cbox,
2814 	&hswep_uncore_sbox,
2815 	&hswep_uncore_pcu,
2816 	NULL,
2817 };
2818 
2819 void hswep_uncore_cpu_init(void)
2820 {
2821 	int pkg = boot_cpu_data.logical_proc_id;
2822 
2823 	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2824 		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2825 
2826 	/* Detect 6-8 core systems with only two SBOXes */
2827 	if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
2828 		u32 capid4;
2829 
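		/*
		 * CAPID4 bits 7:6 reflect the SBOX population; the value 0
		 * identifies the 6-8 core parts with only two SBOXes.
		 */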
2830 		pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
2831 				      0x94, &capid4);
2832 		if (((capid4 >> 6) & 0x3) == 0)
2833 			hswep_uncore_sbox.num_boxes = 2;
2834 	}
2835 
2836 	uncore_msr_uncores = hswep_msr_uncores;
2837 }
2838 
2839 static struct intel_uncore_type hswep_uncore_ha = {
2840 	.name		= "ha",
2841 	.num_counters   = 4,
2842 	.num_boxes	= 2,
2843 	.perf_ctr_bits	= 48,
2844 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2845 };
2846 
2847 static struct uncore_event_desc hswep_uncore_imc_events[] = {
2848 	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
2849 	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
2850 	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
2851 	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
2852 	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
2853 	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
2854 	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
2855 	{ /* end: all zeroes */ },
2856 };
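/*
 * The CAS count scale above converts 64-byte cache-line transfers to MiB:
 * 64 / 2^20 = 6.103515625e-5.
 */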
2857 
2858 static struct intel_uncore_type hswep_uncore_imc = {
2859 	.name		= "imc",
2860 	.num_counters   = 4,
2861 	.num_boxes	= 8,
2862 	.perf_ctr_bits	= 48,
2863 	.fixed_ctr_bits	= 48,
2864 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
2865 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
2866 	.event_descs	= hswep_uncore_imc_events,
2867 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2868 };
2869 
2870 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2871 
2872 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2873 {
2874 	struct pci_dev *pdev = box->pci_dev;
2875 	struct hw_perf_event *hwc = &event->hw;
2876 	u64 count = 0;
2877 
2878 	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2879 	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2880 
2881 	return count;
2882 }
2883 
2884 static struct intel_uncore_ops hswep_uncore_irp_ops = {
2885 	.init_box	= snbep_uncore_pci_init_box,
2886 	.disable_box	= snbep_uncore_pci_disable_box,
2887 	.enable_box	= snbep_uncore_pci_enable_box,
2888 	.disable_event	= ivbep_uncore_irp_disable_event,
2889 	.enable_event	= ivbep_uncore_irp_enable_event,
2890 	.read_counter	= hswep_uncore_irp_read_counter,
2891 };
2892 
2893 static struct intel_uncore_type hswep_uncore_irp = {
2894 	.name			= "irp",
2895 	.num_counters		= 4,
2896 	.num_boxes		= 1,
2897 	.perf_ctr_bits		= 48,
2898 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2899 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
2900 	.ops			= &hswep_uncore_irp_ops,
2901 	.format_group		= &snbep_uncore_format_group,
2902 };
2903 
2904 static struct intel_uncore_type hswep_uncore_qpi = {
2905 	.name			= "qpi",
2906 	.num_counters		= 4,
2907 	.num_boxes		= 3,
2908 	.perf_ctr_bits		= 48,
2909 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
2910 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
2911 	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
2912 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
2913 	.num_shared_regs	= 1,
2914 	.ops			= &snbep_uncore_qpi_ops,
2915 	.format_group		= &snbep_uncore_qpi_format_group,
2916 };
2917 
2918 static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
2919 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
2920 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
2921 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
2922 	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
2923 	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
2924 	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
2925 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
2926 	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
2927 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
2928 	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
2929 	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
2930 	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
2931 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
2932 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
2933 	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
2934 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
2935 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
2936 	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
2937 	EVENT_CONSTRAINT_END
2938 };
2939 
2940 static struct intel_uncore_type hswep_uncore_r2pcie = {
2941 	.name		= "r2pcie",
2942 	.num_counters   = 4,
2943 	.num_boxes	= 1,
2944 	.perf_ctr_bits	= 48,
2945 	.constraints	= hswep_uncore_r2pcie_constraints,
2946 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2947 };
2948 
2949 static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
2950 	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
2951 	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
2952 	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
2953 	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
2954 	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
2955 	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
2956 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
2957 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
2958 	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
2959 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
2960 	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
2961 	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
2962 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
2963 	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
2964 	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
2965 	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
2966 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
2967 	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
2968 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
2969 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
2970 	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
2971 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
2972 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
2973 	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
2974 	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
2975 	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
2976 	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
2977 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
2978 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
2979 	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
2980 	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
2981 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
2982 	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
2983 	EVENT_CONSTRAINT_END
2984 };
2985 
2986 static struct intel_uncore_type hswep_uncore_r3qpi = {
2987 	.name		= "r3qpi",
2988 	.num_counters   = 3,
2989 	.num_boxes	= 3,
2990 	.perf_ctr_bits	= 44,
2991 	.constraints	= hswep_uncore_r3qpi_constraints,
2992 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2993 };
2994 
2995 enum {
2996 	HSWEP_PCI_UNCORE_HA,
2997 	HSWEP_PCI_UNCORE_IMC,
2998 	HSWEP_PCI_UNCORE_IRP,
2999 	HSWEP_PCI_UNCORE_QPI,
3000 	HSWEP_PCI_UNCORE_R2PCIE,
3001 	HSWEP_PCI_UNCORE_R3QPI,
3002 };
3003 
3004 static struct intel_uncore_type *hswep_pci_uncores[] = {
3005 	[HSWEP_PCI_UNCORE_HA]	= &hswep_uncore_ha,
3006 	[HSWEP_PCI_UNCORE_IMC]	= &hswep_uncore_imc,
3007 	[HSWEP_PCI_UNCORE_IRP]	= &hswep_uncore_irp,
3008 	[HSWEP_PCI_UNCORE_QPI]	= &hswep_uncore_qpi,
3009 	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
3010 	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
3011 	NULL,
3012 };
3013 
3014 static const struct pci_device_id hswep_uncore_pci_ids[] = {
3015 	{ /* Home Agent 0 */
3016 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
3017 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
3018 	},
3019 	{ /* Home Agent 1 */
3020 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
3021 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
3022 	},
3023 	{ /* MC0 Channel 0 */
3024 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
3025 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
3026 	},
3027 	{ /* MC0 Channel 1 */
3028 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
3029 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
3030 	},
3031 	{ /* MC0 Channel 2 */
3032 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
3033 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
3034 	},
3035 	{ /* MC0 Channel 3 */
3036 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
3037 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
3038 	},
3039 	{ /* MC1 Channel 0 */
3040 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
3041 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
3042 	},
3043 	{ /* MC1 Channel 1 */
3044 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
3045 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
3046 	},
3047 	{ /* MC1 Channel 2 */
3048 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
3049 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
3050 	},
3051 	{ /* MC1 Channel 3 */
3052 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
3053 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
3054 	},
3055 	{ /* IRP */
3056 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
3057 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
3058 	},
3059 	{ /* QPI0 Port 0 */
3060 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
3061 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
3062 	},
3063 	{ /* QPI0 Port 1 */
3064 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
3065 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
3066 	},
3067 	{ /* QPI1 Port 2 */
3068 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
3069 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
3070 	},
3071 	{ /* R2PCIe */
3072 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
3073 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
3074 	},
3075 	{ /* R3QPI0 Link 0 */
3076 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
3077 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
3078 	},
3079 	{ /* R3QPI0 Link 1 */
3080 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
3081 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
3082 	},
3083 	{ /* R3QPI1 Link 2 */
3084 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
3085 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
3086 	},
3087 	{ /* QPI Port 0 filter  */
3088 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
3089 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3090 						   SNBEP_PCI_QPI_PORT0_FILTER),
3091 	},
3092 	{ /* QPI Port 1 filter  */
3093 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
3094 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3095 						   SNBEP_PCI_QPI_PORT1_FILTER),
3096 	},
3097 	{ /* PCU.3 (for Capability registers) */
3098 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
3099 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3100 						   HSWEP_PCI_PCU_3),
3101 	},
3102 	{ /* end: all zeroes */ }
3103 };
3104 
3105 static struct pci_driver hswep_uncore_pci_driver = {
3106 	.name		= "hswep_uncore",
3107 	.id_table	= hswep_uncore_pci_ids,
3108 };
3109 
3110 int hswep_uncore_pci_init(void)
3111 {
3112 	int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3113 	if (ret)
3114 		return ret;
3115 	uncore_pci_uncores = hswep_pci_uncores;
3116 	uncore_pci_driver = &hswep_uncore_pci_driver;
3117 	return 0;
3118 }
3119 /* end of Haswell-EP uncore support */
3120 
3121 /* BDX uncore support */
3122 
3123 static struct intel_uncore_type bdx_uncore_ubox = {
3124 	.name			= "ubox",
3125 	.num_counters		= 2,
3126 	.num_boxes		= 1,
3127 	.perf_ctr_bits		= 48,
3128 	.fixed_ctr_bits		= 48,
3129 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
3130 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
3131 	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3132 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3133 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3134 	.num_shared_regs	= 1,
3135 	.ops			= &ivbep_uncore_msr_ops,
3136 	.format_group		= &ivbep_uncore_ubox_format_group,
3137 };
3138 
3139 static struct event_constraint bdx_uncore_cbox_constraints[] = {
3140 	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
3141 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3142 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3143 	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
3144 	EVENT_CONSTRAINT_END
3145 };
3146 
3147 static struct intel_uncore_type bdx_uncore_cbox = {
3148 	.name			= "cbox",
3149 	.num_counters		= 4,
3150 	.num_boxes		= 24,
3151 	.perf_ctr_bits		= 48,
3152 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
3153 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
3154 	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
3155 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
3156 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
3157 	.num_shared_regs	= 1,
3158 	.constraints		= bdx_uncore_cbox_constraints,
3159 	.ops			= &hswep_uncore_cbox_ops,
3160 	.format_group		= &hswep_uncore_cbox_format_group,
3161 };
3162 
3163 static struct intel_uncore_type bdx_uncore_sbox = {
3164 	.name			= "sbox",
3165 	.num_counters		= 4,
3166 	.num_boxes		= 4,
3167 	.perf_ctr_bits		= 48,
3168 	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
3169 	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
3170 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3171 	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
3172 	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
3173 	.ops			= &hswep_uncore_sbox_msr_ops,
3174 	.format_group		= &hswep_uncore_sbox_format_group,
3175 };
3176 
3177 #define BDX_MSR_UNCORE_SBOX	3
3178 
3179 static struct intel_uncore_type *bdx_msr_uncores[] = {
3180 	&bdx_uncore_ubox,
3181 	&bdx_uncore_cbox,
3182 	&hswep_uncore_pcu,
3183 	&bdx_uncore_sbox,
3184 	NULL,
3185 };
3186 
3187 /* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
3188 static struct event_constraint bdx_uncore_pcu_constraints[] = {
3189 	EVENT_CONSTRAINT(0x80, 0xe, 0x80),
3190 	EVENT_CONSTRAINT_END
3191 };
3192 
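/*
 * A sketch of the SBOX detection below: config-space offset 0x94 in PCU.3
 * is assumed to be CAPID4, whose bits 7:6 encode how many SBOXes the part
 * actually has; a value of 0 means the SBOX PMON MSRs are not present and
 * must not be touched.
 */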
3193 void bdx_uncore_cpu_init(void)
3194 {
3195 	int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);
3196 
3197 	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
3198 		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
3199 	uncore_msr_uncores = bdx_msr_uncores;
3200 
3201 	/* BDX-DE doesn't have SBOX */
3202 	if (boot_cpu_data.x86_model == 86) {
3203 		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3204 	/* Detect systems with no SBOXes */
3205 	} else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
3206 		struct pci_dev *pdev;
3207 		u32 capid4;
3208 
3209 		pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3];
3210 		pci_read_config_dword(pdev, 0x94, &capid4);
3211 		if (((capid4 >> 6) & 0x3) == 0)
3212 			bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3213 	}
3214 	hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
3215 }
3216 
3217 static struct intel_uncore_type bdx_uncore_ha = {
3218 	.name		= "ha",
3219 	.num_counters   = 4,
3220 	.num_boxes	= 2,
3221 	.perf_ctr_bits	= 48,
3222 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3223 };
3224 
3225 static struct intel_uncore_type bdx_uncore_imc = {
3226 	.name		= "imc",
3227 	.num_counters   = 4,
3228 	.num_boxes	= 8,
3229 	.perf_ctr_bits	= 48,
3230 	.fixed_ctr_bits	= 48,
3231 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
3232 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
3233 	.event_descs	= hswep_uncore_imc_events,
3234 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3235 };
3236 
3237 static struct intel_uncore_type bdx_uncore_irp = {
3238 	.name			= "irp",
3239 	.num_counters		= 4,
3240 	.num_boxes		= 1,
3241 	.perf_ctr_bits		= 48,
3242 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
3243 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
3244 	.ops			= &hswep_uncore_irp_ops,
3245 	.format_group		= &snbep_uncore_format_group,
3246 };
3247 
3248 static struct intel_uncore_type bdx_uncore_qpi = {
3249 	.name			= "qpi",
3250 	.num_counters		= 4,
3251 	.num_boxes		= 3,
3252 	.perf_ctr_bits		= 48,
3253 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
3254 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
3255 	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
3256 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
3257 	.num_shared_regs	= 1,
3258 	.ops			= &snbep_uncore_qpi_ops,
3259 	.format_group		= &snbep_uncore_qpi_format_group,
3260 };
3261 
3262 static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
3263 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3264 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3265 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3266 	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
3267 	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
3268 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3269 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3270 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3271 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3272 	EVENT_CONSTRAINT_END
3273 };
3274 
3275 static struct intel_uncore_type bdx_uncore_r2pcie = {
3276 	.name		= "r2pcie",
3277 	.num_counters   = 4,
3278 	.num_boxes	= 1,
3279 	.perf_ctr_bits	= 48,
3280 	.constraints	= bdx_uncore_r2pcie_constraints,
3281 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3282 };
3283 
3284 static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
3285 	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
3286 	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3287 	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3288 	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3289 	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3290 	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3291 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3292 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3293 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3294 	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3295 	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3296 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3297 	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3298 	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3299 	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3300 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3301 	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3302 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3303 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3304 	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3305 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3306 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3307 	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3308 	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3309 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3310 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3311 	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3312 	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3313 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3314 	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3315 	EVENT_CONSTRAINT_END
3316 };
3317 
3318 static struct intel_uncore_type bdx_uncore_r3qpi = {
3319 	.name		= "r3qpi",
3320 	.num_counters   = 3,
3321 	.num_boxes	= 3,
3322 	.perf_ctr_bits	= 48,
3323 	.constraints	= bdx_uncore_r3qpi_constraints,
3324 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3325 };
3326 
3327 enum {
3328 	BDX_PCI_UNCORE_HA,
3329 	BDX_PCI_UNCORE_IMC,
3330 	BDX_PCI_UNCORE_IRP,
3331 	BDX_PCI_UNCORE_QPI,
3332 	BDX_PCI_UNCORE_R2PCIE,
3333 	BDX_PCI_UNCORE_R3QPI,
3334 };
3335 
3336 static struct intel_uncore_type *bdx_pci_uncores[] = {
3337 	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
3338 	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
3339 	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
3340 	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
3341 	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
3342 	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
3343 	NULL,
3344 };
3345 
3346 static const struct pci_device_id bdx_uncore_pci_ids[] = {
3347 	{ /* Home Agent 0 */
3348 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
3349 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
3350 	},
3351 	{ /* Home Agent 1 */
3352 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
3353 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
3354 	},
3355 	{ /* MC0 Channel 0 */
3356 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
3357 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
3358 	},
3359 	{ /* MC0 Channel 1 */
3360 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
3361 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
3362 	},
3363 	{ /* MC0 Channel 2 */
3364 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
3365 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
3366 	},
3367 	{ /* MC0 Channel 3 */
3368 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
3369 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
3370 	},
3371 	{ /* MC1 Channel 0 */
3372 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
3373 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
3374 	},
3375 	{ /* MC1 Channel 1 */
3376 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
3377 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
3378 	},
3379 	{ /* MC1 Channel 2 */
3380 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
3381 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
3382 	},
3383 	{ /* MC1 Channel 3 */
3384 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
3385 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
3386 	},
3387 	{ /* IRP */
3388 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
3389 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
3390 	},
3391 	{ /* QPI0 Port 0 */
3392 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
3393 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
3394 	},
3395 	{ /* QPI0 Port 1 */
3396 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
3397 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
3398 	},
3399 	{ /* QPI1 Port 2 */
3400 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
3401 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
3402 	},
3403 	{ /* R2PCIe */
3404 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
3405 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
3406 	},
3407 	{ /* R3QPI0 Link 0 */
3408 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
3409 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
3410 	},
3411 	{ /* R3QPI0 Link 1 */
3412 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
3413 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
3414 	},
3415 	{ /* R3QPI1 Link 2 */
3416 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
3417 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
3418 	},
3419 	{ /* QPI Port 0 filter  */
3420 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
3421 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3422 						   SNBEP_PCI_QPI_PORT0_FILTER),
3423 	},
3424 	{ /* QPI Port 1 filter  */
3425 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
3426 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3427 						   SNBEP_PCI_QPI_PORT1_FILTER),
3428 	},
3429 	{ /* QPI Port 2 filter  */
3430 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
3431 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3432 						   BDX_PCI_QPI_PORT2_FILTER),
3433 	},
3434 	{ /* PCU.3 (for Capability registers) */
3435 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
3436 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3437 						   HSWEP_PCI_PCU_3),
3438 	},
3439 	{ /* end: all zeroes */ }
3440 };
3441 
3442 static struct pci_driver bdx_uncore_pci_driver = {
3443 	.name		= "bdx_uncore",
3444 	.id_table	= bdx_uncore_pci_ids,
3445 };
3446 
3447 int bdx_uncore_pci_init(void)
3448 {
3449 	int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3450 
3451 	if (ret)
3452 		return ret;
3453 	uncore_pci_uncores = bdx_pci_uncores;
3454 	uncore_pci_driver = &bdx_uncore_pci_driver;
3455 	return 0;
3456 }
3457 
3458 /* end of BDX uncore support */
3459 
3460 /* SKX uncore support */
3461 
3462 static struct intel_uncore_type skx_uncore_ubox = {
3463 	.name			= "ubox",
3464 	.num_counters		= 2,
3465 	.num_boxes		= 1,
3466 	.perf_ctr_bits		= 48,
3467 	.fixed_ctr_bits		= 48,
3468 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
3469 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
3470 	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3471 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3472 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3473 	.ops			= &ivbep_uncore_msr_ops,
3474 	.format_group		= &ivbep_uncore_ubox_format_group,
3475 };
3476 
3477 static struct attribute *skx_uncore_cha_formats_attr[] = {
3478 	&format_attr_event.attr,
3479 	&format_attr_umask.attr,
3480 	&format_attr_edge.attr,
3481 	&format_attr_tid_en.attr,
3482 	&format_attr_inv.attr,
3483 	&format_attr_thresh8.attr,
3484 	&format_attr_filter_tid4.attr,
3485 	&format_attr_filter_state5.attr,
3486 	&format_attr_filter_rem.attr,
3487 	&format_attr_filter_loc.attr,
3488 	&format_attr_filter_nm.attr,
3489 	&format_attr_filter_all_op.attr,
3490 	&format_attr_filter_not_nm.attr,
3491 	&format_attr_filter_opc_0.attr,
3492 	&format_attr_filter_opc_1.attr,
3493 	&format_attr_filter_nc.attr,
3494 	&format_attr_filter_isoc.attr,
3495 	NULL,
3496 };
3497 
3498 static const struct attribute_group skx_uncore_chabox_format_group = {
3499 	.name = "format",
3500 	.attrs = skx_uncore_cha_formats_attr,
3501 };
3502 
3503 static struct event_constraint skx_uncore_chabox_constraints[] = {
3504 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3505 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3506 	EVENT_CONSTRAINT_END
3507 };
3508 
3509 static struct extra_reg skx_uncore_cha_extra_regs[] = {
3510 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
3511 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
3512 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
3513 	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
3514 	SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
3515 	SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
3516 	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
3517 	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
3518 	SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
3519 	EVENT_EXTRA_END
3520 };
3521 
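/*
 * The 'idx' values accumulated from the table above form a bitmap of the
 * filter fields an event needs: bit 0 = TID, bit 1 = LINK, bit 2 = STATE,
 * and bit 3 covers the whole opcode/flags group.  skx_cha_filter_mask()
 * below turns that bitmap into the corresponding FILTER register mask.
 */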
3522 static u64 skx_cha_filter_mask(int fields)
3523 {
3524 	u64 mask = 0;
3525 
3526 	if (fields & 0x1)
3527 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3528 	if (fields & 0x2)
3529 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3530 	if (fields & 0x4)
3531 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3532 	if (fields & 0x8) {
3533 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3534 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3535 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3536 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3537 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3538 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3539 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3540 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3541 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3542 	}
3543 	return mask;
3544 }
3545 
3546 static struct event_constraint *
3547 skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
3548 {
3549 	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
3550 }
3551 
3552 static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
3553 {
3554 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
3555 	struct extra_reg *er;
3556 	int idx = 0;
3557 
3558 	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
3559 		if (er->event != (event->hw.config & er->config_mask))
3560 			continue;
3561 		idx |= er->idx;
3562 	}
3563 
3564 	if (idx) {
3565 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
3566 			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
3567 		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
3568 		reg1->idx = idx;
3569 	}
3570 	return 0;
3571 }
3572 
3573 static struct intel_uncore_ops skx_uncore_chabox_ops = {
3574 	/* There is no frz_en for chabox ctl */
3575 	.init_box		= ivbep_uncore_msr_init_box,
3576 	.disable_box		= snbep_uncore_msr_disable_box,
3577 	.enable_box		= snbep_uncore_msr_enable_box,
3578 	.disable_event		= snbep_uncore_msr_disable_event,
3579 	.enable_event		= hswep_cbox_enable_event,
3580 	.read_counter		= uncore_msr_read_counter,
3581 	.hw_config		= skx_cha_hw_config,
3582 	.get_constraint		= skx_cha_get_constraint,
3583 	.put_constraint		= snbep_cbox_put_constraint,
3584 };
3585 
3586 static struct intel_uncore_type skx_uncore_chabox = {
3587 	.name			= "cha",
3588 	.num_counters		= 4,
3589 	.perf_ctr_bits		= 48,
3590 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
3591 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
3592 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3593 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
3594 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
3595 	.num_shared_regs	= 1,
3596 	.constraints		= skx_uncore_chabox_constraints,
3597 	.ops			= &skx_uncore_chabox_ops,
3598 	.format_group		= &skx_uncore_chabox_format_group,
3599 };
3600 
3601 static struct attribute *skx_uncore_iio_formats_attr[] = {
3602 	&format_attr_event.attr,
3603 	&format_attr_umask.attr,
3604 	&format_attr_edge.attr,
3605 	&format_attr_inv.attr,
3606 	&format_attr_thresh9.attr,
3607 	&format_attr_ch_mask.attr,
3608 	&format_attr_fc_mask.attr,
3609 	NULL,
3610 };
3611 
3612 static const struct attribute_group skx_uncore_iio_format_group = {
3613 	.name = "format",
3614 	.attrs = skx_uncore_iio_formats_attr,
3615 };
3616 
3617 static struct event_constraint skx_uncore_iio_constraints[] = {
3618 	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
3619 	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
3620 	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
3621 	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
3622 	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
3623 	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
3624 	EVENT_CONSTRAINT_END
3625 };
3626 
3627 static void skx_iio_enable_event(struct intel_uncore_box *box,
3628 				 struct perf_event *event)
3629 {
3630 	struct hw_perf_event *hwc = &event->hw;
3631 
3632 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3633 }
3634 
3635 static struct intel_uncore_ops skx_uncore_iio_ops = {
3636 	.init_box		= ivbep_uncore_msr_init_box,
3637 	.disable_box		= snbep_uncore_msr_disable_box,
3638 	.enable_box		= snbep_uncore_msr_enable_box,
3639 	.disable_event		= snbep_uncore_msr_disable_event,
3640 	.enable_event		= skx_iio_enable_event,
3641 	.read_counter		= uncore_msr_read_counter,
3642 };
3643 
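/*
 * SKX_MSR_CPU_BUS_NUMBER packs one PCI root bus number per IIO stack into
 * a single u64, 8 bits (BUS_NUM_STRIDE) apart; pmu_idx selects which
 * stack's bus number to extract for a given die.
 */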
3644 static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die)
3645 {
3646 	return pmu->type->topology[die] >> (pmu->pmu_idx * BUS_NUM_STRIDE);
3647 }
3648 
3649 static umode_t
3650 skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
3651 {
3652 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
3653 
3654 	/* Root bus 0x00 is valid only for die 0 AND pmu_idx = 0. */
3655 	return (!skx_iio_stack(pmu, die) && pmu->pmu_idx) ? 0 : attr->mode;
3656 }
3657 
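/*
 * Backs the per-die "dieX" sysfs attributes created in
 * skx_iio_set_mapping() below; reading one yields the "segment:bus" root
 * bus behind this IIO stack, e.g. something like "0000:17".
 */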
3658 static ssize_t skx_iio_mapping_show(struct device *dev,
3659 				struct device_attribute *attr, char *buf)
3660 {
3661 	struct pci_bus *bus = pci_find_next_bus(NULL);
3662 	struct intel_uncore_pmu *uncore_pmu = dev_to_uncore_pmu(dev);
3663 	struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
3664 	long die = (long)ea->var;
3665 
3666 	/*
3667 	 * The current implementation is for a single-segment configuration,
3668 	 * hence it's safe to take the segment value from the first available
3668 	 * root bus.
3669 	 */
3670 	return sprintf(buf, "%04x:%02x\n", pci_domain_nr(bus),
3671 					   skx_iio_stack(uncore_pmu, die));
3672 }
3673 
3674 static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
3675 {
3676 	u64 msr_value;
3677 
3678 	if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
3679 			!(msr_value & SKX_MSR_CPU_BUS_VALID_BIT))
3680 		return -ENXIO;
3681 
3682 	*topology = msr_value;
3683 
3684 	return 0;
3685 }
3686 
3687 static int die_to_cpu(int die)
3688 {
3689 	int res = 0, cpu, current_die;
3690 	/*
3691 	 * Use cpus_read_lock() to ensure that a CPU cannot go offline while
3692 	 * we walk cpu_online_mask.
3693 	 */
3694 	cpus_read_lock();
3695 	for_each_online_cpu(cpu) {
3696 		current_die = topology_logical_die_id(cpu);
3697 		if (current_die == die) {
3698 			res = cpu;
3699 			break;
3700 		}
3701 	}
3702 	cpus_read_unlock();
3703 	return res;
3704 }
3705 
3706 static int skx_iio_get_topology(struct intel_uncore_type *type)
3707 {
3708 	int i, ret;
3709 	struct pci_bus *bus = NULL;
3710 
3711 	/*
3712 	 * Verified single-segment environments only; disabled for multiple
3713 	 * segment topologies for now except VMD domains.
3714 	 * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
3715 	 */
3716 	while ((bus = pci_find_next_bus(bus))
3717 		&& (!pci_domain_nr(bus) || pci_domain_nr(bus) > 0xffff))
3718 		;
3719 	if (bus)
3720 		return -EPERM;
3721 
3722 	type->topology = kcalloc(uncore_max_dies(), sizeof(u64), GFP_KERNEL);
3723 	if (!type->topology)
3724 		return -ENOMEM;
3725 
3726 	for (i = 0; i < uncore_max_dies(); i++) {
3727 		ret = skx_msr_cpu_bus_read(die_to_cpu(i), &type->topology[i]);
3728 		if (ret) {
3729 			kfree(type->topology);
3730 			type->topology = NULL;
3731 			return ret;
3732 		}
3733 	}
3734 
3735 	return 0;
3736 }
3737 
3738 static struct attribute_group skx_iio_mapping_group = {
3739 	.is_visible	= skx_iio_mapping_visible,
3740 };
3741 
3742 static const struct attribute_group *skx_iio_attr_update[] = {
3743 	&skx_iio_mapping_group,
3744 	NULL,
3745 };
3746 
3747 static int skx_iio_set_mapping(struct intel_uncore_type *type)
3748 {
3749 	char buf[64];
3750 	int ret;
3751 	long die = -1;
3752 	struct attribute **attrs = NULL;
3753 	struct dev_ext_attribute *eas = NULL;
3754 
3755 	ret = skx_iio_get_topology(type);
3756 	if (ret)
3757 		goto clear_attr_update;
3758 
3759 	ret = -ENOMEM;
3760 
3761 	/* One more for NULL. */
3762 	attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
3763 	if (!attrs)
3764 		goto err;
3765 
3766 	eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
3767 	if (!eas)
3768 		goto err;
3769 
3770 	for (die = 0; die < uncore_max_dies(); die++) {
3771 		sprintf(buf, "die%ld", die);
3772 		sysfs_attr_init(&eas[die].attr.attr);
3773 		eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
3774 		if (!eas[die].attr.attr.name)
3775 			goto err;
3776 		eas[die].attr.attr.mode = 0444;
3777 		eas[die].attr.show = skx_iio_mapping_show;
3778 		eas[die].attr.store = NULL;
3779 		eas[die].var = (void *)die;
3780 		attrs[die] = &eas[die].attr.attr;
3781 	}
3782 	skx_iio_mapping_group.attrs = attrs;
3783 
3784 	return 0;
3785 err:
3786 	for (; die >= 0; die--)
3787 		kfree(eas[die].attr.attr.name);
3788 	kfree(eas);
3789 	kfree(attrs);
3790 	kfree(type->topology);
3791 clear_attr_update:
3792 	type->attr_update = NULL;
3793 	return ret;
3794 }
3795 
3796 static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
3797 {
3798 	struct attribute **attr = skx_iio_mapping_group.attrs;
3799 
3800 	if (!attr)
3801 		return;
3802 
3803 	for (; *attr; attr++)
3804 		kfree((*attr)->name);
3805 	kfree(attr_to_ext_attr(*skx_iio_mapping_group.attrs));
3806 	kfree(skx_iio_mapping_group.attrs);
3807 	skx_iio_mapping_group.attrs = NULL;
3808 	kfree(type->topology);
3809 }
3810 
3811 static struct intel_uncore_type skx_uncore_iio = {
3812 	.name			= "iio",
3813 	.num_counters		= 4,
3814 	.num_boxes		= 6,
3815 	.perf_ctr_bits		= 48,
3816 	.event_ctl		= SKX_IIO0_MSR_PMON_CTL0,
3817 	.perf_ctr		= SKX_IIO0_MSR_PMON_CTR0,
3818 	.event_mask		= SKX_IIO_PMON_RAW_EVENT_MASK,
3819 	.event_mask_ext		= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
3820 	.box_ctl		= SKX_IIO0_MSR_PMON_BOX_CTL,
3821 	.msr_offset		= SKX_IIO_MSR_OFFSET,
3822 	.constraints		= skx_uncore_iio_constraints,
3823 	.ops			= &skx_uncore_iio_ops,
3824 	.format_group		= &skx_uncore_iio_format_group,
3825 	.attr_update		= skx_iio_attr_update,
3826 	.set_mapping		= skx_iio_set_mapping,
3827 	.cleanup_mapping	= skx_iio_cleanup_mapping,
3828 };
3829 
3830 enum perf_uncore_iio_freerunning_type_id {
3831 	SKX_IIO_MSR_IOCLK			= 0,
3832 	SKX_IIO_MSR_BW				= 1,
3833 	SKX_IIO_MSR_UTIL			= 2,
3834 
3835 	SKX_IIO_FREERUNNING_TYPE_MAX,
3836 };
3837 
3838 
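/*
 * Each skx_iio_freerunning[] entry gives { counter base MSR, counter
 * offset, box offset, number of counters, counter width in bits }.  For
 * example, the BW counters: 8 counters starting at MSR 0xb00, 36 bits
 * wide, with boxes 0x10 apart.
 */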
3839 static struct freerunning_counters skx_iio_freerunning[] = {
3840 	[SKX_IIO_MSR_IOCLK]	= { 0xa45, 0x1, 0x20, 1, 36 },
3841 	[SKX_IIO_MSR_BW]	= { 0xb00, 0x1, 0x10, 8, 36 },
3842 	[SKX_IIO_MSR_UTIL]	= { 0xb08, 0x1, 0x10, 8, 36 },
3843 };
3844 
3845 static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
3846 	/* Free-Running IO CLOCKS Counter */
3847 	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
3848 	/* Free-Running IIO BANDWIDTH Counters */
3849 	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
3850 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
3851 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
3852 	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
3853 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
3854 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
3855 	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
3856 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
3857 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
3858 	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
3859 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
3860 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
3861 	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x24"),
3862 	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
3863 	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
3864 	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x25"),
3865 	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
3866 	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
3867 	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x26"),
3868 	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
3869 	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
3870 	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x27"),
3871 	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
3872 	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
3873 	/* Free-Running IIO UTILIZATION Counters */
3874 	INTEL_UNCORE_EVENT_DESC(util_in_port0,		"event=0xff,umask=0x30"),
3875 	INTEL_UNCORE_EVENT_DESC(util_out_port0,		"event=0xff,umask=0x31"),
3876 	INTEL_UNCORE_EVENT_DESC(util_in_port1,		"event=0xff,umask=0x32"),
3877 	INTEL_UNCORE_EVENT_DESC(util_out_port1,		"event=0xff,umask=0x33"),
3878 	INTEL_UNCORE_EVENT_DESC(util_in_port2,		"event=0xff,umask=0x34"),
3879 	INTEL_UNCORE_EVENT_DESC(util_out_port2,		"event=0xff,umask=0x35"),
3880 	INTEL_UNCORE_EVENT_DESC(util_in_port3,		"event=0xff,umask=0x36"),
3881 	INTEL_UNCORE_EVENT_DESC(util_out_port3,		"event=0xff,umask=0x37"),
3882 	{ /* end: all zeroes */ },
3883 };
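
/*
 * The bandwidth .scale of 3.814697266e-6 is 4 / 2^20: each counter
 * increment represents 4 bytes, and the value is reported in MiB.  A
 * hedged usage sketch (the PMU name as typically exposed in sysfs is
 * assumed):
 *
 *	perf stat -a -e uncore_iio_free_running_0/bw_in_port0/ -- sleep 1
 */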
3884 
3885 static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
3886 	.read_counter		= uncore_msr_read_counter,
3887 	.hw_config		= uncore_freerunning_hw_config,
3888 };
3889 
3890 static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
3891 	&format_attr_event.attr,
3892 	&format_attr_umask.attr,
3893 	NULL,
3894 };
3895 
3896 static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
3897 	.name = "format",
3898 	.attrs = skx_uncore_iio_freerunning_formats_attr,
3899 };
3900 
3901 static struct intel_uncore_type skx_uncore_iio_free_running = {
3902 	.name			= "iio_free_running",
3903 	.num_counters		= 17,
3904 	.num_boxes		= 6,
3905 	.num_freerunning_types	= SKX_IIO_FREERUNNING_TYPE_MAX,
3906 	.freerunning		= skx_iio_freerunning,
3907 	.ops			= &skx_uncore_iio_freerunning_ops,
3908 	.event_descs		= skx_uncore_iio_freerunning_events,
3909 	.format_group		= &skx_uncore_iio_freerunning_format_group,
3910 };
3911 
3912 static struct attribute *skx_uncore_formats_attr[] = {
3913 	&format_attr_event.attr,
3914 	&format_attr_umask.attr,
3915 	&format_attr_edge.attr,
3916 	&format_attr_inv.attr,
3917 	&format_attr_thresh8.attr,
3918 	NULL,
3919 };
3920 
3921 static const struct attribute_group skx_uncore_format_group = {
3922 	.name = "format",
3923 	.attrs = skx_uncore_formats_attr,
3924 };
3925 
3926 static struct intel_uncore_type skx_uncore_irp = {
3927 	.name			= "irp",
3928 	.num_counters		= 2,
3929 	.num_boxes		= 6,
3930 	.perf_ctr_bits		= 48,
3931 	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
3932 	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
3933 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
3934 	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
3935 	.msr_offset		= SKX_IRP_MSR_OFFSET,
3936 	.ops			= &skx_uncore_iio_ops,
3937 	.format_group		= &skx_uncore_format_group,
3938 };
3939 
3940 static struct attribute *skx_uncore_pcu_formats_attr[] = {
3941 	&format_attr_event.attr,
3942 	&format_attr_umask.attr,
3943 	&format_attr_edge.attr,
3944 	&format_attr_inv.attr,
3945 	&format_attr_thresh8.attr,
3946 	&format_attr_occ_invert.attr,
3947 	&format_attr_occ_edge_det.attr,
3948 	&format_attr_filter_band0.attr,
3949 	&format_attr_filter_band1.attr,
3950 	&format_attr_filter_band2.attr,
3951 	&format_attr_filter_band3.attr,
3952 	NULL,
3953 };
3954 
3955 static struct attribute_group skx_uncore_pcu_format_group = {
3956 	.name = "format",
3957 	.attrs = skx_uncore_pcu_formats_attr,
3958 };
3959 
3960 static struct intel_uncore_ops skx_uncore_pcu_ops = {
3961 	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
3962 	.hw_config		= hswep_pcu_hw_config,
3963 	.get_constraint		= snbep_pcu_get_constraint,
3964 	.put_constraint		= snbep_pcu_put_constraint,
3965 };
3966 
3967 static struct intel_uncore_type skx_uncore_pcu = {
3968 	.name			= "pcu",
3969 	.num_counters		= 4,
3970 	.num_boxes		= 1,
3971 	.perf_ctr_bits		= 48,
3972 	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
3973 	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
3974 	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
3975 	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
3976 	.num_shared_regs	= 1,
3977 	.ops			= &skx_uncore_pcu_ops,
3978 	.format_group		= &skx_uncore_pcu_format_group,
3979 };
3980 
3981 static struct intel_uncore_type *skx_msr_uncores[] = {
3982 	&skx_uncore_ubox,
3983 	&skx_uncore_chabox,
3984 	&skx_uncore_iio,
3985 	&skx_uncore_iio_free_running,
3986 	&skx_uncore_irp,
3987 	&skx_uncore_pcu,
3988 	NULL,
3989 };
3990 
3991 /*
3992  * To determine the number of CHAs, read bits 27:0 of the CAPID6 register,
3993  * which is located at Device 30, Function 3, Offset 0x9C (PCI ID 0x2083).
3994  */
3995 #define SKX_CAPID6		0x9c
3996 #define SKX_CHA_BIT_MASK	GENMASK(27, 0)
3997 
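/*
 * CAPID6[27:0] is a presence bitmap with one bit per possible CHA, so the
 * population count (hweight32) of the masked value is the number of CHAs
 * enabled on this part.
 */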
3998 static int skx_count_chabox(void)
3999 {
4000 	struct pci_dev *dev = NULL;
4001 	u32 val = 0;
4002 
4003 	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
4004 	if (!dev)
4005 		goto out;
4006 
4007 	pci_read_config_dword(dev, SKX_CAPID6, &val);
4008 	val &= SKX_CHA_BIT_MASK;
4009 out:
4010 	pci_dev_put(dev);
4011 	return hweight32(val);
4012 }
4013 
4014 void skx_uncore_cpu_init(void)
4015 {
4016 	skx_uncore_chabox.num_boxes = skx_count_chabox();
4017 	uncore_msr_uncores = skx_msr_uncores;
4018 }
4019 
4020 static struct intel_uncore_type skx_uncore_imc = {
4021 	.name		= "imc",
4022 	.num_counters   = 4,
4023 	.num_boxes	= 6,
4024 	.perf_ctr_bits	= 48,
4025 	.fixed_ctr_bits	= 48,
4026 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
4027 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
4028 	.event_descs	= hswep_uncore_imc_events,
4029 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
4030 	.event_ctl	= SNBEP_PCI_PMON_CTL0,
4031 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4032 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
4033 	.ops		= &ivbep_uncore_pci_ops,
4034 	.format_group	= &skx_uncore_format_group,
4035 };
4036 
4037 static struct attribute *skx_upi_uncore_formats_attr[] = {
4038 	&format_attr_event.attr,
4039 	&format_attr_umask_ext.attr,
4040 	&format_attr_edge.attr,
4041 	&format_attr_inv.attr,
4042 	&format_attr_thresh8.attr,
4043 	NULL,
4044 };
4045 
4046 static const struct attribute_group skx_upi_uncore_format_group = {
4047 	.name = "format",
4048 	.attrs = skx_upi_uncore_formats_attr,
4049 };
4050 
4051 static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
4052 {
4053 	struct pci_dev *pdev = box->pci_dev;
4054 
4055 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4056 	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4057 }
4058 
4059 static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
4060 	.init_box	= skx_upi_uncore_pci_init_box,
4061 	.disable_box	= snbep_uncore_pci_disable_box,
4062 	.enable_box	= snbep_uncore_pci_enable_box,
4063 	.disable_event	= snbep_uncore_pci_disable_event,
4064 	.enable_event	= snbep_uncore_pci_enable_event,
4065 	.read_counter	= snbep_uncore_pci_read_counter,
4066 };
4067 
4068 static struct intel_uncore_type skx_uncore_upi = {
4069 	.name		= "upi",
4070 	.num_counters   = 4,
4071 	.num_boxes	= 3,
4072 	.perf_ctr_bits	= 48,
4073 	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
4074 	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
4075 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4076 	.event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
4077 	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
4078 	.ops		= &skx_upi_uncore_pci_ops,
4079 	.format_group	= &skx_upi_uncore_format_group,
4080 };
4081 
4082 static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4083 {
4084 	struct pci_dev *pdev = box->pci_dev;
4085 
4086 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4087 	pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4088 }
4089 
4090 static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
4091 	.init_box	= skx_m2m_uncore_pci_init_box,
4092 	.disable_box	= snbep_uncore_pci_disable_box,
4093 	.enable_box	= snbep_uncore_pci_enable_box,
4094 	.disable_event	= snbep_uncore_pci_disable_event,
4095 	.enable_event	= snbep_uncore_pci_enable_event,
4096 	.read_counter	= snbep_uncore_pci_read_counter,
4097 };
4098 
4099 static struct intel_uncore_type skx_uncore_m2m = {
4100 	.name		= "m2m",
4101 	.num_counters   = 4,
4102 	.num_boxes	= 2,
4103 	.perf_ctr_bits	= 48,
4104 	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
4105 	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
4106 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4107 	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
4108 	.ops		= &skx_m2m_uncore_pci_ops,
4109 	.format_group	= &skx_uncore_format_group,
4110 };
4111 
4112 static struct event_constraint skx_uncore_m2pcie_constraints[] = {
4113 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
4114 	EVENT_CONSTRAINT_END
4115 };
4116 
4117 static struct intel_uncore_type skx_uncore_m2pcie = {
4118 	.name		= "m2pcie",
4119 	.num_counters   = 4,
4120 	.num_boxes	= 4,
4121 	.perf_ctr_bits	= 48,
4122 	.constraints	= skx_uncore_m2pcie_constraints,
4123 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
4124 	.event_ctl	= SNBEP_PCI_PMON_CTL0,
4125 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4126 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
4127 	.ops		= &ivbep_uncore_pci_ops,
4128 	.format_group	= &skx_uncore_format_group,
4129 };
4130 
4131 static struct event_constraint skx_uncore_m3upi_constraints[] = {
4132 	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
4133 	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
4134 	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
4135 	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
4136 	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
4137 	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
4138 	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
4139 	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
4140 	EVENT_CONSTRAINT_END
4141 };
4142 
4143 static struct intel_uncore_type skx_uncore_m3upi = {
4144 	.name		= "m3upi",
4145 	.num_counters   = 3,
4146 	.num_boxes	= 3,
4147 	.perf_ctr_bits	= 48,
4148 	.constraints	= skx_uncore_m3upi_constraints,
4149 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
4150 	.event_ctl	= SNBEP_PCI_PMON_CTL0,
4151 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4152 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
4153 	.ops		= &ivbep_uncore_pci_ops,
4154 	.format_group	= &skx_uncore_format_group,
4155 };
4156 
4157 enum {
4158 	SKX_PCI_UNCORE_IMC,
4159 	SKX_PCI_UNCORE_M2M,
4160 	SKX_PCI_UNCORE_UPI,
4161 	SKX_PCI_UNCORE_M2PCIE,
4162 	SKX_PCI_UNCORE_M3UPI,
4163 };
4164 
4165 static struct intel_uncore_type *skx_pci_uncores[] = {
4166 	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
4167 	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
4168 	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
4169 	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
4170 	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
4171 	NULL,
4172 };
4173 
4174 static const struct pci_device_id skx_uncore_pci_ids[] = {
4175 	{ /* MC0 Channel 0 */
4176 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4177 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
4178 	},
4179 	{ /* MC0 Channel 1 */
4180 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4181 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
4182 	},
4183 	{ /* MC0 Channel 2 */
4184 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4185 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
4186 	},
4187 	{ /* MC1 Channel 0 */
4188 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4189 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
4190 	},
4191 	{ /* MC1 Channel 1 */
4192 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4193 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
4194 	},
4195 	{ /* MC1 Channel 2 */
4196 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4197 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
4198 	},
4199 	{ /* M2M0 */
4200 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4201 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
4202 	},
4203 	{ /* M2M1 */
4204 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4205 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
4206 	},
4207 	{ /* UPI0 Link 0 */
4208 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4209 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
4210 	},
4211 	{ /* UPI0 Link 1 */
4212 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4213 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
4214 	},
4215 	{ /* UPI1 Link 2 */
4216 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4217 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
4218 	},
4219 	{ /* M2PCIe 0 */
4220 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4221 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
4222 	},
4223 	{ /* M2PCIe 1 */
4224 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4225 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
4226 	},
4227 	{ /* M2PCIe 2 */
4228 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4229 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
4230 	},
4231 	{ /* M2PCIe 3 */
4232 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4233 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
4234 	},
4235 	{ /* M3UPI0 Link 0 */
4236 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4237 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
4238 	},
4239 	{ /* M3UPI0 Link 1 */
4240 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
4241 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
4242 	},
4243 	{ /* M3UPI1 Link 2 */
4244 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4245 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
4246 	},
4247 	{ /* end: all zeroes */ }
4248 };
4249 
4250 
4251 static struct pci_driver skx_uncore_pci_driver = {
4252 	.name		= "skx_uncore",
4253 	.id_table	= skx_uncore_pci_ids,
4254 };
4255 
4256 int skx_uncore_pci_init(void)
4257 {
4258 	/* need to double check pci address */
4259 	int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
4260 
4261 	if (ret)
4262 		return ret;
4263 
4264 	uncore_pci_uncores = skx_pci_uncores;
4265 	uncore_pci_driver = &skx_uncore_pci_driver;
4266 	return 0;
4267 }
4268 
4269 /* end of SKX uncore support */
4270 
4271 /* SNR uncore support */
4272 
4273 static struct intel_uncore_type snr_uncore_ubox = {
4274 	.name			= "ubox",
4275 	.num_counters		= 2,
4276 	.num_boxes		= 1,
4277 	.perf_ctr_bits		= 48,
4278 	.fixed_ctr_bits		= 48,
4279 	.perf_ctr		= SNR_U_MSR_PMON_CTR0,
4280 	.event_ctl		= SNR_U_MSR_PMON_CTL0,
4281 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4282 	.fixed_ctr		= SNR_U_MSR_PMON_UCLK_FIXED_CTR,
4283 	.fixed_ctl		= SNR_U_MSR_PMON_UCLK_FIXED_CTL,
4284 	.ops			= &ivbep_uncore_msr_ops,
4285 	.format_group		= &ivbep_uncore_format_group,
4286 };
4287 
4288 static struct attribute *snr_uncore_cha_formats_attr[] = {
4289 	&format_attr_event.attr,
4290 	&format_attr_umask_ext2.attr,
4291 	&format_attr_edge.attr,
4292 	&format_attr_tid_en.attr,
4293 	&format_attr_inv.attr,
4294 	&format_attr_thresh8.attr,
4295 	&format_attr_filter_tid5.attr,
4296 	NULL,
4297 };
4298 static const struct attribute_group snr_uncore_chabox_format_group = {
4299 	.name = "format",
4300 	.attrs = snr_uncore_cha_formats_attr,
4301 };
4302 
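/*
 * Unlike SKX, the SNR CHA filter carries only a TID field, so no extra_reg
 * table walk is needed: the filter register for this box sits at FILTER0
 * plus the per-box MSR stride, and config1 supplies the TID directly.
 */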
4303 static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4304 {
4305 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4306 
4307 	reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4308 		    box->pmu->type->msr_offset * box->pmu->pmu_idx;
4309 	reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4310 	reg1->idx = 0;
4311 
4312 	return 0;
4313 }
4314 
4315 static void snr_cha_enable_event(struct intel_uncore_box *box,
4316 				   struct perf_event *event)
4317 {
4318 	struct hw_perf_event *hwc = &event->hw;
4319 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4320 
4321 	if (reg1->idx != EXTRA_REG_NONE)
4322 		wrmsrl(reg1->reg, reg1->config);
4323 
4324 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4325 }
4326 
4327 static struct intel_uncore_ops snr_uncore_chabox_ops = {
4328 	.init_box		= ivbep_uncore_msr_init_box,
4329 	.disable_box		= snbep_uncore_msr_disable_box,
4330 	.enable_box		= snbep_uncore_msr_enable_box,
4331 	.disable_event		= snbep_uncore_msr_disable_event,
4332 	.enable_event		= snr_cha_enable_event,
4333 	.read_counter		= uncore_msr_read_counter,
4334 	.hw_config		= snr_cha_hw_config,
4335 };
4336 
4337 static struct intel_uncore_type snr_uncore_chabox = {
4338 	.name			= "cha",
4339 	.num_counters		= 4,
4340 	.num_boxes		= 6,
4341 	.perf_ctr_bits		= 48,
4342 	.event_ctl		= SNR_CHA_MSR_PMON_CTL0,
4343 	.perf_ctr		= SNR_CHA_MSR_PMON_CTR0,
4344 	.box_ctl		= SNR_CHA_MSR_PMON_BOX_CTL,
4345 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
4346 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
4347 	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
4348 	.ops			= &snr_uncore_chabox_ops,
4349 	.format_group		= &snr_uncore_chabox_format_group,
4350 };
4351 
4352 static struct attribute *snr_uncore_iio_formats_attr[] = {
4353 	&format_attr_event.attr,
4354 	&format_attr_umask.attr,
4355 	&format_attr_edge.attr,
4356 	&format_attr_inv.attr,
4357 	&format_attr_thresh9.attr,
4358 	&format_attr_ch_mask2.attr,
4359 	&format_attr_fc_mask2.attr,
4360 	NULL,
4361 };
4362 
4363 static const struct attribute_group snr_uncore_iio_format_group = {
4364 	.name = "format",
4365 	.attrs = snr_uncore_iio_formats_attr,
4366 };
4367 
4368 static struct intel_uncore_type snr_uncore_iio = {
4369 	.name			= "iio",
4370 	.num_counters		= 4,
4371 	.num_boxes		= 5,
4372 	.perf_ctr_bits		= 48,
4373 	.event_ctl		= SNR_IIO_MSR_PMON_CTL0,
4374 	.perf_ctr		= SNR_IIO_MSR_PMON_CTR0,
4375 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4376 	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
4377 	.box_ctl		= SNR_IIO_MSR_PMON_BOX_CTL,
4378 	.msr_offset		= SNR_IIO_MSR_OFFSET,
4379 	.ops			= &ivbep_uncore_msr_ops,
4380 	.format_group		= &snr_uncore_iio_format_group,
4381 };
4382 
4383 static struct intel_uncore_type snr_uncore_irp = {
4384 	.name			= "irp",
4385 	.num_counters		= 2,
4386 	.num_boxes		= 5,
4387 	.perf_ctr_bits		= 48,
4388 	.event_ctl		= SNR_IRP0_MSR_PMON_CTL0,
4389 	.perf_ctr		= SNR_IRP0_MSR_PMON_CTR0,
4390 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4391 	.box_ctl		= SNR_IRP0_MSR_PMON_BOX_CTL,
4392 	.msr_offset		= SNR_IRP_MSR_OFFSET,
4393 	.ops			= &ivbep_uncore_msr_ops,
4394 	.format_group		= &ivbep_uncore_format_group,
4395 };
4396 
4397 static struct intel_uncore_type snr_uncore_m2pcie = {
4398 	.name		= "m2pcie",
4399 	.num_counters	= 4,
4400 	.num_boxes	= 5,
4401 	.perf_ctr_bits	= 48,
4402 	.event_ctl	= SNR_M2PCIE_MSR_PMON_CTL0,
4403 	.perf_ctr	= SNR_M2PCIE_MSR_PMON_CTR0,
4404 	.box_ctl	= SNR_M2PCIE_MSR_PMON_BOX_CTL,
4405 	.msr_offset	= SNR_M2PCIE_MSR_OFFSET,
4406 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4407 	.ops		= &ivbep_uncore_msr_ops,
4408 	.format_group	= &ivbep_uncore_format_group,
4409 };
4410 
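/*
 * Event selects 0xb-0xe are the PCU frequency/occupancy band events; each
 * takes a band threshold from config1 (exposed as the filter_band0-3
 * format attributes) and programs it into the shared PCU FILTER MSR.
 */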
4411 static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4412 {
4413 	struct hw_perf_event *hwc = &event->hw;
4414 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4415 	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
4416 
4417 	if (ev_sel >= 0xb && ev_sel <= 0xe) {
4418 		reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
4419 		reg1->idx = ev_sel - 0xb;
4420 		reg1->config = event->attr.config1 & (0xff << reg1->idx);
4421 	}
4422 	return 0;
4423 }
4424 
4425 static struct intel_uncore_ops snr_uncore_pcu_ops = {
4426 	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
4427 	.hw_config		= snr_pcu_hw_config,
4428 	.get_constraint		= snbep_pcu_get_constraint,
4429 	.put_constraint		= snbep_pcu_put_constraint,
4430 };
4431 
4432 static struct intel_uncore_type snr_uncore_pcu = {
4433 	.name			= "pcu",
4434 	.num_counters		= 4,
4435 	.num_boxes		= 1,
4436 	.perf_ctr_bits		= 48,
4437 	.perf_ctr		= SNR_PCU_MSR_PMON_CTR0,
4438 	.event_ctl		= SNR_PCU_MSR_PMON_CTL0,
4439 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4440 	.box_ctl		= SNR_PCU_MSR_PMON_BOX_CTL,
4441 	.num_shared_regs	= 1,
4442 	.ops			= &snr_uncore_pcu_ops,
4443 	.format_group		= &skx_uncore_pcu_format_group,
4444 };
4445 
4446 enum perf_uncore_snr_iio_freerunning_type_id {
4447 	SNR_IIO_MSR_IOCLK,
4448 	SNR_IIO_MSR_BW_IN,
4449 
4450 	SNR_IIO_FREERUNNING_TYPE_MAX,
4451 };
4452 
4453 static struct freerunning_counters snr_iio_freerunning[] = {
4454 	[SNR_IIO_MSR_IOCLK]	= { 0x1eac, 0x1, 0x10, 1, 48 },
4455 	[SNR_IIO_MSR_BW_IN]	= { 0x1f00, 0x1, 0x10, 8, 48 },
4456 };
4457 
4458 static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
4459 	/* Free-Running IIO CLOCKS Counter */
4460 	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
4461 	/* Free-Running IIO BANDWIDTH IN Counters */
4462 	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
4463 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
4464 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
4465 	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
4466 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
4467 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
4468 	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
4469 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
4470 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
4471 	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
4472 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
4473 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
4474 	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
4475 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
4476 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
4477 	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
4478 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
4479 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
4480 	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
4481 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
4482 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
4483 	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
4484 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
4485 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
4486 	{ /* end: all zeroes */ },
4487 };
4488 
4489 static struct intel_uncore_type snr_uncore_iio_free_running = {
4490 	.name			= "iio_free_running",
4491 	.num_counters		= 9,
4492 	.num_boxes		= 5,
4493 	.num_freerunning_types	= SNR_IIO_FREERUNNING_TYPE_MAX,
4494 	.freerunning		= snr_iio_freerunning,
4495 	.ops			= &skx_uncore_iio_freerunning_ops,
4496 	.event_descs		= snr_uncore_iio_freerunning_events,
4497 	.format_group		= &skx_uncore_iio_freerunning_format_group,
4498 };
4499 
4500 static struct intel_uncore_type *snr_msr_uncores[] = {
4501 	&snr_uncore_ubox,
4502 	&snr_uncore_chabox,
4503 	&snr_uncore_iio,
4504 	&snr_uncore_irp,
4505 	&snr_uncore_m2pcie,
4506 	&snr_uncore_pcu,
4507 	&snr_uncore_iio_free_running,
4508 	NULL,
4509 };
4510 
4511 void snr_uncore_cpu_init(void)
4512 {
4513 	uncore_msr_uncores = snr_msr_uncores;
4514 }
4515 
4516 static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4517 {
4518 	struct pci_dev *pdev = box->pci_dev;
4519 	int box_ctl = uncore_pci_box_ctl(box);
4520 
4521 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4522 	pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
4523 }
4524 
4525 static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
4526 	.init_box	= snr_m2m_uncore_pci_init_box,
4527 	.disable_box	= snbep_uncore_pci_disable_box,
4528 	.enable_box	= snbep_uncore_pci_enable_box,
4529 	.disable_event	= snbep_uncore_pci_disable_event,
4530 	.enable_event	= snbep_uncore_pci_enable_event,
4531 	.read_counter	= snbep_uncore_pci_read_counter,
4532 };
4533 
4534 static struct attribute *snr_m2m_uncore_formats_attr[] = {
4535 	&format_attr_event.attr,
4536 	&format_attr_umask_ext3.attr,
4537 	&format_attr_edge.attr,
4538 	&format_attr_inv.attr,
4539 	&format_attr_thresh8.attr,
4540 	NULL,
4541 };
4542 
4543 static const struct attribute_group snr_m2m_uncore_format_group = {
4544 	.name = "format",
4545 	.attrs = snr_m2m_uncore_formats_attr,
4546 };
4547 
4548 static struct intel_uncore_type snr_uncore_m2m = {
4549 	.name		= "m2m",
4550 	.num_counters   = 4,
4551 	.num_boxes	= 1,
4552 	.perf_ctr_bits	= 48,
4553 	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
4554 	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
4555 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4556 	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
4557 	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
4558 	.ops		= &snr_m2m_uncore_pci_ops,
4559 	.format_group	= &snr_m2m_uncore_format_group,
4560 };
4561 
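/*
 * Some SNR PMON control registers carry an extended umask in the upper
 * 32 bits of the event config, so enabling an event writes the low and
 * high dwords of the config separately.
 */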
4562 static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
4563 {
4564 	struct pci_dev *pdev = box->pci_dev;
4565 	struct hw_perf_event *hwc = &event->hw;
4566 
4567 	pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
4568 	pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
4569 }
4570 
4571 static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = {
4572 	.init_box	= snr_m2m_uncore_pci_init_box,
4573 	.disable_box	= snbep_uncore_pci_disable_box,
4574 	.enable_box	= snbep_uncore_pci_enable_box,
4575 	.disable_event	= snbep_uncore_pci_disable_event,
4576 	.enable_event	= snr_uncore_pci_enable_event,
4577 	.read_counter	= snbep_uncore_pci_read_counter,
4578 };
4579 
4580 static struct intel_uncore_type snr_uncore_pcie3 = {
4581 	.name		= "pcie3",
4582 	.num_counters	= 4,
4583 	.num_boxes	= 1,
4584 	.perf_ctr_bits	= 48,
4585 	.perf_ctr	= SNR_PCIE3_PCI_PMON_CTR0,
4586 	.event_ctl	= SNR_PCIE3_PCI_PMON_CTL0,
4587 	.event_mask	= SKX_IIO_PMON_RAW_EVENT_MASK,
4588 	.event_mask_ext	= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
4589 	.box_ctl	= SNR_PCIE3_PCI_PMON_BOX_CTL,
4590 	.ops		= &snr_pcie3_uncore_pci_ops,
4591 	.format_group	= &skx_uncore_iio_format_group,
4592 };
4593 
4594 enum {
4595 	SNR_PCI_UNCORE_M2M,
4596 	SNR_PCI_UNCORE_PCIE3,
4597 };
4598 
4599 static struct intel_uncore_type *snr_pci_uncores[] = {
4600 	[SNR_PCI_UNCORE_M2M]		= &snr_uncore_m2m,
4601 	[SNR_PCI_UNCORE_PCIE3]		= &snr_uncore_pcie3,
4602 	NULL,
4603 };
4604 
4605 static const struct pci_device_id snr_uncore_pci_ids[] = {
4606 	{ /* M2M */
4607 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
4608 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
4609 	},
4610 	{ /* end: all zeroes */ }
4611 };
4612 
4613 static struct pci_driver snr_uncore_pci_driver = {
4614 	.name		= "snr_uncore",
4615 	.id_table	= snr_uncore_pci_ids,
4616 };
4617 
4618 static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
4619 	{ /* PCIe3 RP */
4620 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
4621 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
4622 	},
4623 	{ /* end: all zeroes */ }
4624 };
4625 
4626 static struct pci_driver snr_uncore_pci_sub_driver = {
4627 	.name		= "snr_uncore_sub",
4628 	.id_table	= snr_uncore_pci_sub_ids,
4629 };
4630 
4631 int snr_uncore_pci_init(void)
4632 {
4633 	/* SNR UBOX DID */
4634 	int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
4635 					 SKX_GIDNIDMAP, true);
4636 
4637 	if (ret)
4638 		return ret;
4639 
4640 	uncore_pci_uncores = snr_pci_uncores;
4641 	uncore_pci_driver = &snr_uncore_pci_driver;
4642 	uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
4643 	return 0;
4644 }
4645 
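/*
 * DID 0x3451 is the SNR memory-controller config device.  Walk every
 * instance in the system and use the bus-to-physical-package map set up by
 * snbep_pci2phy_map_init() to find the one belonging to package 'id'.
 */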
4646 static struct pci_dev *snr_uncore_get_mc_dev(int id)
4647 {
4648 	struct pci_dev *mc_dev = NULL;
4649 	int phys_id, pkg;
4650 
4651 	while (1) {
4652 		mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
4653 		if (!mc_dev)
4654 			break;
4655 		phys_id = uncore_pcibus_to_physid(mc_dev->bus);
4656 		if (phys_id < 0)
4657 			continue;
4658 		pkg = topology_phys_to_logical_pkg(phys_id);
4659 		if (pkg < 0)
4660 			continue;
4661 		else if (pkg == id)
4662 			break;
4663 	}
4664 	return mc_dev;
4665 }
4666 
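/*
 * The PMON MMIO base is pieced together from two config registers: the
 * high bits come from SNR_IMC_MMIO_BASE_OFFSET (shifted left by 23) and
 * the low bits from the per-MC MEMn offset register (shifted left by 12);
 * box_ctl is then the PMON block's offset within that mapping.
 */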
static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
				       unsigned int box_ctl, int mem_offset)
{
	struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
	struct intel_uncore_type *type = box->pmu->type;
	resource_size_t addr;
	u32 pci_dword;

	if (!pdev)
		return;

	pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
	addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;

	pci_read_config_dword(pdev, mem_offset, &pci_dword);
	addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;

	addr += box_ctl;

	box->io_addr = ioremap(addr, type->mmio_map_size);
	if (!box->io_addr) {
		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
		return;
	}

	writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
}

static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
{
	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
				   SNR_IMC_MMIO_MEM0_OFFSET);
}

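/* Freeze/unfreeze all counters in the box via the FRZ bit of box control. */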
static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
{
	u32 config;

	if (!box->io_addr)
		return;

	config = readl(box->io_addr);
	config |= SNBEP_PMON_BOX_CTL_FRZ;
	writel(config, box->io_addr);
}

static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
{
	u32 config;

	if (!box->io_addr)
		return;

	config = readl(box->io_addr);
	config &= ~SNBEP_PMON_BOX_CTL_FRZ;
	writel(config, box->io_addr);
}

static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
		return;

	writel(hwc->config | SNBEP_PMON_CTL_EN,
	       box->io_addr + hwc->config_base);
}

static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
					  struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
		return;

	writel(hwc->config, box->io_addr + hwc->config_base);
}

static struct intel_uncore_ops snr_uncore_mmio_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

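/* CAS count scale: 64 bytes per CAS / 2^20 bytes = 6.103515625e-5 MiB. */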
static struct uncore_event_desc snr_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_type snr_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &snr_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};

enum perf_uncore_snr_imc_freerunning_type_id {
	SNR_IMC_DCLK,
	SNR_IMC_DDR,

	SNR_IMC_FREERUNNING_TYPE_MAX,
};

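/* { counter_base, counter_offset, box_offset, num_counters, bits } */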
static struct freerunning_counters snr_imc_freerunning[] = {
	[SNR_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[SNR_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
};

static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,		"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct intel_uncore_type snr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 1,
	.num_freerunning_types	= SNR_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= snr_imc_freerunning,
	.ops			= &snr_uncore_imc_freerunning_ops,
	.event_descs		= snr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *snr_mmio_uncores[] = {
	&snr_uncore_imc,
	&snr_uncore_imc_free_running,
	NULL,
};

void snr_uncore_mmio_init(void)
{
	uncore_mmio_uncores = snr_mmio_uncores;
}

/* end of SNR uncore support */

/* ICX uncore support */

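/* Per-CHA MSR offsets, applied on top of the ICX_C34_MSR_PMON_* registers. */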
static unsigned icx_cha_msr_offsets[] = {
	0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
	0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
	0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
	0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0,   0xe,
	0x1c,  0x2a,  0x38,  0x46,
};

static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);

	if (tie_en) {
		reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
			    icx_cha_msr_offsets[box->pmu->pmu_idx];
		reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
		reg1->idx = 0;
	}

	return 0;
}

static struct intel_uncore_ops icx_uncore_chabox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= snr_cha_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= icx_cha_hw_config,
};

static struct intel_uncore_type icx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_C34_MSR_PMON_CTL0,
	.perf_ctr		= ICX_C34_MSR_PMON_CTR0,
	.box_ctl		= ICX_C34_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_cha_msr_offsets,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &icx_uncore_chabox_ops,
	.format_group		= &snr_uncore_chabox_format_group,
};

static unsigned icx_msr_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

static struct event_constraint icx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type icx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_IIO_MSR_PMON_CTL0,
	.perf_ctr		= ICX_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= ICX_IIO_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_msr_offsets,
	.constraints		= icx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &snr_uncore_iio_format_group,
};

static struct intel_uncore_type icx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= ICX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= ICX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_msr_offsets,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};

static struct event_constraint icx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type icx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.event_ctl	= ICX_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= ICX_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= ICX_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offsets	= icx_msr_offsets,
	.constraints	= icx_uncore_m2pcie_constraints,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};

enum perf_uncore_icx_iio_freerunning_type_id {
	ICX_IIO_MSR_IOCLK,
	ICX_IIO_MSR_BW_IN,

	ICX_IIO_FREERUNNING_TYPE_MAX,
};

static unsigned icx_iio_clk_freerunning_box_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

static unsigned icx_iio_bw_freerunning_box_offsets[] = {
	0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
};

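/* { counter_base, counter_offset, box_offset, num_counters, bits, box_offsets } */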
static struct freerunning_counters icx_iio_freerunning[] = {
	[ICX_IIO_MSR_IOCLK]	= { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
	[ICX_IIO_MSR_BW_IN]	= { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
};

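/* Bandwidth scale: 4 bytes per count / 2^20 bytes = 3.814697266e-6 MiB. */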
static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_type icx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 6,
	.num_freerunning_types	= ICX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= icx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= icx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *icx_msr_uncores[] = {
	&skx_uncore_ubox,
	&icx_uncore_chabox,
	&icx_uncore_iio,
	&icx_uncore_irp,
	&icx_uncore_m2pcie,
	&skx_uncore_pcu,
	&icx_uncore_iio_free_running,
	NULL,
};

/*
 * To determine the number of CHAs, read the CAPID6 (low) and CAPID7
 * (high) registers, which are located at Device 30, Function 3.
 */
#define ICX_CAPID6		0x9c
#define ICX_CAPID7		0xa0

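/*
 * Each set capability bit corresponds to one available CHA, so the CHA
 * count is the population count of the combined 64-bit CAPID6/7 value.
 */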
static u64 icx_count_chabox(void)
{
	struct pci_dev *dev = NULL;
	u64 caps = 0;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
	if (!dev)
		goto out;

	pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
	pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
out:
	pci_dev_put(dev);
	return hweight64(caps);
}

void icx_uncore_cpu_init(void)
{
	u64 num_boxes = icx_count_chabox();

	if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
		return;
	icx_uncore_chabox.num_boxes = num_boxes;
	uncore_msr_uncores = icx_msr_uncores;
}

static struct intel_uncore_type icx_uncore_m2m = {
	.name		= "m2m",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};

static struct attribute *icx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group icx_upi_uncore_format_group = {
	.name = "format",
	.attrs = icx_upi_uncore_formats_attr,
};

static struct intel_uncore_type icx_uncore_upi = {
	.name		= "upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext = ICX_UPI_CTL_UMASK_EXT,
	.box_ctl	= ICX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &icx_upi_uncore_format_group,
};

static struct event_constraint icx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type icx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_M3UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_M3UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= ICX_M3UPI_PCI_PMON_BOX_CTL,
	.constraints	= icx_uncore_m3upi_constraints,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};

enum {
	ICX_PCI_UNCORE_M2M,
	ICX_PCI_UNCORE_UPI,
	ICX_PCI_UNCORE_M3UPI,
};

static struct intel_uncore_type *icx_pci_uncores[] = {
	[ICX_PCI_UNCORE_M2M]		= &icx_uncore_m2m,
	[ICX_PCI_UNCORE_UPI]		= &icx_uncore_upi,
	[ICX_PCI_UNCORE_M3UPI]		= &icx_uncore_m3upi,
	NULL,
};

static const struct pci_device_id icx_uncore_pci_ids[] = {
	{ /* M2M 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
	},
	{ /* M2M 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
	},
	{ /* M2M 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
	},
	{ /* UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
	},
	{ /* M3UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver icx_uncore_pci_driver = {
	.name		= "icx_uncore",
	.id_table	= icx_uncore_pci_ids,
};

int icx_uncore_pci_init(void)
{
	/* ICX UBOX DID */
	int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
					 SKX_GIDNIDMAP, true);

	if (ret)
		return ret;

	uncore_pci_uncores = icx_pci_uncores;
	uncore_pci_driver = &icx_uncore_pci_driver;
	return 0;
}

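/*
 * ICX packs ICX_NUMBER_IMC_CHN channels into each memory controller: the
 * channel index selects the PMON block within the MC's MMIO range, while
 * the MC index selects which MEMn base-address register to read.
 */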
static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
{
	unsigned int box_ctl = box->pmu->type->box_ctl +
			       box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
	int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
			 SNR_IMC_MMIO_MEM0_OFFSET;

	__snr_uncore_mmio_init_box(box, box_ctl, mem_offset);
}

static struct intel_uncore_ops icx_uncore_mmio_ops = {
	.init_box	= icx_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

static struct intel_uncore_type icx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &icx_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};

enum perf_uncore_icx_imc_freerunning_type_id {
	ICX_IMC_DCLK,
	ICX_IMC_DDR,
	ICX_IMC_DDRT,

	ICX_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters icx_imc_freerunning[] = {
	[ICX_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[ICX_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
	[ICX_IMC_DDRT]	= { 0x22a0, 0x8, 0, 2, 48 },
};

static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,			"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,		"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,			"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,		"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit,		"MiB"),

	INTEL_UNCORE_EVENT_DESC(ddrt_read,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

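/*
 * Free-running IMC counters expose one box per memory controller, so
 * pmu_idx indexes the controller directly with no per-channel stride.
 */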
static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
			 SNR_IMC_MMIO_MEM0_OFFSET;

	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box), mem_offset);
}

static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
	.init_box	= icx_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct intel_uncore_type icx_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 5,
	.num_boxes		= 4,
	.num_freerunning_types	= ICX_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= icx_imc_freerunning,
	.ops			= &icx_uncore_imc_freerunning_ops,
	.event_descs		= icx_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *icx_mmio_uncores[] = {
	&icx_uncore_imc,
	&icx_uncore_imc_free_running,
	NULL,
};

void icx_uncore_mmio_init(void)
{
	uncore_mmio_uncores = icx_mmio_uncores;
}

/* end of ICX uncore support */