1  // SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
2  /*
3   * Copyright(c) 2015 - 2020 Intel Corporation.
4   * Copyright(c) 2021 Cornelis Networks.
5   */
6  
7  /*
8   * This file contains all of the code that is specific to the HFI chip
9   */
10  
11  #include <linux/pci.h>
12  #include <linux/delay.h>
13  #include <linux/interrupt.h>
14  #include <linux/module.h>
15  
16  #include "hfi.h"
17  #include "trace.h"
18  #include "mad.h"
19  #include "pio.h"
20  #include "sdma.h"
21  #include "eprom.h"
22  #include "efivar.h"
23  #include "platform.h"
24  #include "aspm.h"
25  #include "affinity.h"
26  #include "debugfs.h"
27  #include "fault.h"
28  #include "netdev.h"
29  
30  uint num_vls = HFI1_MAX_VLS_SUPPORTED;
31  module_param(num_vls, uint, S_IRUGO);
32  MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
33  
34  /*
35   * Default time to aggregate two 10K packets from the idle state
36   * (timer not running). The timer starts at the end of the first packet,
37   * so only the time for one 10K packet and header plus a bit extra is needed.
38   * 10 * 1024 + 64 header bytes = 10304 bytes
39   * 10304 bytes / 12.5 GB/s = 824.32 ns
40   */
41  uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
42  module_param(rcv_intr_timeout, uint, S_IRUGO);
43  MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
44  
45  uint rcv_intr_count = 16; /* same as qib */
46  module_param(rcv_intr_count, uint, S_IRUGO);
47  MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
48  
49  ushort link_crc_mask = SUPPORTED_CRCS;
50  module_param(link_crc_mask, ushort, S_IRUGO);
51  MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
52  
53  uint loopback;
54  module_param_named(loopback, loopback, uint, S_IRUGO);
55  MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
56  
57  /* Other driver tunables */
58  uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
59  static ushort crc_14b_sideband = 1;
60  static uint use_flr = 1;
61  uint quick_linkup; /* skip LNI */
62  
63  struct flag_table {
64  	u64 flag;	/* the flag */
65  	char *str;	/* description string */
66  	u16 extra;	/* extra information */
67  	u16 unused0;
68  	u32 unused1;
69  };
70  
71  /* str must be a string constant */
72  #define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
73  #define FLAG_ENTRY0(str, flag) {flag, str, 0}
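
/*
 * Illustration only: FLAG_ENTRY0("CceCsrParityErr",
 * CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK) expands to the initializer
 * { CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK, "CceCsrParityErr", 0 },
 * i.e. flag, str, and extra; the unused0/unused1 members are left
 * zero-initialized.
 */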
74  
75  /* Send Error Consequences */
76  #define SEC_WRITE_DROPPED	0x1
77  #define SEC_PACKET_DROPPED	0x2
78  #define SEC_SC_HALTED		0x4	/* per-context only */
79  #define SEC_SPC_FREEZE		0x8	/* per-HFI only */
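
/*
 * These consequence bits may be combined in a flag_table entry's
 * "extra" field, e.g. pio_err_status_flags entry 23 below uses
 * SEC_WRITE_DROPPED | SEC_SPC_FREEZE.
 */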
80  
81  #define DEFAULT_KRCVQS		  2
82  #define MIN_KERNEL_KCTXTS         2
83  #define FIRST_KERNEL_KCTXT        1
84  
85  /*
86   * RSM instance allocation
87   *   0 - User Fecn Handling
88   *   1 - Vnic
89   *   2 - AIP
90   *   3 - Verbs
91   */
92  #define RSM_INS_FECN              0
93  #define RSM_INS_VNIC              1
94  #define RSM_INS_AIP               2
95  #define RSM_INS_VERBS             3
96  
97  /* Bit offset into the GUID which carries HFI id information */
98  #define GUID_HFI_INDEX_SHIFT     39
99  
100  /* extract the emulation revision */
101  #define emulator_rev(dd) ((dd)->irev >> 8)
102  /* parallel and serial emulation versions are 3 and 4 respectively */
103  #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
104  #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
105  
106  /* RSM fields for Verbs */
107  /* packet type */
108  #define IB_PACKET_TYPE         2ull
109  #define QW_SHIFT               6ull
110  /* QPN[7..1] */
111  #define QPN_WIDTH              7ull
112  
113  /* LRH.BTH: QW 0, OFFSET 48 - for match */
114  #define LRH_BTH_QW             0ull
115  #define LRH_BTH_BIT_OFFSET     48ull
116  #define LRH_BTH_OFFSET(off)    ((LRH_BTH_QW << QW_SHIFT) | (off))
117  #define LRH_BTH_MATCH_OFFSET   LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
118  #define LRH_BTH_SELECT
119  #define LRH_BTH_MASK           3ull
120  #define LRH_BTH_VALUE          2ull
121  
122  /* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
123  #define LRH_SC_QW              0ull
124  #define LRH_SC_BIT_OFFSET      56ull
125  #define LRH_SC_OFFSET(off)     ((LRH_SC_QW << QW_SHIFT) | (off))
126  #define LRH_SC_MATCH_OFFSET    LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
127  #define LRH_SC_MASK            128ull
128  #define LRH_SC_VALUE           0ull
129  
130  /* SC[n..0] QW 0, OFFSET 60 - for select */
131  #define LRH_SC_SELECT_OFFSET  ((LRH_SC_QW << QW_SHIFT) | (60ull))
132  
133  /* QPN[m+n:1] QW 1, OFFSET 1 */
134  #define QPN_SELECT_OFFSET      ((1ull << QW_SHIFT) | (1ull))
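
/*
 * Illustration (derived from the macros above): an RSM match/select
 * offset packs a quad-word index above QW_SHIFT and a bit offset
 * within that quad word below it, e.g.
 *   LRH_BTH_MATCH_OFFSET = (0 << 6) | 48 = 48   (QW 0, bit 48)
 *   QPN_SELECT_OFFSET    = (1 << 6) |  1 = 65   (QW 1, bit 1)
 */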
135  
136  /* RSM fields for AIP */
137  /* LRH.BTH above is reused for this rule */
138  
139  /* BTH.DESTQP: QW 1, OFFSET 16 for match */
140  #define BTH_DESTQP_QW           1ull
141  #define BTH_DESTQP_BIT_OFFSET   16ull
142  #define BTH_DESTQP_OFFSET(off) ((BTH_DESTQP_QW << QW_SHIFT) | (off))
143  #define BTH_DESTQP_MATCH_OFFSET BTH_DESTQP_OFFSET(BTH_DESTQP_BIT_OFFSET)
144  #define BTH_DESTQP_MASK         0xFFull
145  #define BTH_DESTQP_VALUE        0x81ull
146  
147  /* DETH.SQPN: QW 1 Offset 56 for select */
148  /* We use the 8 most significant Source QPN bits as entropy for AIP */
149  #define DETH_AIP_SQPN_QW 3ull
150  #define DETH_AIP_SQPN_BIT_OFFSET 56ull
151  #define DETH_AIP_SQPN_OFFSET(off) ((DETH_AIP_SQPN_QW << QW_SHIFT) | (off))
152  #define DETH_AIP_SQPN_SELECT_OFFSET \
153  	DETH_AIP_SQPN_OFFSET(DETH_AIP_SQPN_BIT_OFFSET)
154  
155  /* RSM fields for Vnic */
156  /* L2_TYPE: QW 0, OFFSET 61 - for match */
157  #define L2_TYPE_QW             0ull
158  #define L2_TYPE_BIT_OFFSET     61ull
159  #define L2_TYPE_OFFSET(off)    ((L2_TYPE_QW << QW_SHIFT) | (off))
160  #define L2_TYPE_MATCH_OFFSET   L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
161  #define L2_TYPE_MASK           3ull
162  #define L2_16B_VALUE           2ull
163  
164  /* L4_TYPE QW 1, OFFSET 0 - for match */
165  #define L4_TYPE_QW              1ull
166  #define L4_TYPE_BIT_OFFSET      0ull
167  #define L4_TYPE_OFFSET(off)     ((L4_TYPE_QW << QW_SHIFT) | (off))
168  #define L4_TYPE_MATCH_OFFSET    L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
169  #define L4_16B_TYPE_MASK        0xFFull
170  #define L4_16B_ETH_VALUE        0x78ull
171  
172  /* 16B VESWID - for select */
173  #define L4_16B_HDR_VESWID_OFFSET  ((2 << QW_SHIFT) | (16ull))
174  /* 16B ENTROPY - for select */
175  #define L2_16B_ENTROPY_OFFSET     ((1 << QW_SHIFT) | (32ull))
176  
177  /* defines to build power on SC2VL table */
178  #define SC2VL_VAL( \
179  	num, \
180  	sc0, sc0val, \
181  	sc1, sc1val, \
182  	sc2, sc2val, \
183  	sc3, sc3val, \
184  	sc4, sc4val, \
185  	sc5, sc5val, \
186  	sc6, sc6val, \
187  	sc7, sc7val) \
188  ( \
189  	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
190  	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
191  	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
192  	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
193  	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
194  	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
195  	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
196  	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
197  )
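
/*
 * Illustration only: with num = 0, SC2VL_VAL(0, 0, 0, 1, 1, ...) ORs
 * ((u64)0 << SEND_SC2VLT0_SC0_SHIFT) | ((u64)1 << SEND_SC2VLT0_SC1_SHIFT)
 * | ..., placing each scNval at its SC's shift position to build one
 * 64-bit SC-to-VL table value.
 */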
198  
199  #define DC_SC_VL_VAL( \
200  	range, \
201  	e0, e0val, \
202  	e1, e1val, \
203  	e2, e2val, \
204  	e3, e3val, \
205  	e4, e4val, \
206  	e5, e5val, \
207  	e6, e6val, \
208  	e7, e7val, \
209  	e8, e8val, \
210  	e9, e9val, \
211  	e10, e10val, \
212  	e11, e11val, \
213  	e12, e12val, \
214  	e13, e13val, \
215  	e14, e14val, \
216  	e15, e15val) \
217  ( \
218  	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
219  	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
220  	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
221  	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
222  	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
223  	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
224  	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
225  	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
226  	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
227  	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
228  	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
229  	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
230  	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
231  	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
232  	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
233  	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
234  )
235  
236  /* all CceStatus sub-block freeze bits */
237  #define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
238  			| CCE_STATUS_RXE_FROZE_SMASK \
239  			| CCE_STATUS_TXE_FROZE_SMASK \
240  			| CCE_STATUS_TXE_PIO_FROZE_SMASK)
241  /* all CceStatus sub-block TXE pause bits */
242  #define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
243  			| CCE_STATUS_TXE_PAUSED_SMASK \
244  			| CCE_STATUS_SDMA_PAUSED_SMASK)
245  /* all CceStatus sub-block RXE pause bits */
246  #define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
247  
248  #define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
249  #define CNTR_32BIT_MAX 0x00000000FFFFFFFF
250  
251  /*
252   * CCE Error flags.
253   */
254  static struct flag_table cce_err_status_flags[] = {
255  /* 0*/	FLAG_ENTRY0("CceCsrParityErr",
256  		CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
257  /* 1*/	FLAG_ENTRY0("CceCsrReadBadAddrErr",
258  		CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
259  /* 2*/	FLAG_ENTRY0("CceCsrWriteBadAddrErr",
260  		CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
261  /* 3*/	FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
262  		CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
263  /* 4*/	FLAG_ENTRY0("CceTrgtAccessErr",
264  		CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
265  /* 5*/	FLAG_ENTRY0("CceRspdDataParityErr",
266  		CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
267  /* 6*/	FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
268  		CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
269  /* 7*/	FLAG_ENTRY0("CceCsrCfgBusParityErr",
270  		CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
271  /* 8*/	FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
272  		CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
273  /* 9*/	FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
274  	    CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
275  /*10*/	FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
276  	    CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
277  /*11*/	FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
278  	    CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
279  /*12*/	FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
280  		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
281  /*13*/	FLAG_ENTRY0("PcicRetryMemCorErr",
282  		CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
283  /*14*/	FLAG_ENTRY0("PcicRetrySotMemCorErr",
284  		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
285  /*15*/	FLAG_ENTRY0("PcicPostHdQCorErr",
286  		CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
287  /*16*/	FLAG_ENTRY0("PcicPostDatQCorErr",
288  		CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
289  /*17*/	FLAG_ENTRY0("PcicCplHdQCorErr",
290  		CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
291  /*18*/	FLAG_ENTRY0("PcicCplDatQCorErr",
292  		CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
293  /*19*/	FLAG_ENTRY0("PcicNPostHQParityErr",
294  		CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
295  /*20*/	FLAG_ENTRY0("PcicNPostDatQParityErr",
296  		CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
297  /*21*/	FLAG_ENTRY0("PcicRetryMemUncErr",
298  		CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
299  /*22*/	FLAG_ENTRY0("PcicRetrySotMemUncErr",
300  		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
301  /*23*/	FLAG_ENTRY0("PcicPostHdQUncErr",
302  		CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
303  /*24*/	FLAG_ENTRY0("PcicPostDatQUncErr",
304  		CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
305  /*25*/	FLAG_ENTRY0("PcicCplHdQUncErr",
306  		CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
307  /*26*/	FLAG_ENTRY0("PcicCplDatQUncErr",
308  		CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
309  /*27*/	FLAG_ENTRY0("PcicTransmitFrontParityErr",
310  		CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
311  /*28*/	FLAG_ENTRY0("PcicTransmitBackParityErr",
312  		CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
313  /*29*/	FLAG_ENTRY0("PcicReceiveParityErr",
314  		CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
315  /*30*/	FLAG_ENTRY0("CceTrgtCplTimeoutErr",
316  		CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
317  /*31*/	FLAG_ENTRY0("LATriggered",
318  		CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
319  /*32*/	FLAG_ENTRY0("CceSegReadBadAddrErr",
320  		CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
321  /*33*/	FLAG_ENTRY0("CceSegWriteBadAddrErr",
322  		CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
323  /*34*/	FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
324  		CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
325  /*35*/	FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
326  		CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
327  /*36*/	FLAG_ENTRY0("CceMsixTableCorErr",
328  		CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
329  /*37*/	FLAG_ENTRY0("CceMsixTableUncErr",
330  		CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
331  /*38*/	FLAG_ENTRY0("CceIntMapCorErr",
332  		CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
333  /*39*/	FLAG_ENTRY0("CceIntMapUncErr",
334  		CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
335  /*40*/	FLAG_ENTRY0("CceMsixCsrParityErr",
336  		CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
337  /*41-63 reserved*/
338  };
339  
340  /*
341   * Misc Error flags
342   */
343  #define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
344  static struct flag_table misc_err_status_flags[] = {
345  /* 0*/	FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
346  /* 1*/	FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
347  /* 2*/	FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
348  /* 3*/	FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
349  /* 4*/	FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
350  /* 5*/	FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
351  /* 6*/	FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
352  /* 7*/	FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
353  /* 8*/	FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
354  /* 9*/	FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
355  /*10*/	FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
356  /*11*/	FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
357  /*12*/	FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
358  };
359  
360  /*
361   * TXE PIO Error flags and consequences
362   */
363  static struct flag_table pio_err_status_flags[] = {
364  /* 0*/	FLAG_ENTRY("PioWriteBadCtxt",
365  	SEC_WRITE_DROPPED,
366  	SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
367  /* 1*/	FLAG_ENTRY("PioWriteAddrParity",
368  	SEC_SPC_FREEZE,
369  	SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
370  /* 2*/	FLAG_ENTRY("PioCsrParity",
371  	SEC_SPC_FREEZE,
372  	SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
373  /* 3*/	FLAG_ENTRY("PioSbMemFifo0",
374  	SEC_SPC_FREEZE,
375  	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
376  /* 4*/	FLAG_ENTRY("PioSbMemFifo1",
377  	SEC_SPC_FREEZE,
378  	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
379  /* 5*/	FLAG_ENTRY("PioPccFifoParity",
380  	SEC_SPC_FREEZE,
381  	SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
382  /* 6*/	FLAG_ENTRY("PioPecFifoParity",
383  	SEC_SPC_FREEZE,
384  	SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
385  /* 7*/	FLAG_ENTRY("PioSbrdctlCrrelParity",
386  	SEC_SPC_FREEZE,
387  	SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
388  /* 8*/	FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
389  	SEC_SPC_FREEZE,
390  	SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
391  /* 9*/	FLAG_ENTRY("PioPktEvictFifoParityErr",
392  	SEC_SPC_FREEZE,
393  	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
394  /*10*/	FLAG_ENTRY("PioSmPktResetParity",
395  	SEC_SPC_FREEZE,
396  	SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
397  /*11*/	FLAG_ENTRY("PioVlLenMemBank0Unc",
398  	SEC_SPC_FREEZE,
399  	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
400  /*12*/	FLAG_ENTRY("PioVlLenMemBank1Unc",
401  	SEC_SPC_FREEZE,
402  	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
403  /*13*/	FLAG_ENTRY("PioVlLenMemBank0Cor",
404  	0,
405  	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
406  /*14*/	FLAG_ENTRY("PioVlLenMemBank1Cor",
407  	0,
408  	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
409  /*15*/	FLAG_ENTRY("PioCreditRetFifoParity",
410  	SEC_SPC_FREEZE,
411  	SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
412  /*16*/	FLAG_ENTRY("PioPpmcPblFifo",
413  	SEC_SPC_FREEZE,
414  	SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
415  /*17*/	FLAG_ENTRY("PioInitSmIn",
416  	0,
417  	SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
418  /*18*/	FLAG_ENTRY("PioPktEvictSmOrArbSm",
419  	SEC_SPC_FREEZE,
420  	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
421  /*19*/	FLAG_ENTRY("PioHostAddrMemUnc",
422  	SEC_SPC_FREEZE,
423  	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
424  /*20*/	FLAG_ENTRY("PioHostAddrMemCor",
425  	0,
426  	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
427  /*21*/	FLAG_ENTRY("PioWriteDataParity",
428  	SEC_SPC_FREEZE,
429  	SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
430  /*22*/	FLAG_ENTRY("PioStateMachine",
431  	SEC_SPC_FREEZE,
432  	SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
433  /*23*/	FLAG_ENTRY("PioWriteQwValidParity",
434  	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
435  	SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
436  /*24*/	FLAG_ENTRY("PioBlockQwCountParity",
437  	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
438  	SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
439  /*25*/	FLAG_ENTRY("PioVlfVlLenParity",
440  	SEC_SPC_FREEZE,
441  	SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
442  /*26*/	FLAG_ENTRY("PioVlfSopParity",
443  	SEC_SPC_FREEZE,
444  	SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
445  /*27*/	FLAG_ENTRY("PioVlFifoParity",
446  	SEC_SPC_FREEZE,
447  	SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
448  /*28*/	FLAG_ENTRY("PioPpmcBqcMemParity",
449  	SEC_SPC_FREEZE,
450  	SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
451  /*29*/	FLAG_ENTRY("PioPpmcSopLen",
452  	SEC_SPC_FREEZE,
453  	SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
454  /*30-31 reserved*/
455  /*32*/	FLAG_ENTRY("PioCurrentFreeCntParity",
456  	SEC_SPC_FREEZE,
457  	SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
458  /*33*/	FLAG_ENTRY("PioLastReturnedCntParity",
459  	SEC_SPC_FREEZE,
460  	SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
461  /*34*/	FLAG_ENTRY("PioPccSopHeadParity",
462  	SEC_SPC_FREEZE,
463  	SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
464  /*35*/	FLAG_ENTRY("PioPecSopHeadParityErr",
465  	SEC_SPC_FREEZE,
466  	SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
467  /*36-63 reserved*/
468  };
469  
470  /* TXE PIO errors that cause an SPC freeze */
471  #define ALL_PIO_FREEZE_ERR \
472  	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
473  	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
474  	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
475  	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
476  	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
477  	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
478  	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
479  	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
480  	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
481  	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
482  	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
483  	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
484  	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
485  	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
486  	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
487  	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
488  	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
489  	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
490  	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
491  	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
492  	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
493  	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
494  	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
495  	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
496  	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
497  	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
498  	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
499  	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
500  	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
501  
502  /*
503   * TXE SDMA Error flags
504   */
505  static struct flag_table sdma_err_status_flags[] = {
506  /* 0*/	FLAG_ENTRY0("SDmaRpyTagErr",
507  		SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
508  /* 1*/	FLAG_ENTRY0("SDmaCsrParityErr",
509  		SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
510  /* 2*/	FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
511  		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
512  /* 3*/	FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
513  		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
514  /*04-63 reserved*/
515  };
516  
517  /* TXE SDMA errors that cause an SPC freeze */
518  #define ALL_SDMA_FREEZE_ERR  \
519  		(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
520  		| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
521  		| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
522  
523  /* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
524  #define PORT_DISCARD_EGRESS_ERRS \
525  	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
526  	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
527  	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
528  
529  /*
530   * TXE Egress Error flags
531   */
532  #define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
533  static struct flag_table egress_err_status_flags[] = {
534  /* 0*/	FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
535  /* 1*/	FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
536  /* 2 reserved */
537  /* 3*/	FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
538  		SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
539  /* 4*/	FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
540  /* 5*/	FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
541  /* 6 reserved */
542  /* 7*/	FLAG_ENTRY0("TxPioLaunchIntfParityErr",
543  		SEES(TX_PIO_LAUNCH_INTF_PARITY)),
544  /* 8*/	FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
545  		SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
546  /* 9-10 reserved */
547  /*11*/	FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
548  		SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
549  /*12*/	FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
550  /*13*/	FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
551  /*14*/	FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
552  /*15*/	FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
553  /*16*/	FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
554  		SEES(TX_SDMA0_DISALLOWED_PACKET)),
555  /*17*/	FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
556  		SEES(TX_SDMA1_DISALLOWED_PACKET)),
557  /*18*/	FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
558  		SEES(TX_SDMA2_DISALLOWED_PACKET)),
559  /*19*/	FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
560  		SEES(TX_SDMA3_DISALLOWED_PACKET)),
561  /*20*/	FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
562  		SEES(TX_SDMA4_DISALLOWED_PACKET)),
563  /*21*/	FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
564  		SEES(TX_SDMA5_DISALLOWED_PACKET)),
565  /*22*/	FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
566  		SEES(TX_SDMA6_DISALLOWED_PACKET)),
567  /*23*/	FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
568  		SEES(TX_SDMA7_DISALLOWED_PACKET)),
569  /*24*/	FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
570  		SEES(TX_SDMA8_DISALLOWED_PACKET)),
571  /*25*/	FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
572  		SEES(TX_SDMA9_DISALLOWED_PACKET)),
573  /*26*/	FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
574  		SEES(TX_SDMA10_DISALLOWED_PACKET)),
575  /*27*/	FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
576  		SEES(TX_SDMA11_DISALLOWED_PACKET)),
577  /*28*/	FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
578  		SEES(TX_SDMA12_DISALLOWED_PACKET)),
579  /*29*/	FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
580  		SEES(TX_SDMA13_DISALLOWED_PACKET)),
581  /*30*/	FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
582  		SEES(TX_SDMA14_DISALLOWED_PACKET)),
583  /*31*/	FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
584  		SEES(TX_SDMA15_DISALLOWED_PACKET)),
585  /*32*/	FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
586  		SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
587  /*33*/	FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
588  		SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
589  /*34*/	FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
590  		SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
591  /*35*/	FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
592  		SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
593  /*36*/	FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
594  		SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
595  /*37*/	FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
596  		SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
597  /*38*/	FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
598  		SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
599  /*39*/	FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
600  		SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
601  /*40*/	FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
602  		SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
603  /*41*/	FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
604  /*42*/	FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
605  /*43*/	FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
606  /*44*/	FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
607  /*45*/	FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
608  /*46*/	FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
609  /*47*/	FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
610  /*48*/	FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
611  /*49*/	FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
612  /*50*/	FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
613  /*51*/	FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
614  /*52*/	FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
615  /*53*/	FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
616  /*54*/	FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
617  /*55*/	FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
618  /*56*/	FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
619  /*57*/	FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
620  /*58*/	FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
621  /*59*/	FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
622  /*60*/	FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
623  /*61*/	FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
624  /*62*/	FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
625  		SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
626  /*63*/	FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
627  		SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
628  };
629  
630  /*
631   * TXE Egress Error Info flags
632   */
633  #define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
634  static struct flag_table egress_err_info_flags[] = {
635  /* 0*/	FLAG_ENTRY0("Reserved", 0ull),
636  /* 1*/	FLAG_ENTRY0("VLErr", SEEI(VL)),
637  /* 2*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
638  /* 3*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
639  /* 4*/	FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
640  /* 5*/	FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
641  /* 6*/	FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
642  /* 7*/	FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
643  /* 8*/	FLAG_ENTRY0("RawErr", SEEI(RAW)),
644  /* 9*/	FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
645  /*10*/	FLAG_ENTRY0("GRHErr", SEEI(GRH)),
646  /*11*/	FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
647  /*12*/	FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
648  /*13*/	FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
649  /*14*/	FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
650  /*15*/	FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
651  /*16*/	FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
652  /*17*/	FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
653  /*18*/	FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
654  /*19*/	FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
655  /*20*/	FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
656  /*21*/	FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
657  };
658  
659  /* TXE Egress errors that cause an SPC freeze */
660  #define ALL_TXE_EGRESS_FREEZE_ERR \
661  	(SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
662  	| SEES(TX_PIO_LAUNCH_INTF_PARITY) \
663  	| SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
664  	| SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
665  	| SEES(TX_LAUNCH_CSR_PARITY) \
666  	| SEES(TX_SBRD_CTL_CSR_PARITY) \
667  	| SEES(TX_CONFIG_PARITY) \
668  	| SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
669  	| SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
670  	| SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
671  	| SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
672  	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
673  	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
674  	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
675  	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
676  	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
677  	| SEES(TX_CREDIT_RETURN_PARITY))
678  
679  /*
680   * TXE Send error flags
681   */
682  #define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
683  static struct flag_table send_err_status_flags[] = {
684  /* 0*/	FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
685  /* 1*/	FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
686  /* 2*/	FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
687  };
688  
689  /*
690   * TXE Send Context Error flags and consequences
691   */
692  static struct flag_table sc_err_status_flags[] = {
693  /* 0*/	FLAG_ENTRY("InconsistentSop",
694  		SEC_PACKET_DROPPED | SEC_SC_HALTED,
695  		SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
696  /* 1*/	FLAG_ENTRY("DisallowedPacket",
697  		SEC_PACKET_DROPPED | SEC_SC_HALTED,
698  		SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
699  /* 2*/	FLAG_ENTRY("WriteCrossesBoundary",
700  		SEC_WRITE_DROPPED | SEC_SC_HALTED,
701  		SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
702  /* 3*/	FLAG_ENTRY("WriteOverflow",
703  		SEC_WRITE_DROPPED | SEC_SC_HALTED,
704  		SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
705  /* 4*/	FLAG_ENTRY("WriteOutOfBounds",
706  		SEC_WRITE_DROPPED | SEC_SC_HALTED,
707  		SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
708  /* 5-63 reserved*/
709  };
710  
711  /*
712   * RXE Receive Error flags
713   */
714  #define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
715  static struct flag_table rxe_err_status_flags[] = {
716  /* 0*/	FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
717  /* 1*/	FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
718  /* 2*/	FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
719  /* 3*/	FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
720  /* 4*/	FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
721  /* 5*/	FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
722  /* 6*/	FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
723  /* 7*/	FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
724  /* 8*/	FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
725  /* 9*/	FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
726  /*10*/	FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
727  /*11*/	FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
728  /*12*/	FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
729  /*13*/	FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
730  /*14*/	FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
731  /*15*/	FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
732  /*16*/	FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
733  		RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
734  /*17*/	FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
735  /*18*/	FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
736  /*19*/	FLAG_ENTRY0("RxRbufBlockListReadUncErr",
737  		RXES(RBUF_BLOCK_LIST_READ_UNC)),
738  /*20*/	FLAG_ENTRY0("RxRbufBlockListReadCorErr",
739  		RXES(RBUF_BLOCK_LIST_READ_COR)),
740  /*21*/	FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
741  		RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
742  /*22*/	FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
743  		RXES(RBUF_CSR_QENT_CNT_PARITY)),
744  /*23*/	FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
745  		RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
746  /*24*/	FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
747  		RXES(RBUF_CSR_QVLD_BIT_PARITY)),
748  /*25*/	FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
749  /*26*/	FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
750  /*27*/	FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
751  		RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
752  /*28*/	FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
753  /*29*/	FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
754  /*30*/	FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
755  /*31*/	FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
756  /*32*/	FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
757  /*33*/	FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
758  /*34*/	FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
759  /*35*/	FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
760  		RXES(RBUF_FL_INITDONE_PARITY)),
761  /*36*/	FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
762  		RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
763  /*37*/	FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
764  /*38*/	FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
765  /*39*/	FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
766  /*40*/	FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
767  		RXES(LOOKUP_DES_PART1_UNC_COR)),
768  /*41*/	FLAG_ENTRY0("RxLookupDesPart2ParityErr",
769  		RXES(LOOKUP_DES_PART2_PARITY)),
770  /*42*/	FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
771  /*43*/	FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
772  /*44*/	FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
773  /*45*/	FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
774  /*46*/	FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
775  /*47*/	FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
776  /*48*/	FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
777  /*49*/	FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
778  /*50*/	FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
779  /*51*/	FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
780  /*52*/	FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
781  /*53*/	FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
782  /*54*/	FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
783  /*55*/	FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
784  /*56*/	FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
785  /*57*/	FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
786  /*58*/	FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
787  /*59*/	FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
788  /*60*/	FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
789  /*61*/	FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
790  /*62*/	FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
791  /*63*/	FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
792  };
793  
794  /* RXE errors that will trigger an SPC freeze */
795  #define ALL_RXE_FREEZE_ERR  \
796  	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
797  	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
798  	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
799  	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
800  	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
801  	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
802  	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
803  	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
804  	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
805  	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
806  	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
807  	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
808  	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
809  	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
810  	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
811  	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
812  	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
813  	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
814  	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
815  	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
816  	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
817  	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
818  	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
819  	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
820  	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
821  	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
822  	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
823  	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
824  	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
825  	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
826  	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
827  	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
828  	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
829  	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
830  	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
831  	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
832  	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
833  	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
834  	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
835  	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
836  	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
837  	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
838  	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
839  	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
840  
841  #define RXE_FREEZE_ABORT_MASK \
842  	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
843  	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
844  	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
845  
846  /*
847   * DCC Error Flags
848   */
849  #define DCCE(name) DCC_ERR_FLG_##name##_SMASK
850  static struct flag_table dcc_err_flags[] = {
851  	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
852  	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
853  	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
854  	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
855  	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
856  	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
857  	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
858  	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
859  	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
860  	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
861  	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
862  	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
863  	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
864  	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
865  	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
866  	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
867  	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
868  	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
869  	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
870  	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
871  	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
872  	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
873  	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
874  	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
875  	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
876  	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
877  	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
878  	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
879  	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
880  	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
881  	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
882  	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
883  	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
884  	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
885  	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
886  	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
887  	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
888  	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
889  	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
890  	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
891  	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
892  	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
893  	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
894  	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
895  	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
896  	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
897  };
898  
899  /*
900   * LCB error flags
901   */
902  #define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
903  static struct flag_table lcb_err_flags[] = {
904  /* 0*/	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
905  /* 1*/	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
906  /* 2*/	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
907  /* 3*/	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
908  		LCBE(ALL_LNS_FAILED_REINIT_TEST)),
909  /* 4*/	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
910  /* 5*/	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
911  /* 6*/	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
912  /* 7*/	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
913  /* 8*/	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
914  /* 9*/	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
915  /*10*/	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
916  /*11*/	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
917  /*12*/	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
918  /*13*/	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
919  		LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
920  /*14*/	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
921  /*15*/	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
922  /*16*/	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
923  /*17*/	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
924  /*18*/	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
925  /*19*/	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
926  		LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
927  /*20*/	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
928  /*21*/	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
929  /*22*/	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
930  /*23*/	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
931  /*24*/	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
932  /*25*/	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
933  /*26*/	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
934  		LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
935  /*27*/	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
936  /*28*/	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
937  		LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
938  /*29*/	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
939  		LCBE(REDUNDANT_FLIT_PARITY_ERR))
940  };
941  
942  /*
943   * DC8051 Error Flags
944   */
945  #define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
946  static struct flag_table dc8051_err_flags[] = {
947  	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
948  	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
949  	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
950  	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
951  	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
952  	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
953  	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
954  	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
955  	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
956  		    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
957  	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
958  };
959  
960  /*
961   * DC8051 Information Error flags
962   *
963   * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
964   */
965  static struct flag_table dc8051_info_err_flags[] = {
966  	FLAG_ENTRY0("Spico ROM check failed",  SPICO_ROM_FAILED),
967  	FLAG_ENTRY0("Unknown frame received",  UNKNOWN_FRAME),
968  	FLAG_ENTRY0("Target BER not met",      TARGET_BER_NOT_MET),
969  	FLAG_ENTRY0("Serdes internal loopback failure",
970  		    FAILED_SERDES_INTERNAL_LOOPBACK),
971  	FLAG_ENTRY0("Failed SerDes init",      FAILED_SERDES_INIT),
972  	FLAG_ENTRY0("Failed LNI(Polling)",     FAILED_LNI_POLLING),
973  	FLAG_ENTRY0("Failed LNI(Debounce)",    FAILED_LNI_DEBOUNCE),
974  	FLAG_ENTRY0("Failed LNI(EstbComm)",    FAILED_LNI_ESTBCOMM),
975  	FLAG_ENTRY0("Failed LNI(OptEq)",       FAILED_LNI_OPTEQ),
976  	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
977  	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
978  	FLAG_ENTRY0("Failed LNI(ConfigLT)",    FAILED_LNI_CONFIGLT),
979  	FLAG_ENTRY0("Host Handshake Timeout",  HOST_HANDSHAKE_TIMEOUT),
980  	FLAG_ENTRY0("External Device Request Timeout",
981  		    EXTERNAL_DEVICE_REQ_TIMEOUT),
982  };
983  
984  /*
985   * DC8051 Information Host Message flags
986   *
987   * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
988   */
989  static struct flag_table dc8051_info_host_msg_flags[] = {
990  	FLAG_ENTRY0("Host request done", 0x0001),
991  	FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
992  	FLAG_ENTRY0("BC SMA message", 0x0004),
993  	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
994  	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
995  	FLAG_ENTRY0("External device config request", 0x0020),
996  	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
997  	FLAG_ENTRY0("LinkUp achieved", 0x0080),
998  	FLAG_ENTRY0("Link going down", 0x0100),
999  	FLAG_ENTRY0("Link width downgraded", 0x0200),
1000  };
1001  
1002  static u32 encoded_size(u32 size);
1003  static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
1004  static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
1005  static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
1006  			       u8 *continuous);
1007  static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
1008  				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
1009  static void read_vc_remote_link_width(struct hfi1_devdata *dd,
1010  				      u8 *remote_tx_rate, u16 *link_widths);
1011  static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
1012  				    u8 *flag_bits, u16 *link_widths);
1013  static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1014  				  u8 *device_rev);
1015  static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1016  static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1017  			    u8 *tx_polarity_inversion,
1018  			    u8 *rx_polarity_inversion, u8 *max_rate);
1019  static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1020  				unsigned int context, u64 err_status);
1021  static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1022  static void handle_dcc_err(struct hfi1_devdata *dd,
1023  			   unsigned int context, u64 err_status);
1024  static void handle_lcb_err(struct hfi1_devdata *dd,
1025  			   unsigned int context, u64 err_status);
1026  static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1027  static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1028  static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1029  static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1030  static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1031  static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1032  static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1033  static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1034  static void set_partition_keys(struct hfi1_pportdata *ppd);
1035  static const char *link_state_name(u32 state);
1036  static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1037  					  u32 state);
1038  static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1039  			   u64 *out_data);
1040  static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1041  static int thermal_init(struct hfi1_devdata *dd);
1042  
1043  static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
1044  static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
1045  					    int msecs);
1046  static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1047  				  int msecs);
1048  static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
1049  static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
1050  static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1051  				   int msecs);
1052  static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
1053  					 int msecs);
1054  static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1055  static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
1056  static void handle_temp_err(struct hfi1_devdata *dd);
1057  static void dc_shutdown(struct hfi1_devdata *dd);
1058  static void dc_start(struct hfi1_devdata *dd);
1059  static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
1060  			   unsigned int *np);
1061  static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
1062  static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
1063  static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
1064  static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width);
1065  
1066  /*
1067   * Error interrupt table entry.  This is used as input to the interrupt
1068   * "clear down" routine used for all second tier error interrupt register.
1069   * Second tier interrupt registers have a single bit representing them
1070   * in the top-level CceIntStatus.
1071   */
1072  struct err_reg_info {
1073  	u32 status;		/* status CSR offset */
1074  	u32 clear;		/* clear CSR offset */
1075  	u32 mask;		/* mask CSR offset */
1076  	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1077  	const char *desc;
1078  };
1079  
1080  #define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START)
1081  #define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START)
1082  #define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START)
1083  
1084  /*
1085   * Helpers for building HFI and DC error interrupt table entries.  Different
1086   * helpers are needed because of inconsistent register names.
1087   */
1088  #define EE(reg, handler, desc) \
1089  	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1090  		handler, desc }
1091  #define DC_EE1(reg, handler, desc) \
1092  	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1093  #define DC_EE2(reg, handler, desc) \
1094  	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
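
/*
 * For illustration: EE(CCE_ERR, handle_cce_err, "CceErr") expands to
 *   { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK, handle_cce_err, "CceErr" }
 * and DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err") expands to
 *   { DCC_ERR_FLG, DCC_ERR_FLG_CLR, DCC_ERR_FLG_EN, handle_dcc_err, "DCC Err" }
 * (the CSR names are produced by token pasting on the reg argument).
 */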
1095  
1096  /*
1097   * Table of the "misc" grouping of error interrupts.  Each entry refers to
1098   * another register containing more information.
1099   */
1100  static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1101  /* 0*/	EE(CCE_ERR,		handle_cce_err,    "CceErr"),
1102  /* 1*/	EE(RCV_ERR,		handle_rxe_err,    "RxeErr"),
1103  /* 2*/	EE(MISC_ERR,	handle_misc_err,   "MiscErr"),
1104  /* 3*/	{ 0, 0, 0, NULL }, /* reserved */
1105  /* 4*/	EE(SEND_PIO_ERR,    handle_pio_err,    "PioErr"),
1106  /* 5*/	EE(SEND_DMA_ERR,    handle_sdma_err,   "SDmaErr"),
1107  /* 6*/	EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1108  /* 7*/	EE(SEND_ERR,	handle_txe_err,    "TxeErr")
1109  	/* the rest are reserved */
1110  };
1111  
1112  /*
1113   * Index into the Various section of the interrupt sources
1114   * corresponding to the Critical Temperature interrupt.
1115   */
1116  #define TCRIT_INT_SOURCE 4
1117  
1118  /*
1119   * SDMA error interrupt entry - refers to another register containing more
1120   * information.
1121   */
1122  static const struct err_reg_info sdma_eng_err =
1123  	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1124  
1125  static const struct err_reg_info various_err[NUM_VARIOUS] = {
1126  /* 0*/	{ 0, 0, 0, NULL }, /* PbcInt */
1127  /* 1*/	{ 0, 0, 0, NULL }, /* GpioAssertInt */
1128  /* 2*/	EE(ASIC_QSFP1,	handle_qsfp_int,	"QSFP1"),
1129  /* 3*/	EE(ASIC_QSFP2,	handle_qsfp_int,	"QSFP2"),
1130  /* 4*/	{ 0, 0, 0, NULL }, /* TCritInt */
1131  	/* rest are reserved */
1132  };
1133  
1134  /*
1135   * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1136   * register cannot be derived from the MTU value because 10K is not
1137   * a power of 2. Therefore, we need a constant. Everything else can
1138   * be calculated.
1139   */
1140  #define DCC_CFG_PORT_MTU_CAP_10240 7
1141  
1142  /*
1143   * Table of the DC grouping of error interrupts.  Each entry refers to
1144   * another register containing more information.
1145   */
1146  static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1147  /* 0*/	DC_EE1(DCC_ERR,		handle_dcc_err,	       "DCC Err"),
1148  /* 1*/	DC_EE2(DC_LCB_ERR,	handle_lcb_err,	       "LCB Err"),
1149  /* 2*/	DC_EE2(DC_DC8051_ERR,	handle_8051_interrupt, "DC8051 Interrupt"),
1150  /* 3*/	/* dc_lbm_int - special, see is_dc_int() */
1151  	/* the rest are reserved */
1152  };
1153  
1154  struct cntr_entry {
1155  	/*
1156  	 * counter name
1157  	 */
1158  	char *name;
1159  
1160  	/*
1161  	 * csr to read for name (if applicable)
1162  	 */
1163  	u64 csr;
1164  
1165  	/*
1166  	 * offset into dd or ppd to store the counter's value
1167  	 */
1168  	int offset;
1169  
1170  	/*
1171  	 * flags
1172  	 */
1173  	u8 flags;
1174  
1175  	/*
1176  	 * accessor for stat element, context either dd or ppd
1177  	 */
1178  	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
1179  		       int mode, u64 data);
1180  };
1181  
1182  #define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1183  #define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1184  
1185  #define CNTR_ELEM(name, csr, offset, flags, accessor) \
1186  { \
1187  	name, \
1188  	csr, \
1189  	offset, \
1190  	flags, \
1191  	accessor \
1192  }
1193  
1194  /* 32bit RXE */
1195  #define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1196  CNTR_ELEM(#name, \
1197  	  (counter * 8 + RCV_COUNTER_ARRAY32), \
1198  	  0, flags | CNTR_32BIT, \
1199  	  port_access_u32_csr)
1200  
1201  #define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1202  CNTR_ELEM(#name, \
1203  	  (counter * 8 + RCV_COUNTER_ARRAY32), \
1204  	  0, flags | CNTR_32BIT, \
1205  	  dev_access_u32_csr)
1206  
1207  /* 64bit RXE */
1208  #define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1209  CNTR_ELEM(#name, \
1210  	  (counter * 8 + RCV_COUNTER_ARRAY64), \
1211  	  0, flags, \
1212  	  port_access_u64_csr)
1213  
1214  #define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1215  CNTR_ELEM(#name, \
1216  	  (counter * 8 + RCV_COUNTER_ARRAY64), \
1217  	  0, flags, \
1218  	  dev_access_u64_csr)
1219  
1220  #define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1221  #define OVR_ELM(ctx) \
1222  CNTR_ELEM("RcvHdrOvr" #ctx, \
1223  	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
1224  	  0, CNTR_NORMAL, port_access_u64_csr)
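
/*
 * For reference, OVR_ELM(0) expands through CNTR_ELEM to an entry named
 * "RcvHdrOvr0" that reads the 64-bit CSR at RCV_HDR_OVFL_CNT + 0 * 0x100
 * via port_access_u64_csr with CNTR_NORMAL flags; each successive
 * receive context's overflow counter sits another 0x100 bytes into the
 * CSR space.
 */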
1225  
1226  /* 32bit TXE */
1227  #define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1228  CNTR_ELEM(#name, \
1229  	  (counter * 8 + SEND_COUNTER_ARRAY32), \
1230  	  0, flags | CNTR_32BIT, \
1231  	  port_access_u32_csr)
1232  
1233  /* 64bit TXE */
1234  #define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1235  CNTR_ELEM(#name, \
1236  	  (counter * 8 + SEND_COUNTER_ARRAY64), \
1237  	  0, flags, \
1238  	  port_access_u64_csr)
1239  
1240  #define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1241  CNTR_ELEM(#name,\
1242  	  counter * 8 + SEND_COUNTER_ARRAY64, \
1243  	  0, \
1244  	  flags, \
1245  	  dev_access_u64_csr)
1246  
1247  /* CCE */
1248  #define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1249  CNTR_ELEM(#name, \
1250  	  (counter * 8 + CCE_COUNTER_ARRAY32), \
1251  	  0, flags | CNTR_32BIT, \
1252  	  dev_access_u32_csr)
1253  
1254  #define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1255  CNTR_ELEM(#name, \
1256  	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1257  	  0, flags | CNTR_32BIT, \
1258  	  dev_access_u32_csr)
1259  
1260  /* DC */
1261  #define DC_PERF_CNTR(name, counter, flags) \
1262  CNTR_ELEM(#name, \
1263  	  counter, \
1264  	  0, \
1265  	  flags, \
1266  	  dev_access_u64_csr)
1267  
1268  #define DC_PERF_CNTR_LCB(name, counter, flags) \
1269  CNTR_ELEM(#name, \
1270  	  counter, \
1271  	  0, \
1272  	  flags, \
1273  	  dc_access_lcb_cntr)
1274  
1275  /* ibp counters */
1276  #define SW_IBP_CNTR(name, cntr) \
1277  CNTR_ELEM(#name, \
1278  	  0, \
1279  	  0, \
1280  	  CNTR_SYNTH, \
1281  	  access_ibp_##cntr)
1282  
1283  /**
1284   * hfi1_addr_from_offset - return addr for readq/writeq
1285   * @dd: the dd device
1286   * @offset: the offset of the CSR within bar0
1287   *
1288   * This routine selects the appropriate base address
1289   * based on the indicated offset.
1290   */
1291  static inline void __iomem *hfi1_addr_from_offset(
1292  	const struct hfi1_devdata *dd,
1293  	u32 offset)
1294  {
1295  	if (offset >= dd->base2_start)
1296  		return dd->kregbase2 + (offset - dd->base2_start);
1297  	return dd->kregbase1 + offset;
1298  }
1299  
1300  /**
1301   * read_csr - read CSR at the indicated offset
1302   * @dd: the dd device
1303   * @offset: the offset of the CSR within bar0
1304   *
1305   * Return: the value read or all FF's if there
1306   * is no mapping
1307   */
1308  u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1309  {
1310  	if (dd->flags & HFI1_PRESENT)
1311  		return readq(hfi1_addr_from_offset(dd, offset));
1312  	return -1;
1313  }
1314  
1315  /**
1316   * write_csr - write CSR at the indicated offset
1317   * @dd: the dd device
1318   * @offset: the offset of the CSR within bar0
1319   * @value: value to write
1320   */
1321  void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1322  {
1323  	if (dd->flags & HFI1_PRESENT) {
1324  		void __iomem *base = hfi1_addr_from_offset(dd, offset);
1325  
1326  		/* avoid write to RcvArray */
1327  		if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
1328  			return;
1329  		writeq(value, base);
1330  	}
1331  }
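
/*
 * Usage sketch (illustrative only; the offset and mask are
 * hypothetical): a read-modify-write of a CSR with the helpers above.
 *
 *	u64 reg = read_csr(dd, SOME_CSR_OFFSET);
 *
 *	reg |= SOME_ENABLE_SMASK;
 *	write_csr(dd, SOME_CSR_OFFSET, reg);
 *
 * read_csr() returns all 1's when the device is not present
 * (HFI1_PRESENT clear) and write_csr() silently drops the write, so
 * callers do not need their own presence checks.
 */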
1332  
1333  /**
1334   * get_csr_addr - return the iomem address for offset
1335   * @dd: the dd device
1336   * @offset: the offset of the CSR within bar0
1337   *
1338   * Return: The iomem address to use in subsequent
1339   * writeq/readq operations.
1340   */
1341  void __iomem *get_csr_addr(
1342  	const struct hfi1_devdata *dd,
1343  	u32 offset)
1344  {
1345  	if (dd->flags & HFI1_PRESENT)
1346  		return hfi1_addr_from_offset(dd, offset);
1347  	return NULL;
1348  }
1349  
1350  static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1351  				 int mode, u64 value)
1352  {
1353  	u64 ret;
1354  
1355  	if (mode == CNTR_MODE_R) {
1356  		ret = read_csr(dd, csr);
1357  	} else if (mode == CNTR_MODE_W) {
1358  		write_csr(dd, csr, value);
1359  		ret = value;
1360  	} else {
1361  		dd_dev_err(dd, "Invalid cntr register access mode");
1362  		return 0;
1363  	}
1364  
1365  	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1366  	return ret;
1367  }
1368  
1369  /* Dev Access */
1370  static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1371  			      void *context, int vl, int mode, u64 data)
1372  {
1373  	struct hfi1_devdata *dd = context;
1374  	u64 csr = entry->csr;
1375  
1376  	if (entry->flags & CNTR_SDMA) {
1377  		if (vl == CNTR_INVALID_VL)
1378  			return 0;
1379  		csr += 0x100 * vl;
1380  	} else {
1381  		if (vl != CNTR_INVALID_VL)
1382  			return 0;
1383  	}
1384  	return read_write_csr(dd, csr, mode, data);
1385  }
1386  
1387  static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1388  			      void *context, int idx, int mode, u64 data)
1389  {
1390  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1391  
1392  	if (dd->per_sdma && idx < dd->num_sdma)
1393  		return dd->per_sdma[idx].err_cnt;
1394  	return 0;
1395  }
1396  
1397  static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1398  			      void *context, int idx, int mode, u64 data)
1399  {
1400  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1401  
1402  	if (dd->per_sdma && idx < dd->num_sdma)
1403  		return dd->per_sdma[idx].sdma_int_cnt;
1404  	return 0;
1405  }
1406  
1407  static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1408  				   void *context, int idx, int mode, u64 data)
1409  {
1410  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1411  
1412  	if (dd->per_sdma && idx < dd->num_sdma)
1413  		return dd->per_sdma[idx].idle_int_cnt;
1414  	return 0;
1415  }
1416  
1417  static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1418  				       void *context, int idx, int mode,
1419  				       u64 data)
1420  {
1421  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1422  
1423  	if (dd->per_sdma && idx < dd->num_sdma)
1424  		return dd->per_sdma[idx].progress_int_cnt;
1425  	return 0;
1426  }
1427  
1428  static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1429  			      int vl, int mode, u64 data)
1430  {
1431  	struct hfi1_devdata *dd = context;
1432  
1433  	u64 val = 0;
1434  	u64 csr = entry->csr;
1435  
1436  	if (entry->flags & CNTR_VL) {
1437  		if (vl == CNTR_INVALID_VL)
1438  			return 0;
1439  		csr += 8 * vl;
1440  	} else {
1441  		if (vl != CNTR_INVALID_VL)
1442  			return 0;
1443  	}
1444  
1445  	val = read_write_csr(dd, csr, mode, data);
1446  	return val;
1447  }
1448  
1449  static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1450  			      int vl, int mode, u64 data)
1451  {
1452  	struct hfi1_devdata *dd = context;
1453  	u32 csr = entry->csr;
1454  	int ret = 0;
1455  
1456  	if (vl != CNTR_INVALID_VL)
1457  		return 0;
1458  	if (mode == CNTR_MODE_R)
1459  		ret = read_lcb_csr(dd, csr, &data);
1460  	else if (mode == CNTR_MODE_W)
1461  		ret = write_lcb_csr(dd, csr, data);
1462  
1463  	if (ret) {
1464  		dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1465  		return 0;
1466  	}
1467  
1468  	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1469  	return data;
1470  }
1471  
1472  /* Port Access */
1473  static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1474  			       int vl, int mode, u64 data)
1475  {
1476  	struct hfi1_pportdata *ppd = context;
1477  
1478  	if (vl != CNTR_INVALID_VL)
1479  		return 0;
1480  	return read_write_csr(ppd->dd, entry->csr, mode, data);
1481  }
1482  
1483  static u64 port_access_u64_csr(const struct cntr_entry *entry,
1484  			       void *context, int vl, int mode, u64 data)
1485  {
1486  	struct hfi1_pportdata *ppd = context;
1487  	u64 val;
1488  	u64 csr = entry->csr;
1489  
1490  	if (entry->flags & CNTR_VL) {
1491  		if (vl == CNTR_INVALID_VL)
1492  			return 0;
1493  		csr += 8 * vl;
1494  	} else {
1495  		if (vl != CNTR_INVALID_VL)
1496  			return 0;
1497  	}
1498  	val = read_write_csr(ppd->dd, csr, mode, data);
1499  	return val;
1500  }
1501  
1502  /* Software defined */
1503  static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1504  				u64 data)
1505  {
1506  	u64 ret;
1507  
1508  	if (mode == CNTR_MODE_R) {
1509  		ret = *cntr;
1510  	} else if (mode == CNTR_MODE_W) {
1511  		*cntr = data;
1512  		ret = data;
1513  	} else {
1514  		dd_dev_err(dd, "Invalid cntr sw access mode");
1515  		return 0;
1516  	}
1517  
1518  	hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1519  
1520  	return ret;
1521  }
1522  
1523  static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1524  				 int vl, int mode, u64 data)
1525  {
1526  	struct hfi1_pportdata *ppd = context;
1527  
1528  	if (vl != CNTR_INVALID_VL)
1529  		return 0;
1530  	return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1531  }
1532  
1533  static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1534  				 int vl, int mode, u64 data)
1535  {
1536  	struct hfi1_pportdata *ppd = context;
1537  
1538  	if (vl != CNTR_INVALID_VL)
1539  		return 0;
1540  	return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1541  }
1542  
1543  static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1544  				       void *context, int vl, int mode,
1545  				       u64 data)
1546  {
1547  	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1548  
1549  	if (vl != CNTR_INVALID_VL)
1550  		return 0;
1551  	return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1552  }
1553  
1554  static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1555  				   void *context, int vl, int mode, u64 data)
1556  {
1557  	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1558  	u64 zero = 0;
1559  	u64 *counter;
1560  
1561  	if (vl == CNTR_INVALID_VL)
1562  		counter = &ppd->port_xmit_discards;
1563  	else if (vl >= 0 && vl < C_VL_COUNT)
1564  		counter = &ppd->port_xmit_discards_vl[vl];
1565  	else
1566  		counter = &zero;
1567  
1568  	return read_write_sw(ppd->dd, counter, mode, data);
1569  }
1570  
1571  static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1572  				       void *context, int vl, int mode,
1573  				       u64 data)
1574  {
1575  	struct hfi1_pportdata *ppd = context;
1576  
1577  	if (vl != CNTR_INVALID_VL)
1578  		return 0;
1579  
1580  	return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1581  			     mode, data);
1582  }
1583  
1584  static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1585  				      void *context, int vl, int mode, u64 data)
1586  {
1587  	struct hfi1_pportdata *ppd = context;
1588  
1589  	if (vl != CNTR_INVALID_VL)
1590  		return 0;
1591  
1592  	return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1593  			     mode, data);
1594  }
1595  
1596  u64 get_all_cpu_total(u64 __percpu *cntr)
1597  {
1598  	int cpu;
1599  	u64 counter = 0;
1600  
1601  	for_each_possible_cpu(cpu)
1602  		counter += *per_cpu_ptr(cntr, cpu);
1603  	return counter;
1604  }
1605  
1606  static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1607  			  u64 __percpu *cntr,
1608  			  int vl, int mode, u64 data)
1609  {
1610  	u64 ret = 0;
1611  
1612  	if (vl != CNTR_INVALID_VL)
1613  		return 0;
1614  
1615  	if (mode == CNTR_MODE_R) {
1616  		ret = get_all_cpu_total(cntr) - *z_val;
1617  	} else if (mode == CNTR_MODE_W) {
1618  		/* A write can only zero the counter */
1619  		if (data == 0)
1620  			*z_val = get_all_cpu_total(cntr);
1621  		else
1622  			dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1623  	} else {
1624  		dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1625  		return 0;
1626  	}
1627  
1628  	return ret;
1629  }
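
/*
 * Illustrative note: per-CPU counters can only be cleared, never set.
 * Zeroing the interrupt counter, for example, would look like
 *
 *	read_write_cpu(dd, &dd->z_int_counter, dd->int_counter,
 *		       CNTR_INVALID_VL, CNTR_MODE_W, 0);
 *
 * which snapshots the current per-CPU total into z_int_counter so that
 * subsequent reads start again from zero.
 */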
1630  
1631  static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1632  			      void *context, int vl, int mode, u64 data)
1633  {
1634  	struct hfi1_devdata *dd = context;
1635  
1636  	return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1637  			      mode, data);
1638  }
1639  
1640  static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1641  				   void *context, int vl, int mode, u64 data)
1642  {
1643  	struct hfi1_devdata *dd = context;
1644  
1645  	return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1646  			      mode, data);
1647  }
1648  
1649  static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1650  			      void *context, int vl, int mode, u64 data)
1651  {
1652  	struct hfi1_devdata *dd = context;
1653  
1654  	return dd->verbs_dev.n_piowait;
1655  }
1656  
1657  static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1658  			       void *context, int vl, int mode, u64 data)
1659  {
1660  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1661  
1662  	return dd->verbs_dev.n_piodrain;
1663  }
1664  
1665  static u64 access_sw_ctx0_seq_drop(const struct cntr_entry *entry,
1666  				   void *context, int vl, int mode, u64 data)
1667  {
1668  	struct hfi1_devdata *dd = context;
1669  
1670  	return dd->ctx0_seq_drop;
1671  }
1672  
1673  static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1674  			      void *context, int vl, int mode, u64 data)
1675  {
1676  	struct hfi1_devdata *dd = context;
1677  
1678  	return dd->verbs_dev.n_txwait;
1679  }
1680  
1681  static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1682  			       void *context, int vl, int mode, u64 data)
1683  {
1684  	struct hfi1_devdata *dd = context;
1685  
1686  	return dd->verbs_dev.n_kmem_wait;
1687  }
1688  
1689  static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1690  				   void *context, int vl, int mode, u64 data)
1691  {
1692  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1693  
1694  	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1695  			      mode, data);
1696  }
1697  
1698  /* Software counters for the error status bits within MISC_ERR_STATUS */
1699  static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1700  					     void *context, int vl, int mode,
1701  					     u64 data)
1702  {
1703  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1704  
1705  	return dd->misc_err_status_cnt[12];
1706  }
1707  
1708  static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1709  					  void *context, int vl, int mode,
1710  					  u64 data)
1711  {
1712  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1713  
1714  	return dd->misc_err_status_cnt[11];
1715  }
1716  
1717  static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1718  					       void *context, int vl, int mode,
1719  					       u64 data)
1720  {
1721  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1722  
1723  	return dd->misc_err_status_cnt[10];
1724  }
1725  
1726  static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1727  						 void *context, int vl,
1728  						 int mode, u64 data)
1729  {
1730  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1731  
1732  	return dd->misc_err_status_cnt[9];
1733  }
1734  
1735  static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1736  					   void *context, int vl, int mode,
1737  					   u64 data)
1738  {
1739  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1740  
1741  	return dd->misc_err_status_cnt[8];
1742  }
1743  
1744  static u64 access_misc_efuse_read_bad_addr_err_cnt(
1745  				const struct cntr_entry *entry,
1746  				void *context, int vl, int mode, u64 data)
1747  {
1748  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1749  
1750  	return dd->misc_err_status_cnt[7];
1751  }
1752  
1753  static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1754  						void *context, int vl,
1755  						int mode, u64 data)
1756  {
1757  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1758  
1759  	return dd->misc_err_status_cnt[6];
1760  }
1761  
1762  static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1763  					      void *context, int vl, int mode,
1764  					      u64 data)
1765  {
1766  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1767  
1768  	return dd->misc_err_status_cnt[5];
1769  }
1770  
1771  static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1772  					    void *context, int vl, int mode,
1773  					    u64 data)
1774  {
1775  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1776  
1777  	return dd->misc_err_status_cnt[4];
1778  }
1779  
1780  static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1781  						 void *context, int vl,
1782  						 int mode, u64 data)
1783  {
1784  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1785  
1786  	return dd->misc_err_status_cnt[3];
1787  }
1788  
1789  static u64 access_misc_csr_write_bad_addr_err_cnt(
1790  				const struct cntr_entry *entry,
1791  				void *context, int vl, int mode, u64 data)
1792  {
1793  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1794  
1795  	return dd->misc_err_status_cnt[2];
1796  }
1797  
1798  static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1799  						 void *context, int vl,
1800  						 int mode, u64 data)
1801  {
1802  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1803  
1804  	return dd->misc_err_status_cnt[1];
1805  }
1806  
1807  static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1808  					  void *context, int vl, int mode,
1809  					  u64 data)
1810  {
1811  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1812  
1813  	return dd->misc_err_status_cnt[0];
1814  }
1815  
1816  /*
1817   * Software counter for the aggregate of
1818   * individual CceErrStatus counters
1819   */
1820  static u64 access_sw_cce_err_status_aggregated_cnt(
1821  				const struct cntr_entry *entry,
1822  				void *context, int vl, int mode, u64 data)
1823  {
1824  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1825  
1826  	return dd->sw_cce_err_status_aggregate;
1827  }
1828  
1829  /*
1830   * Software counters corresponding to each of the
1831   * error status bits within CceErrStatus
1832   */
1833  static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1834  					      void *context, int vl, int mode,
1835  					      u64 data)
1836  {
1837  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1838  
1839  	return dd->cce_err_status_cnt[40];
1840  }
1841  
1842  static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1843  					  void *context, int vl, int mode,
1844  					  u64 data)
1845  {
1846  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1847  
1848  	return dd->cce_err_status_cnt[39];
1849  }
1850  
1851  static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1852  					  void *context, int vl, int mode,
1853  					  u64 data)
1854  {
1855  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1856  
1857  	return dd->cce_err_status_cnt[38];
1858  }
1859  
1860  static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1861  					     void *context, int vl, int mode,
1862  					     u64 data)
1863  {
1864  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1865  
1866  	return dd->cce_err_status_cnt[37];
1867  }
1868  
1869  static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1870  					     void *context, int vl, int mode,
1871  					     u64 data)
1872  {
1873  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1874  
1875  	return dd->cce_err_status_cnt[36];
1876  }
1877  
1878  static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1879  				const struct cntr_entry *entry,
1880  				void *context, int vl, int mode, u64 data)
1881  {
1882  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1883  
1884  	return dd->cce_err_status_cnt[35];
1885  }
1886  
1887  static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1888  				const struct cntr_entry *entry,
1889  				void *context, int vl, int mode, u64 data)
1890  {
1891  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1892  
1893  	return dd->cce_err_status_cnt[34];
1894  }
1895  
1896  static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1897  						 void *context, int vl,
1898  						 int mode, u64 data)
1899  {
1900  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1901  
1902  	return dd->cce_err_status_cnt[33];
1903  }
1904  
1905  static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1906  						void *context, int vl, int mode,
1907  						u64 data)
1908  {
1909  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1910  
1911  	return dd->cce_err_status_cnt[32];
1912  }
1913  
1914  static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1915  				   void *context, int vl, int mode, u64 data)
1916  {
1917  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1918  
1919  	return dd->cce_err_status_cnt[31];
1920  }
1921  
1922  static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1923  					       void *context, int vl, int mode,
1924  					       u64 data)
1925  {
1926  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1927  
1928  	return dd->cce_err_status_cnt[30];
1929  }
1930  
1931  static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1932  					      void *context, int vl, int mode,
1933  					      u64 data)
1934  {
1935  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1936  
1937  	return dd->cce_err_status_cnt[29];
1938  }
1939  
1940  static u64 access_pcic_transmit_back_parity_err_cnt(
1941  				const struct cntr_entry *entry,
1942  				void *context, int vl, int mode, u64 data)
1943  {
1944  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1945  
1946  	return dd->cce_err_status_cnt[28];
1947  }
1948  
1949  static u64 access_pcic_transmit_front_parity_err_cnt(
1950  				const struct cntr_entry *entry,
1951  				void *context, int vl, int mode, u64 data)
1952  {
1953  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1954  
1955  	return dd->cce_err_status_cnt[27];
1956  }
1957  
1958  static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1959  					     void *context, int vl, int mode,
1960  					     u64 data)
1961  {
1962  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1963  
1964  	return dd->cce_err_status_cnt[26];
1965  }
1966  
1967  static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1968  					    void *context, int vl, int mode,
1969  					    u64 data)
1970  {
1971  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1972  
1973  	return dd->cce_err_status_cnt[25];
1974  }
1975  
1976  static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1977  					      void *context, int vl, int mode,
1978  					      u64 data)
1979  {
1980  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1981  
1982  	return dd->cce_err_status_cnt[24];
1983  }
1984  
1985  static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1986  					     void *context, int vl, int mode,
1987  					     u64 data)
1988  {
1989  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1990  
1991  	return dd->cce_err_status_cnt[23];
1992  }
1993  
1994  static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1995  						 void *context, int vl,
1996  						 int mode, u64 data)
1997  {
1998  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1999  
2000  	return dd->cce_err_status_cnt[22];
2001  }
2002  
2003  static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
2004  					 void *context, int vl, int mode,
2005  					 u64 data)
2006  {
2007  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2008  
2009  	return dd->cce_err_status_cnt[21];
2010  }
2011  
2012  static u64 access_pcic_n_post_dat_q_parity_err_cnt(
2013  				const struct cntr_entry *entry,
2014  				void *context, int vl, int mode, u64 data)
2015  {
2016  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2017  
2018  	return dd->cce_err_status_cnt[20];
2019  }
2020  
2021  static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
2022  						 void *context, int vl,
2023  						 int mode, u64 data)
2024  {
2025  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2026  
2027  	return dd->cce_err_status_cnt[19];
2028  }
2029  
2030  static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2031  					     void *context, int vl, int mode,
2032  					     u64 data)
2033  {
2034  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2035  
2036  	return dd->cce_err_status_cnt[18];
2037  }
2038  
2039  static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2040  					    void *context, int vl, int mode,
2041  					    u64 data)
2042  {
2043  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2044  
2045  	return dd->cce_err_status_cnt[17];
2046  }
2047  
2048  static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2049  					      void *context, int vl, int mode,
2050  					      u64 data)
2051  {
2052  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2053  
2054  	return dd->cce_err_status_cnt[16];
2055  }
2056  
2057  static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2058  					     void *context, int vl, int mode,
2059  					     u64 data)
2060  {
2061  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2062  
2063  	return dd->cce_err_status_cnt[15];
2064  }
2065  
2066  static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
2067  						 void *context, int vl,
2068  						 int mode, u64 data)
2069  {
2070  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2071  
2072  	return dd->cce_err_status_cnt[14];
2073  }
2074  
2075  static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2076  					     void *context, int vl, int mode,
2077  					     u64 data)
2078  {
2079  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2080  
2081  	return dd->cce_err_status_cnt[13];
2082  }
2083  
2084  static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2085  				const struct cntr_entry *entry,
2086  				void *context, int vl, int mode, u64 data)
2087  {
2088  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2089  
2090  	return dd->cce_err_status_cnt[12];
2091  }
2092  
2093  static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2094  				const struct cntr_entry *entry,
2095  				void *context, int vl, int mode, u64 data)
2096  {
2097  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2098  
2099  	return dd->cce_err_status_cnt[11];
2100  }
2101  
2102  static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2103  				const struct cntr_entry *entry,
2104  				void *context, int vl, int mode, u64 data)
2105  {
2106  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2107  
2108  	return dd->cce_err_status_cnt[10];
2109  }
2110  
2111  static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2112  				const struct cntr_entry *entry,
2113  				void *context, int vl, int mode, u64 data)
2114  {
2115  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2116  
2117  	return dd->cce_err_status_cnt[9];
2118  }
2119  
2120  static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2121  				const struct cntr_entry *entry,
2122  				void *context, int vl, int mode, u64 data)
2123  {
2124  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2125  
2126  	return dd->cce_err_status_cnt[8];
2127  }
2128  
2129  static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2130  						 void *context, int vl,
2131  						 int mode, u64 data)
2132  {
2133  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2134  
2135  	return dd->cce_err_status_cnt[7];
2136  }
2137  
2138  static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2139  				const struct cntr_entry *entry,
2140  				void *context, int vl, int mode, u64 data)
2141  {
2142  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2143  
2144  	return dd->cce_err_status_cnt[6];
2145  }
2146  
2147  static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2148  					       void *context, int vl, int mode,
2149  					       u64 data)
2150  {
2151  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2152  
2153  	return dd->cce_err_status_cnt[5];
2154  }
2155  
2156  static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2157  					  void *context, int vl, int mode,
2158  					  u64 data)
2159  {
2160  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2161  
2162  	return dd->cce_err_status_cnt[4];
2163  }
2164  
2165  static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2166  				const struct cntr_entry *entry,
2167  				void *context, int vl, int mode, u64 data)
2168  {
2169  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2170  
2171  	return dd->cce_err_status_cnt[3];
2172  }
2173  
2174  static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2175  						 void *context, int vl,
2176  						 int mode, u64 data)
2177  {
2178  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2179  
2180  	return dd->cce_err_status_cnt[2];
2181  }
2182  
2183  static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2184  						void *context, int vl,
2185  						int mode, u64 data)
2186  {
2187  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2188  
2189  	return dd->cce_err_status_cnt[1];
2190  }
2191  
2192  static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2193  					 void *context, int vl, int mode,
2194  					 u64 data)
2195  {
2196  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2197  
2198  	return dd->cce_err_status_cnt[0];
2199  }
2200  
2201  /*
2202   * Software counters corresponding to each of the
2203   * error status bits within RcvErrStatus
2204   */
2205  static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2206  					void *context, int vl, int mode,
2207  					u64 data)
2208  {
2209  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2210  
2211  	return dd->rcv_err_status_cnt[63];
2212  }
2213  
2214  static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2215  						void *context, int vl,
2216  						int mode, u64 data)
2217  {
2218  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2219  
2220  	return dd->rcv_err_status_cnt[62];
2221  }
2222  
2223  static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2224  					       void *context, int vl, int mode,
2225  					       u64 data)
2226  {
2227  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2228  
2229  	return dd->rcv_err_status_cnt[61];
2230  }
2231  
2232  static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2233  					 void *context, int vl, int mode,
2234  					 u64 data)
2235  {
2236  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2237  
2238  	return dd->rcv_err_status_cnt[60];
2239  }
2240  
2241  static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2242  						 void *context, int vl,
2243  						 int mode, u64 data)
2244  {
2245  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2246  
2247  	return dd->rcv_err_status_cnt[59];
2248  }
2249  
2250  static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2251  						 void *context, int vl,
2252  						 int mode, u64 data)
2253  {
2254  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2255  
2256  	return dd->rcv_err_status_cnt[58];
2257  }
2258  
2259  static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2260  					    void *context, int vl, int mode,
2261  					    u64 data)
2262  {
2263  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2264  
2265  	return dd->rcv_err_status_cnt[57];
2266  }
2267  
2268  static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2269  					   void *context, int vl, int mode,
2270  					   u64 data)
2271  {
2272  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2273  
2274  	return dd->rcv_err_status_cnt[56];
2275  }
2276  
2277  static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2278  					   void *context, int vl, int mode,
2279  					   u64 data)
2280  {
2281  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2282  
2283  	return dd->rcv_err_status_cnt[55];
2284  }
2285  
2286  static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2287  				const struct cntr_entry *entry,
2288  				void *context, int vl, int mode, u64 data)
2289  {
2290  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2291  
2292  	return dd->rcv_err_status_cnt[54];
2293  }
2294  
2295  static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2296  				const struct cntr_entry *entry,
2297  				void *context, int vl, int mode, u64 data)
2298  {
2299  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2300  
2301  	return dd->rcv_err_status_cnt[53];
2302  }
2303  
2304  static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2305  						 void *context, int vl,
2306  						 int mode, u64 data)
2307  {
2308  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2309  
2310  	return dd->rcv_err_status_cnt[52];
2311  }
2312  
2313  static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2314  						 void *context, int vl,
2315  						 int mode, u64 data)
2316  {
2317  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2318  
2319  	return dd->rcv_err_status_cnt[51];
2320  }
2321  
2322  static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2323  						 void *context, int vl,
2324  						 int mode, u64 data)
2325  {
2326  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2327  
2328  	return dd->rcv_err_status_cnt[50];
2329  }
2330  
2331  static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2332  						 void *context, int vl,
2333  						 int mode, u64 data)
2334  {
2335  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2336  
2337  	return dd->rcv_err_status_cnt[49];
2338  }
2339  
2340  static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2341  						 void *context, int vl,
2342  						 int mode, u64 data)
2343  {
2344  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2345  
2346  	return dd->rcv_err_status_cnt[48];
2347  }
2348  
2349  static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2350  						 void *context, int vl,
2351  						 int mode, u64 data)
2352  {
2353  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2354  
2355  	return dd->rcv_err_status_cnt[47];
2356  }
2357  
2358  static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2359  					 void *context, int vl, int mode,
2360  					 u64 data)
2361  {
2362  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2363  
2364  	return dd->rcv_err_status_cnt[46];
2365  }
2366  
2367  static u64 access_rx_hq_intr_csr_parity_err_cnt(
2368  				const struct cntr_entry *entry,
2369  				void *context, int vl, int mode, u64 data)
2370  {
2371  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2372  
2373  	return dd->rcv_err_status_cnt[45];
2374  }
2375  
2376  static u64 access_rx_lookup_csr_parity_err_cnt(
2377  				const struct cntr_entry *entry,
2378  				void *context, int vl, int mode, u64 data)
2379  {
2380  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2381  
2382  	return dd->rcv_err_status_cnt[44];
2383  }
2384  
2385  static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2386  				const struct cntr_entry *entry,
2387  				void *context, int vl, int mode, u64 data)
2388  {
2389  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2390  
2391  	return dd->rcv_err_status_cnt[43];
2392  }
2393  
2394  static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2395  				const struct cntr_entry *entry,
2396  				void *context, int vl, int mode, u64 data)
2397  {
2398  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2399  
2400  	return dd->rcv_err_status_cnt[42];
2401  }
2402  
2403  static u64 access_rx_lookup_des_part2_parity_err_cnt(
2404  				const struct cntr_entry *entry,
2405  				void *context, int vl, int mode, u64 data)
2406  {
2407  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2408  
2409  	return dd->rcv_err_status_cnt[41];
2410  }
2411  
2412  static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2413  				const struct cntr_entry *entry,
2414  				void *context, int vl, int mode, u64 data)
2415  {
2416  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2417  
2418  	return dd->rcv_err_status_cnt[40];
2419  }
2420  
2421  static u64 access_rx_lookup_des_part1_unc_err_cnt(
2422  				const struct cntr_entry *entry,
2423  				void *context, int vl, int mode, u64 data)
2424  {
2425  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2426  
2427  	return dd->rcv_err_status_cnt[39];
2428  }
2429  
2430  static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2431  				const struct cntr_entry *entry,
2432  				void *context, int vl, int mode, u64 data)
2433  {
2434  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2435  
2436  	return dd->rcv_err_status_cnt[38];
2437  }
2438  
2439  static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2440  				const struct cntr_entry *entry,
2441  				void *context, int vl, int mode, u64 data)
2442  {
2443  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2444  
2445  	return dd->rcv_err_status_cnt[37];
2446  }
2447  
2448  static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2449  				const struct cntr_entry *entry,
2450  				void *context, int vl, int mode, u64 data)
2451  {
2452  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2453  
2454  	return dd->rcv_err_status_cnt[36];
2455  }
2456  
2457  static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2458  				const struct cntr_entry *entry,
2459  				void *context, int vl, int mode, u64 data)
2460  {
2461  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2462  
2463  	return dd->rcv_err_status_cnt[35];
2464  }
2465  
2466  static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2467  				const struct cntr_entry *entry,
2468  				void *context, int vl, int mode, u64 data)
2469  {
2470  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2471  
2472  	return dd->rcv_err_status_cnt[34];
2473  }
2474  
2475  static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2476  				const struct cntr_entry *entry,
2477  				void *context, int vl, int mode, u64 data)
2478  {
2479  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2480  
2481  	return dd->rcv_err_status_cnt[33];
2482  }
2483  
2484  static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2485  					void *context, int vl, int mode,
2486  					u64 data)
2487  {
2488  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2489  
2490  	return dd->rcv_err_status_cnt[32];
2491  }
2492  
2493  static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2494  				       void *context, int vl, int mode,
2495  				       u64 data)
2496  {
2497  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2498  
2499  	return dd->rcv_err_status_cnt[31];
2500  }
2501  
2502  static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2503  					  void *context, int vl, int mode,
2504  					  u64 data)
2505  {
2506  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2507  
2508  	return dd->rcv_err_status_cnt[30];
2509  }
2510  
2511  static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2512  					     void *context, int vl, int mode,
2513  					     u64 data)
2514  {
2515  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2516  
2517  	return dd->rcv_err_status_cnt[29];
2518  }
2519  
2520  static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2521  						 void *context, int vl,
2522  						 int mode, u64 data)
2523  {
2524  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2525  
2526  	return dd->rcv_err_status_cnt[28];
2527  }
2528  
2529  static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2530  				const struct cntr_entry *entry,
2531  				void *context, int vl, int mode, u64 data)
2532  {
2533  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2534  
2535  	return dd->rcv_err_status_cnt[27];
2536  }
2537  
2538  static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2539  				const struct cntr_entry *entry,
2540  				void *context, int vl, int mode, u64 data)
2541  {
2542  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2543  
2544  	return dd->rcv_err_status_cnt[26];
2545  }
2546  
2547  static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2548  				const struct cntr_entry *entry,
2549  				void *context, int vl, int mode, u64 data)
2550  {
2551  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2552  
2553  	return dd->rcv_err_status_cnt[25];
2554  }
2555  
2556  static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2557  				const struct cntr_entry *entry,
2558  				void *context, int vl, int mode, u64 data)
2559  {
2560  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2561  
2562  	return dd->rcv_err_status_cnt[24];
2563  }
2564  
2565  static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2566  				const struct cntr_entry *entry,
2567  				void *context, int vl, int mode, u64 data)
2568  {
2569  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2570  
2571  	return dd->rcv_err_status_cnt[23];
2572  }
2573  
2574  static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2575  				const struct cntr_entry *entry,
2576  				void *context, int vl, int mode, u64 data)
2577  {
2578  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2579  
2580  	return dd->rcv_err_status_cnt[22];
2581  }
2582  
2583  static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2584  				const struct cntr_entry *entry,
2585  				void *context, int vl, int mode, u64 data)
2586  {
2587  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2588  
2589  	return dd->rcv_err_status_cnt[21];
2590  }
2591  
2592  static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2593  				const struct cntr_entry *entry,
2594  				void *context, int vl, int mode, u64 data)
2595  {
2596  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2597  
2598  	return dd->rcv_err_status_cnt[20];
2599  }
2600  
2601  static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2602  				const struct cntr_entry *entry,
2603  				void *context, int vl, int mode, u64 data)
2604  {
2605  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2606  
2607  	return dd->rcv_err_status_cnt[19];
2608  }
2609  
2610  static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2611  						 void *context, int vl,
2612  						 int mode, u64 data)
2613  {
2614  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2615  
2616  	return dd->rcv_err_status_cnt[18];
2617  }
2618  
2619  static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2620  						 void *context, int vl,
2621  						 int mode, u64 data)
2622  {
2623  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2624  
2625  	return dd->rcv_err_status_cnt[17];
2626  }
2627  
2628  static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2629  				const struct cntr_entry *entry,
2630  				void *context, int vl, int mode, u64 data)
2631  {
2632  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2633  
2634  	return dd->rcv_err_status_cnt[16];
2635  }
2636  
2637  static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2638  				const struct cntr_entry *entry,
2639  				void *context, int vl, int mode, u64 data)
2640  {
2641  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2642  
2643  	return dd->rcv_err_status_cnt[15];
2644  }
2645  
2646  static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2647  						void *context, int vl,
2648  						int mode, u64 data)
2649  {
2650  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2651  
2652  	return dd->rcv_err_status_cnt[14];
2653  }
2654  
2655  static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2656  						void *context, int vl,
2657  						int mode, u64 data)
2658  {
2659  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2660  
2661  	return dd->rcv_err_status_cnt[13];
2662  }
2663  
2664  static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2665  					      void *context, int vl, int mode,
2666  					      u64 data)
2667  {
2668  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2669  
2670  	return dd->rcv_err_status_cnt[12];
2671  }
2672  
2673  static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2674  					  void *context, int vl, int mode,
2675  					  u64 data)
2676  {
2677  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2678  
2679  	return dd->rcv_err_status_cnt[11];
2680  }
2681  
2682  static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2683  					  void *context, int vl, int mode,
2684  					  u64 data)
2685  {
2686  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2687  
2688  	return dd->rcv_err_status_cnt[10];
2689  }
2690  
2691  static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2692  					       void *context, int vl, int mode,
2693  					       u64 data)
2694  {
2695  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2696  
2697  	return dd->rcv_err_status_cnt[9];
2698  }
2699  
2700  static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2701  					    void *context, int vl, int mode,
2702  					    u64 data)
2703  {
2704  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2705  
2706  	return dd->rcv_err_status_cnt[8];
2707  }
2708  
2709  static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2710  				const struct cntr_entry *entry,
2711  				void *context, int vl, int mode, u64 data)
2712  {
2713  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2714  
2715  	return dd->rcv_err_status_cnt[7];
2716  }
2717  
2718  static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2719  				const struct cntr_entry *entry,
2720  				void *context, int vl, int mode, u64 data)
2721  {
2722  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2723  
2724  	return dd->rcv_err_status_cnt[6];
2725  }
2726  
2727  static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2728  					  void *context, int vl, int mode,
2729  					  u64 data)
2730  {
2731  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2732  
2733  	return dd->rcv_err_status_cnt[5];
2734  }
2735  
2736  static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2737  					  void *context, int vl, int mode,
2738  					  u64 data)
2739  {
2740  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2741  
2742  	return dd->rcv_err_status_cnt[4];
2743  }
2744  
2745  static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2746  					 void *context, int vl, int mode,
2747  					 u64 data)
2748  {
2749  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2750  
2751  	return dd->rcv_err_status_cnt[3];
2752  }
2753  
2754  static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2755  					 void *context, int vl, int mode,
2756  					 u64 data)
2757  {
2758  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2759  
2760  	return dd->rcv_err_status_cnt[2];
2761  }
2762  
2763  static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2764  					    void *context, int vl, int mode,
2765  					    u64 data)
2766  {
2767  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2768  
2769  	return dd->rcv_err_status_cnt[1];
2770  }
2771  
2772  static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2773  					 void *context, int vl, int mode,
2774  					 u64 data)
2775  {
2776  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2777  
2778  	return dd->rcv_err_status_cnt[0];
2779  }
2780  
2781  /*
2782   * Software counters corresponding to each of the
2783   * error status bits within SendPioErrStatus
2784   */
2785  static u64 access_pio_pec_sop_head_parity_err_cnt(
2786  				const struct cntr_entry *entry,
2787  				void *context, int vl, int mode, u64 data)
2788  {
2789  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2790  
2791  	return dd->send_pio_err_status_cnt[35];
2792  }
2793  
2794  static u64 access_pio_pcc_sop_head_parity_err_cnt(
2795  				const struct cntr_entry *entry,
2796  				void *context, int vl, int mode, u64 data)
2797  {
2798  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2799  
2800  	return dd->send_pio_err_status_cnt[34];
2801  }
2802  
2803  static u64 access_pio_last_returned_cnt_parity_err_cnt(
2804  				const struct cntr_entry *entry,
2805  				void *context, int vl, int mode, u64 data)
2806  {
2807  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2808  
2809  	return dd->send_pio_err_status_cnt[33];
2810  }
2811  
2812  static u64 access_pio_current_free_cnt_parity_err_cnt(
2813  				const struct cntr_entry *entry,
2814  				void *context, int vl, int mode, u64 data)
2815  {
2816  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2817  
2818  	return dd->send_pio_err_status_cnt[32];
2819  }
2820  
2821  static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2822  					  void *context, int vl, int mode,
2823  					  u64 data)
2824  {
2825  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2826  
2827  	return dd->send_pio_err_status_cnt[31];
2828  }
2829  
2830  static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2831  					  void *context, int vl, int mode,
2832  					  u64 data)
2833  {
2834  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2835  
2836  	return dd->send_pio_err_status_cnt[30];
2837  }
2838  
2839  static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2840  					   void *context, int vl, int mode,
2841  					   u64 data)
2842  {
2843  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2844  
2845  	return dd->send_pio_err_status_cnt[29];
2846  }
2847  
2848  static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2849  				const struct cntr_entry *entry,
2850  				void *context, int vl, int mode, u64 data)
2851  {
2852  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2853  
2854  	return dd->send_pio_err_status_cnt[28];
2855  }
2856  
2857  static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2858  					     void *context, int vl, int mode,
2859  					     u64 data)
2860  {
2861  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2862  
2863  	return dd->send_pio_err_status_cnt[27];
2864  }
2865  
2866  static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2867  					     void *context, int vl, int mode,
2868  					     u64 data)
2869  {
2870  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2871  
2872  	return dd->send_pio_err_status_cnt[26];
2873  }
2874  
2875  static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2876  						void *context, int vl,
2877  						int mode, u64 data)
2878  {
2879  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2880  
2881  	return dd->send_pio_err_status_cnt[25];
2882  }
2883  
2884  static u64 access_pio_block_qw_count_parity_err_cnt(
2885  				const struct cntr_entry *entry,
2886  				void *context, int vl, int mode, u64 data)
2887  {
2888  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2889  
2890  	return dd->send_pio_err_status_cnt[24];
2891  }
2892  
2893  static u64 access_pio_write_qw_valid_parity_err_cnt(
2894  				const struct cntr_entry *entry,
2895  				void *context, int vl, int mode, u64 data)
2896  {
2897  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2898  
2899  	return dd->send_pio_err_status_cnt[23];
2900  }
2901  
2902  static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2903  					    void *context, int vl, int mode,
2904  					    u64 data)
2905  {
2906  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2907  
2908  	return dd->send_pio_err_status_cnt[22];
2909  }
2910  
2911  static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2912  						void *context, int vl,
2913  						int mode, u64 data)
2914  {
2915  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2916  
2917  	return dd->send_pio_err_status_cnt[21];
2918  }
2919  
2920  static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2921  						void *context, int vl,
2922  						int mode, u64 data)
2923  {
2924  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2925  
2926  	return dd->send_pio_err_status_cnt[20];
2927  }
2928  
2929  static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2930  						void *context, int vl,
2931  						int mode, u64 data)
2932  {
2933  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2934  
2935  	return dd->send_pio_err_status_cnt[19];
2936  }
2937  
2938  static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2939  				const struct cntr_entry *entry,
2940  				void *context, int vl, int mode, u64 data)
2941  {
2942  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2943  
2944  	return dd->send_pio_err_status_cnt[18];
2945  }
2946  
2947  static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2948  					 void *context, int vl, int mode,
2949  					 u64 data)
2950  {
2951  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2952  
2953  	return dd->send_pio_err_status_cnt[17];
2954  }
2955  
2956  static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2957  					    void *context, int vl, int mode,
2958  					    u64 data)
2959  {
2960  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2961  
2962  	return dd->send_pio_err_status_cnt[16];
2963  }
2964  
2965  static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2966  				const struct cntr_entry *entry,
2967  				void *context, int vl, int mode, u64 data)
2968  {
2969  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2970  
2971  	return dd->send_pio_err_status_cnt[15];
2972  }
2973  
2974  static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2975  				const struct cntr_entry *entry,
2976  				void *context, int vl, int mode, u64 data)
2977  {
2978  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2979  
2980  	return dd->send_pio_err_status_cnt[14];
2981  }
2982  
2983  static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2984  				const struct cntr_entry *entry,
2985  				void *context, int vl, int mode, u64 data)
2986  {
2987  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2988  
2989  	return dd->send_pio_err_status_cnt[13];
2990  }
2991  
2992  static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2993  				const struct cntr_entry *entry,
2994  				void *context, int vl, int mode, u64 data)
2995  {
2996  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2997  
2998  	return dd->send_pio_err_status_cnt[12];
2999  }
3000  
3001  static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
3002  				const struct cntr_entry *entry,
3003  				void *context, int vl, int mode, u64 data)
3004  {
3005  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3006  
3007  	return dd->send_pio_err_status_cnt[11];
3008  }
3009  
3010  static u64 access_pio_sm_pkt_reset_parity_err_cnt(
3011  				const struct cntr_entry *entry,
3012  				void *context, int vl, int mode, u64 data)
3013  {
3014  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3015  
3016  	return dd->send_pio_err_status_cnt[10];
3017  }
3018  
3019  static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
3020  				const struct cntr_entry *entry,
3021  				void *context, int vl, int mode, u64 data)
3022  {
3023  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3024  
3025  	return dd->send_pio_err_status_cnt[9];
3026  }
3027  
3028  static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
3029  				const struct cntr_entry *entry,
3030  				void *context, int vl, int mode, u64 data)
3031  {
3032  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3033  
3034  	return dd->send_pio_err_status_cnt[8];
3035  }
3036  
3037  static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
3038  				const struct cntr_entry *entry,
3039  				void *context, int vl, int mode, u64 data)
3040  {
3041  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3042  
3043  	return dd->send_pio_err_status_cnt[7];
3044  }
3045  
3046  static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
3047  					      void *context, int vl, int mode,
3048  					      u64 data)
3049  {
3050  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3051  
3052  	return dd->send_pio_err_status_cnt[6];
3053  }
3054  
3055  static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
3056  					      void *context, int vl, int mode,
3057  					      u64 data)
3058  {
3059  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3060  
3061  	return dd->send_pio_err_status_cnt[5];
3062  }
3063  
3064  static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
3065  					   void *context, int vl, int mode,
3066  					   u64 data)
3067  {
3068  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3069  
3070  	return dd->send_pio_err_status_cnt[4];
3071  }
3072  
3073  static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
3074  					   void *context, int vl, int mode,
3075  					   u64 data)
3076  {
3077  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3078  
3079  	return dd->send_pio_err_status_cnt[3];
3080  }
3081  
3082  static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3083  					 void *context, int vl, int mode,
3084  					 u64 data)
3085  {
3086  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3087  
3088  	return dd->send_pio_err_status_cnt[2];
3089  }
3090  
3091  static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3092  						void *context, int vl,
3093  						int mode, u64 data)
3094  {
3095  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3096  
3097  	return dd->send_pio_err_status_cnt[1];
3098  }
3099  
3100  static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3101  					     void *context, int vl, int mode,
3102  					     u64 data)
3103  {
3104  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3105  
3106  	return dd->send_pio_err_status_cnt[0];
3107  }
3108  
3109  /*
3110   * Software counters corresponding to each of the
3111   * error status bits within SendDmaErrStatus
3112   */
3113  static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3114  				const struct cntr_entry *entry,
3115  				void *context, int vl, int mode, u64 data)
3116  {
3117  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3118  
3119  	return dd->send_dma_err_status_cnt[3];
3120  }
3121  
3122  static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3123  				const struct cntr_entry *entry,
3124  				void *context, int vl, int mode, u64 data)
3125  {
3126  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3127  
3128  	return dd->send_dma_err_status_cnt[2];
3129  }
3130  
3131  static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3132  					  void *context, int vl, int mode,
3133  					  u64 data)
3134  {
3135  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3136  
3137  	return dd->send_dma_err_status_cnt[1];
3138  }
3139  
3140  static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3141  				       void *context, int vl, int mode,
3142  				       u64 data)
3143  {
3144  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3145  
3146  	return dd->send_dma_err_status_cnt[0];
3147  }
3148  
3149  /*
3150   * Software counters corresponding to each of the
3151   * error status bits within SendEgressErrStatus
3152   */
3153  static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3154  				const struct cntr_entry *entry,
3155  				void *context, int vl, int mode, u64 data)
3156  {
3157  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3158  
3159  	return dd->send_egress_err_status_cnt[63];
3160  }
3161  
3162  static u64 access_tx_read_sdma_memory_csr_err_cnt(
3163  				const struct cntr_entry *entry,
3164  				void *context, int vl, int mode, u64 data)
3165  {
3166  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3167  
3168  	return dd->send_egress_err_status_cnt[62];
3169  }
3170  
3171  static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3172  					     void *context, int vl, int mode,
3173  					     u64 data)
3174  {
3175  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3176  
3177  	return dd->send_egress_err_status_cnt[61];
3178  }
3179  
3180  static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3181  						 void *context, int vl,
3182  						 int mode, u64 data)
3183  {
3184  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3185  
3186  	return dd->send_egress_err_status_cnt[60];
3187  }
3188  
3189  static u64 access_tx_read_sdma_memory_cor_err_cnt(
3190  				const struct cntr_entry *entry,
3191  				void *context, int vl, int mode, u64 data)
3192  {
3193  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3194  
3195  	return dd->send_egress_err_status_cnt[59];
3196  }
3197  
3198  static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3199  					void *context, int vl, int mode,
3200  					u64 data)
3201  {
3202  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3203  
3204  	return dd->send_egress_err_status_cnt[58];
3205  }
3206  
3207  static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3208  					    void *context, int vl, int mode,
3209  					    u64 data)
3210  {
3211  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3212  
3213  	return dd->send_egress_err_status_cnt[57];
3214  }
3215  
3216  static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3217  					      void *context, int vl, int mode,
3218  					      u64 data)
3219  {
3220  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3221  
3222  	return dd->send_egress_err_status_cnt[56];
3223  }
3224  
3225  static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3226  					      void *context, int vl, int mode,
3227  					      u64 data)
3228  {
3229  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3230  
3231  	return dd->send_egress_err_status_cnt[55];
3232  }
3233  
3234  static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3235  					      void *context, int vl, int mode,
3236  					      u64 data)
3237  {
3238  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3239  
3240  	return dd->send_egress_err_status_cnt[54];
3241  }
3242  
3243  static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3244  					      void *context, int vl, int mode,
3245  					      u64 data)
3246  {
3247  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3248  
3249  	return dd->send_egress_err_status_cnt[53];
3250  }
3251  
3252  static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3253  					      void *context, int vl, int mode,
3254  					      u64 data)
3255  {
3256  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3257  
3258  	return dd->send_egress_err_status_cnt[52];
3259  }
3260  
3261  static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3262  					      void *context, int vl, int mode,
3263  					      u64 data)
3264  {
3265  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3266  
3267  	return dd->send_egress_err_status_cnt[51];
3268  }
3269  
3270  static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3271  					      void *context, int vl, int mode,
3272  					      u64 data)
3273  {
3274  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3275  
3276  	return dd->send_egress_err_status_cnt[50];
3277  }
3278  
3279  static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3280  					      void *context, int vl, int mode,
3281  					      u64 data)
3282  {
3283  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3284  
3285  	return dd->send_egress_err_status_cnt[49];
3286  }
3287  
3288  static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3289  					      void *context, int vl, int mode,
3290  					      u64 data)
3291  {
3292  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3293  
3294  	return dd->send_egress_err_status_cnt[48];
3295  }
3296  
3297  static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3298  					      void *context, int vl, int mode,
3299  					      u64 data)
3300  {
3301  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3302  
3303  	return dd->send_egress_err_status_cnt[47];
3304  }
3305  
3306  static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3307  					    void *context, int vl, int mode,
3308  					    u64 data)
3309  {
3310  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3311  
3312  	return dd->send_egress_err_status_cnt[46];
3313  }
3314  
3315  static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3316  					     void *context, int vl, int mode,
3317  					     u64 data)
3318  {
3319  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3320  
3321  	return dd->send_egress_err_status_cnt[45];
3322  }
3323  
3324  static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3325  						 void *context, int vl,
3326  						 int mode, u64 data)
3327  {
3328  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3329  
3330  	return dd->send_egress_err_status_cnt[44];
3331  }
3332  
3333  static u64 access_tx_read_sdma_memory_unc_err_cnt(
3334  				const struct cntr_entry *entry,
3335  				void *context, int vl, int mode, u64 data)
3336  {
3337  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3338  
3339  	return dd->send_egress_err_status_cnt[43];
3340  }
3341  
3342  static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3343  					void *context, int vl, int mode,
3344  					u64 data)
3345  {
3346  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3347  
3348  	return dd->send_egress_err_status_cnt[42];
3349  }
3350  
3351  static u64 access_tx_credit_return_partiy_err_cnt(
3352  				const struct cntr_entry *entry,
3353  				void *context, int vl, int mode, u64 data)
3354  {
3355  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3356  
3357  	return dd->send_egress_err_status_cnt[41];
3358  }
3359  
3360  static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3361  				const struct cntr_entry *entry,
3362  				void *context, int vl, int mode, u64 data)
3363  {
3364  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3365  
3366  	return dd->send_egress_err_status_cnt[40];
3367  }
3368  
3369  static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3370  				const struct cntr_entry *entry,
3371  				void *context, int vl, int mode, u64 data)
3372  {
3373  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3374  
3375  	return dd->send_egress_err_status_cnt[39];
3376  }
3377  
3378  static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3379  				const struct cntr_entry *entry,
3380  				void *context, int vl, int mode, u64 data)
3381  {
3382  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3383  
3384  	return dd->send_egress_err_status_cnt[38];
3385  }
3386  
3387  static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3388  				const struct cntr_entry *entry,
3389  				void *context, int vl, int mode, u64 data)
3390  {
3391  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3392  
3393  	return dd->send_egress_err_status_cnt[37];
3394  }
3395  
3396  static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3397  				const struct cntr_entry *entry,
3398  				void *context, int vl, int mode, u64 data)
3399  {
3400  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3401  
3402  	return dd->send_egress_err_status_cnt[36];
3403  }
3404  
3405  static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3406  				const struct cntr_entry *entry,
3407  				void *context, int vl, int mode, u64 data)
3408  {
3409  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3410  
3411  	return dd->send_egress_err_status_cnt[35];
3412  }
3413  
3414  static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3415  				const struct cntr_entry *entry,
3416  				void *context, int vl, int mode, u64 data)
3417  {
3418  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3419  
3420  	return dd->send_egress_err_status_cnt[34];
3421  }
3422  
3423  static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3424  				const struct cntr_entry *entry,
3425  				void *context, int vl, int mode, u64 data)
3426  {
3427  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3428  
3429  	return dd->send_egress_err_status_cnt[33];
3430  }
3431  
3432  static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3433  				const struct cntr_entry *entry,
3434  				void *context, int vl, int mode, u64 data)
3435  {
3436  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3437  
3438  	return dd->send_egress_err_status_cnt[32];
3439  }
3440  
3441  static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3442  				const struct cntr_entry *entry,
3443  				void *context, int vl, int mode, u64 data)
3444  {
3445  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3446  
3447  	return dd->send_egress_err_status_cnt[31];
3448  }
3449  
3450  static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3451  				const struct cntr_entry *entry,
3452  				void *context, int vl, int mode, u64 data)
3453  {
3454  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3455  
3456  	return dd->send_egress_err_status_cnt[30];
3457  }
3458  
3459  static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3460  				const struct cntr_entry *entry,
3461  				void *context, int vl, int mode, u64 data)
3462  {
3463  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3464  
3465  	return dd->send_egress_err_status_cnt[29];
3466  }
3467  
3468  static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3469  				const struct cntr_entry *entry,
3470  				void *context, int vl, int mode, u64 data)
3471  {
3472  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3473  
3474  	return dd->send_egress_err_status_cnt[28];
3475  }
3476  
3477  static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3478  				const struct cntr_entry *entry,
3479  				void *context, int vl, int mode, u64 data)
3480  {
3481  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3482  
3483  	return dd->send_egress_err_status_cnt[27];
3484  }
3485  
3486  static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3487  				const struct cntr_entry *entry,
3488  				void *context, int vl, int mode, u64 data)
3489  {
3490  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3491  
3492  	return dd->send_egress_err_status_cnt[26];
3493  }
3494  
3495  static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3496  				const struct cntr_entry *entry,
3497  				void *context, int vl, int mode, u64 data)
3498  {
3499  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3500  
3501  	return dd->send_egress_err_status_cnt[25];
3502  }
3503  
3504  static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3505  				const struct cntr_entry *entry,
3506  				void *context, int vl, int mode, u64 data)
3507  {
3508  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3509  
3510  	return dd->send_egress_err_status_cnt[24];
3511  }
3512  
3513  static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3514  				const struct cntr_entry *entry,
3515  				void *context, int vl, int mode, u64 data)
3516  {
3517  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3518  
3519  	return dd->send_egress_err_status_cnt[23];
3520  }
3521  
3522  static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3523  				const struct cntr_entry *entry,
3524  				void *context, int vl, int mode, u64 data)
3525  {
3526  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3527  
3528  	return dd->send_egress_err_status_cnt[22];
3529  }
3530  
3531  static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3532  				const struct cntr_entry *entry,
3533  				void *context, int vl, int mode, u64 data)
3534  {
3535  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3536  
3537  	return dd->send_egress_err_status_cnt[21];
3538  }
3539  
3540  static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3541  				const struct cntr_entry *entry,
3542  				void *context, int vl, int mode, u64 data)
3543  {
3544  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3545  
3546  	return dd->send_egress_err_status_cnt[20];
3547  }
3548  
3549  static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3550  				const struct cntr_entry *entry,
3551  				void *context, int vl, int mode, u64 data)
3552  {
3553  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3554  
3555  	return dd->send_egress_err_status_cnt[19];
3556  }
3557  
3558  static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3559  				const struct cntr_entry *entry,
3560  				void *context, int vl, int mode, u64 data)
3561  {
3562  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3563  
3564  	return dd->send_egress_err_status_cnt[18];
3565  }
3566  
3567  static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3568  				const struct cntr_entry *entry,
3569  				void *context, int vl, int mode, u64 data)
3570  {
3571  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3572  
3573  	return dd->send_egress_err_status_cnt[17];
3574  }
3575  
3576  static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3577  				const struct cntr_entry *entry,
3578  				void *context, int vl, int mode, u64 data)
3579  {
3580  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3581  
3582  	return dd->send_egress_err_status_cnt[16];
3583  }
3584  
3585  static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3586  					   void *context, int vl, int mode,
3587  					   u64 data)
3588  {
3589  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3590  
3591  	return dd->send_egress_err_status_cnt[15];
3592  }
3593  
3594  static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3595  						 void *context, int vl,
3596  						 int mode, u64 data)
3597  {
3598  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3599  
3600  	return dd->send_egress_err_status_cnt[14];
3601  }
3602  
3603  static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3604  					       void *context, int vl, int mode,
3605  					       u64 data)
3606  {
3607  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3608  
3609  	return dd->send_egress_err_status_cnt[13];
3610  }
3611  
3612  static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3613  					void *context, int vl, int mode,
3614  					u64 data)
3615  {
3616  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3617  
3618  	return dd->send_egress_err_status_cnt[12];
3619  }
3620  
3621  static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3622  				const struct cntr_entry *entry,
3623  				void *context, int vl, int mode, u64 data)
3624  {
3625  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3626  
3627  	return dd->send_egress_err_status_cnt[11];
3628  }
3629  
3630  static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3631  					     void *context, int vl, int mode,
3632  					     u64 data)
3633  {
3634  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3635  
3636  	return dd->send_egress_err_status_cnt[10];
3637  }
3638  
3639  static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3640  					    void *context, int vl, int mode,
3641  					    u64 data)
3642  {
3643  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3644  
3645  	return dd->send_egress_err_status_cnt[9];
3646  }
3647  
3648  static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3649  				const struct cntr_entry *entry,
3650  				void *context, int vl, int mode, u64 data)
3651  {
3652  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3653  
3654  	return dd->send_egress_err_status_cnt[8];
3655  }
3656  
3657  static u64 access_tx_pio_launch_intf_parity_err_cnt(
3658  				const struct cntr_entry *entry,
3659  				void *context, int vl, int mode, u64 data)
3660  {
3661  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3662  
3663  	return dd->send_egress_err_status_cnt[7];
3664  }
3665  
3666  static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3667  					    void *context, int vl, int mode,
3668  					    u64 data)
3669  {
3670  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3671  
3672  	return dd->send_egress_err_status_cnt[6];
3673  }
3674  
3675  static u64 access_tx_incorrect_link_state_err_cnt(
3676  				const struct cntr_entry *entry,
3677  				void *context, int vl, int mode, u64 data)
3678  {
3679  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3680  
3681  	return dd->send_egress_err_status_cnt[5];
3682  }
3683  
3684  static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3685  				      void *context, int vl, int mode,
3686  				      u64 data)
3687  {
3688  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3689  
3690  	return dd->send_egress_err_status_cnt[4];
3691  }
3692  
3693  static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3694  				const struct cntr_entry *entry,
3695  				void *context, int vl, int mode, u64 data)
3696  {
3697  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3698  
3699  	return dd->send_egress_err_status_cnt[3];
3700  }
3701  
3702  static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3703  					    void *context, int vl, int mode,
3704  					    u64 data)
3705  {
3706  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3707  
3708  	return dd->send_egress_err_status_cnt[2];
3709  }
3710  
3711  static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3712  				const struct cntr_entry *entry,
3713  				void *context, int vl, int mode, u64 data)
3714  {
3715  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3716  
3717  	return dd->send_egress_err_status_cnt[1];
3718  }
3719  
3720  static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3721  				const struct cntr_entry *entry,
3722  				void *context, int vl, int mode, u64 data)
3723  {
3724  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3725  
3726  	return dd->send_egress_err_status_cnt[0];
3727  }
3728  
3729  /*
3730   * Software counters corresponding to each of the
3731   * error status bits within SendErrStatus
3732   */
3733  static u64 access_send_csr_write_bad_addr_err_cnt(
3734  				const struct cntr_entry *entry,
3735  				void *context, int vl, int mode, u64 data)
3736  {
3737  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3738  
3739  	return dd->send_err_status_cnt[2];
3740  }
3741  
3742  static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3743  						 void *context, int vl,
3744  						 int mode, u64 data)
3745  {
3746  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3747  
3748  	return dd->send_err_status_cnt[1];
3749  }
3750  
3751  static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3752  				      void *context, int vl, int mode,
3753  				      u64 data)
3754  {
3755  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3756  
3757  	return dd->send_err_status_cnt[0];
3758  }
3759  
3760  /*
3761   * Software counters corresponding to each of the
3762   * error status bits within SendCtxtErrStatus
3763   */
3764  static u64 access_pio_write_out_of_bounds_err_cnt(
3765  				const struct cntr_entry *entry,
3766  				void *context, int vl, int mode, u64 data)
3767  {
3768  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3769  
3770  	return dd->sw_ctxt_err_status_cnt[4];
3771  }
3772  
3773  static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3774  					     void *context, int vl, int mode,
3775  					     u64 data)
3776  {
3777  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3778  
3779  	return dd->sw_ctxt_err_status_cnt[3];
3780  }
3781  
3782  static u64 access_pio_write_crosses_boundary_err_cnt(
3783  				const struct cntr_entry *entry,
3784  				void *context, int vl, int mode, u64 data)
3785  {
3786  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3787  
3788  	return dd->sw_ctxt_err_status_cnt[2];
3789  }
3790  
3791  static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3792  						void *context, int vl,
3793  						int mode, u64 data)
3794  {
3795  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3796  
3797  	return dd->sw_ctxt_err_status_cnt[1];
3798  }
3799  
3800  static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3801  					       void *context, int vl, int mode,
3802  					       u64 data)
3803  {
3804  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3805  
3806  	return dd->sw_ctxt_err_status_cnt[0];
3807  }
3808  
3809  /*
3810   * Software counters corresponding to each of the
3811   * error status bits within SendDmaEngErrStatus
3812   */
3813  static u64 access_sdma_header_request_fifo_cor_err_cnt(
3814  				const struct cntr_entry *entry,
3815  				void *context, int vl, int mode, u64 data)
3816  {
3817  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3818  
3819  	return dd->sw_send_dma_eng_err_status_cnt[23];
3820  }
3821  
3822  static u64 access_sdma_header_storage_cor_err_cnt(
3823  				const struct cntr_entry *entry,
3824  				void *context, int vl, int mode, u64 data)
3825  {
3826  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3827  
3828  	return dd->sw_send_dma_eng_err_status_cnt[22];
3829  }
3830  
3831  static u64 access_sdma_packet_tracking_cor_err_cnt(
3832  				const struct cntr_entry *entry,
3833  				void *context, int vl, int mode, u64 data)
3834  {
3835  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3836  
3837  	return dd->sw_send_dma_eng_err_status_cnt[21];
3838  }
3839  
3840  static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3841  					    void *context, int vl, int mode,
3842  					    u64 data)
3843  {
3844  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3845  
3846  	return dd->sw_send_dma_eng_err_status_cnt[20];
3847  }
3848  
3849  static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3850  					      void *context, int vl, int mode,
3851  					      u64 data)
3852  {
3853  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3854  
3855  	return dd->sw_send_dma_eng_err_status_cnt[19];
3856  }
3857  
3858  static u64 access_sdma_header_request_fifo_unc_err_cnt(
3859  				const struct cntr_entry *entry,
3860  				void *context, int vl, int mode, u64 data)
3861  {
3862  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3863  
3864  	return dd->sw_send_dma_eng_err_status_cnt[18];
3865  }
3866  
access_sdma_header_storage_unc_err_cnt(const struct cntr_entry * entry,void * context,int vl,int mode,u64 data)3867  static u64 access_sdma_header_storage_unc_err_cnt(
3868  				const struct cntr_entry *entry,
3869  				void *context, int vl, int mode, u64 data)
3870  {
3871  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3872  
3873  	return dd->sw_send_dma_eng_err_status_cnt[17];
3874  }
3875  
access_sdma_packet_tracking_unc_err_cnt(const struct cntr_entry * entry,void * context,int vl,int mode,u64 data)3876  static u64 access_sdma_packet_tracking_unc_err_cnt(
3877  				const struct cntr_entry *entry,
3878  				void *context, int vl, int mode, u64 data)
3879  {
3880  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3881  
3882  	return dd->sw_send_dma_eng_err_status_cnt[16];
3883  }
3884  
access_sdma_assembly_unc_err_cnt(const struct cntr_entry * entry,void * context,int vl,int mode,u64 data)3885  static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3886  					    void *context, int vl, int mode,
3887  					    u64 data)
3888  {
3889  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3890  
3891  	return dd->sw_send_dma_eng_err_status_cnt[15];
3892  }
3893  
access_sdma_desc_table_unc_err_cnt(const struct cntr_entry * entry,void * context,int vl,int mode,u64 data)3894  static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3895  					      void *context, int vl, int mode,
3896  					      u64 data)
3897  {
3898  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3899  
3900  	return dd->sw_send_dma_eng_err_status_cnt[14];
3901  }
3902  
access_sdma_timeout_err_cnt(const struct cntr_entry * entry,void * context,int vl,int mode,u64 data)3903  static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3904  				       void *context, int vl, int mode,
3905  				       u64 data)
3906  {
3907  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3908  
3909  	return dd->sw_send_dma_eng_err_status_cnt[13];
3910  }
3911  
access_sdma_header_length_err_cnt(const struct cntr_entry * entry,void * context,int vl,int mode,u64 data)3912  static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3913  					     void *context, int vl, int mode,
3914  					     u64 data)
3915  {
3916  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3917  
3918  	return dd->sw_send_dma_eng_err_status_cnt[12];
3919  }
3920  
access_sdma_header_address_err_cnt(const struct cntr_entry * entry,void * context,int vl,int mode,u64 data)3921  static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3922  					      void *context, int vl, int mode,
3923  					      u64 data)
3924  {
3925  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3926  
3927  	return dd->sw_send_dma_eng_err_status_cnt[11];
3928  }
3929  
access_sdma_header_select_err_cnt(const struct cntr_entry * entry,void * context,int vl,int mode,u64 data)3930  static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3931  					     void *context, int vl, int mode,
3932  					     u64 data)
3933  {
3934  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3935  
3936  	return dd->sw_send_dma_eng_err_status_cnt[10];
3937  }
3938  
access_sdma_reserved_9_err_cnt(const struct cntr_entry * entry,void * context,int vl,int mode,u64 data)3939  static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3940  					  void *context, int vl, int mode,
3941  					  u64 data)
3942  {
3943  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3944  
3945  	return dd->sw_send_dma_eng_err_status_cnt[9];
3946  }
3947  
access_sdma_packet_desc_overflow_err_cnt(const struct cntr_entry * entry,void * context,int vl,int mode,u64 data)3948  static u64 access_sdma_packet_desc_overflow_err_cnt(
3949  				const struct cntr_entry *entry,
3950  				void *context, int vl, int mode, u64 data)
3951  {
3952  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3953  
3954  	return dd->sw_send_dma_eng_err_status_cnt[8];
3955  }
3956  
access_sdma_length_mismatch_err_cnt(const struct cntr_entry * entry,void * context,int vl,int mode,u64 data)3957  static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3958  					       void *context, int vl,
3959  					       int mode, u64 data)
3960  {
3961  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3962  
3963  	return dd->sw_send_dma_eng_err_status_cnt[7];
3964  }
3965  
access_sdma_halt_err_cnt(const struct cntr_entry * entry,void * context,int vl,int mode,u64 data)3966  static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3967  				    void *context, int vl, int mode, u64 data)
3968  {
3969  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3970  
3971  	return dd->sw_send_dma_eng_err_status_cnt[6];
3972  }
3973  
access_sdma_mem_read_err_cnt(const struct cntr_entry * entry,void * context,int vl,int mode,u64 data)3974  static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3975  					void *context, int vl, int mode,
3976  					u64 data)
3977  {
3978  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3979  
3980  	return dd->sw_send_dma_eng_err_status_cnt[5];
3981  }
3982  
access_sdma_first_desc_err_cnt(const struct cntr_entry * entry,void * context,int vl,int mode,u64 data)3983  static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3984  					  void *context, int vl, int mode,
3985  					  u64 data)
3986  {
3987  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3988  
3989  	return dd->sw_send_dma_eng_err_status_cnt[4];
3990  }
3991  
access_sdma_tail_out_of_bounds_err_cnt(const struct cntr_entry * entry,void * context,int vl,int mode,u64 data)3992  static u64 access_sdma_tail_out_of_bounds_err_cnt(
3993  				const struct cntr_entry *entry,
3994  				void *context, int vl, int mode, u64 data)
3995  {
3996  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3997  
3998  	return dd->sw_send_dma_eng_err_status_cnt[3];
3999  }
4000  
access_sdma_too_long_err_cnt(const struct cntr_entry * entry,void * context,int vl,int mode,u64 data)4001  static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
4002  					void *context, int vl, int mode,
4003  					u64 data)
4004  {
4005  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4006  
4007  	return dd->sw_send_dma_eng_err_status_cnt[2];
4008  }
4009  
access_sdma_gen_mismatch_err_cnt(const struct cntr_entry * entry,void * context,int vl,int mode,u64 data)4010  static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
4011  					    void *context, int vl, int mode,
4012  					    u64 data)
4013  {
4014  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4015  
4016  	return dd->sw_send_dma_eng_err_status_cnt[1];
4017  }
4018  
access_sdma_wrong_dw_err_cnt(const struct cntr_entry * entry,void * context,int vl,int mode,u64 data)4019  static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
4020  					void *context, int vl, int mode,
4021  					u64 data)
4022  {
4023  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4024  
4025  	return dd->sw_send_dma_eng_err_status_cnt[0];
4026  }
4027  
access_dc_rcv_err_cnt(const struct cntr_entry * entry,void * context,int vl,int mode,u64 data)4028  static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
4029  				 void *context, int vl, int mode,
4030  				 u64 data)
4031  {
4032  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4033  
4034  	u64 val = 0;
4035  	u64 csr = entry->csr;
4036  
4037  	val = read_write_csr(dd, csr, mode, data);
4038  	if (mode == CNTR_MODE_R) {
4039  		val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
4040  			CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
4041  	} else if (mode == CNTR_MODE_W) {
4042  		dd->sw_rcv_bypass_packet_errors = 0;
4043  	} else {
4044  		dd_dev_err(dd, "Invalid cntr register access mode");
4045  		return 0;
4046  	}
4047  	return val;
4048  }
4049  
4050  #define def_access_sw_cpu(cntr) \
4051  static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,		      \
4052  			      void *context, int vl, int mode, u64 data)      \
4053  {									      \
4054  	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
4055  	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,	      \
4056  			      ppd->ibport_data.rvp.cntr, vl,		      \
4057  			      mode, data);				      \
4058  }
4059  
4060  def_access_sw_cpu(rc_acks);
4061  def_access_sw_cpu(rc_qacks);
4062  def_access_sw_cpu(rc_delayed_comp);
4063  
4064  #define def_access_ibp_counter(cntr) \
4065  static u64 access_ibp_##cntr(const struct cntr_entry *entry,		      \
4066  				void *context, int vl, int mode, u64 data)    \
4067  {									      \
4068  	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
4069  									      \
4070  	if (vl != CNTR_INVALID_VL)					      \
4071  		return 0;						      \
4072  									      \
4073  	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,	      \
4074  			     mode, data);				      \
4075  }
4076  
4077  def_access_ibp_counter(loop_pkts);
4078  def_access_ibp_counter(rc_resends);
4079  def_access_ibp_counter(rnr_naks);
4080  def_access_ibp_counter(other_naks);
4081  def_access_ibp_counter(rc_timeouts);
4082  def_access_ibp_counter(pkt_drops);
4083  def_access_ibp_counter(dmawait);
4084  def_access_ibp_counter(rc_seqnak);
4085  def_access_ibp_counter(rc_dupreq);
4086  def_access_ibp_counter(rdma_seq);
4087  def_access_ibp_counter(unaligned);
4088  def_access_ibp_counter(seq_naks);
4089  def_access_ibp_counter(rc_crwaits);
4090  
4091  static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
4092  [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
4093  [C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH),
4094  [C_RX_SHORT_ERR] = RXE32_DEV_CNTR_ELEM(RxShrErr, RCV_SHORT_ERR_CNT, CNTR_SYNTH),
4095  [C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH),
4096  [C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH),
4097  [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
4098  			CNTR_NORMAL),
4099  [C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
4100  			CNTR_NORMAL),
4101  [C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
4102  			RCV_TID_FLOW_GEN_MISMATCH_CNT,
4103  			CNTR_NORMAL),
4104  [C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4105  			CNTR_NORMAL),
4106  [C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4107  			RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4108  [C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4109  			CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4110  [C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4111  			CNTR_NORMAL),
4112  [C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4113  			CNTR_NORMAL),
4114  [C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4115  			CNTR_NORMAL),
4116  [C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4117  			CNTR_NORMAL),
4118  [C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4119  			CNTR_NORMAL),
4120  [C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4121  			CNTR_NORMAL),
4122  [C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4123  			CCE_RCV_URGENT_INT_CNT,	CNTR_NORMAL),
4124  [C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4125  			CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4126  [C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4127  			      CNTR_SYNTH),
4128  [C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
4129  			    access_dc_rcv_err_cnt),
4130  [C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4131  				 CNTR_SYNTH),
4132  [C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4133  				  CNTR_SYNTH),
4134  [C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4135  				  CNTR_SYNTH),
4136  [C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4137  				   DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4138  [C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4139  				  DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4140  				  CNTR_SYNTH),
4141  [C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4142  				DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4143  [C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4144  			       CNTR_SYNTH),
4145  [C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4146  			      CNTR_SYNTH),
4147  [C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4148  			       CNTR_SYNTH),
4149  [C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4150  				 CNTR_SYNTH),
4151  [C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4152  				CNTR_SYNTH),
4153  [C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4154  				CNTR_SYNTH),
4155  [C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4156  			       CNTR_SYNTH),
4157  [C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4158  				 CNTR_SYNTH | CNTR_VL),
4159  [C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4160  				CNTR_SYNTH | CNTR_VL),
4161  [C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4162  [C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4163  				 CNTR_SYNTH | CNTR_VL),
4164  [C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4165  [C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4166  				 CNTR_SYNTH | CNTR_VL),
4167  [C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4168  			      CNTR_SYNTH),
4169  [C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4170  				 CNTR_SYNTH | CNTR_VL),
4171  [C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4172  				CNTR_SYNTH),
4173  [C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4174  				   CNTR_SYNTH | CNTR_VL),
4175  [C_DC_TOTAL_CRC] =
4176  	DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4177  			 CNTR_SYNTH),
4178  [C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4179  				  CNTR_SYNTH),
4180  [C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4181  				  CNTR_SYNTH),
4182  [C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4183  				  CNTR_SYNTH),
4184  [C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4185  				  CNTR_SYNTH),
4186  [C_DC_CRC_MULT_LN] =
4187  	DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4188  			 CNTR_SYNTH),
4189  [C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4190  				    CNTR_SYNTH),
4191  [C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4192  				    CNTR_SYNTH),
4193  [C_DC_SEQ_CRC_CNT] =
4194  	DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4195  			 CNTR_SYNTH),
4196  [C_DC_ESC0_ONLY_CNT] =
4197  	DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4198  			 CNTR_SYNTH),
4199  [C_DC_ESC0_PLUS1_CNT] =
4200  	DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4201  			 CNTR_SYNTH),
4202  [C_DC_ESC0_PLUS2_CNT] =
4203  	DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4204  			 CNTR_SYNTH),
4205  [C_DC_REINIT_FROM_PEER_CNT] =
4206  	DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4207  			 CNTR_SYNTH),
4208  [C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4209  				  CNTR_SYNTH),
4210  [C_DC_MISC_FLG_CNT] =
4211  	DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4212  			 CNTR_SYNTH),
4213  [C_DC_PRF_GOOD_LTP_CNT] =
4214  	DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4215  [C_DC_PRF_ACCEPTED_LTP_CNT] =
4216  	DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4217  			 CNTR_SYNTH),
4218  [C_DC_PRF_RX_FLIT_CNT] =
4219  	DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4220  [C_DC_PRF_TX_FLIT_CNT] =
4221  	DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4222  [C_DC_PRF_CLK_CNTR] =
4223  	DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4224  [C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4225  	DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4226  [C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4227  	DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4228  			 CNTR_SYNTH),
4229  [C_DC_PG_STS_TX_SBE_CNT] =
4230  	DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4231  [C_DC_PG_STS_TX_MBE_CNT] =
4232  	DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4233  			 CNTR_SYNTH),
4234  [C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4235  			    access_sw_cpu_intr),
4236  [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4237  			    access_sw_cpu_rcv_limit),
4238  [C_SW_CTX0_SEQ_DROP] = CNTR_ELEM("SeqDrop0", 0, 0, CNTR_NORMAL,
4239  			    access_sw_ctx0_seq_drop),
4240  [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4241  			    access_sw_vtx_wait),
4242  [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4243  			    access_sw_pio_wait),
4244  [C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4245  			    access_sw_pio_drain),
4246  [C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4247  			    access_sw_kmem_wait),
4248  [C_SW_TID_WAIT] = CNTR_ELEM("TidWait", 0, 0, CNTR_NORMAL,
4249  			    hfi1_access_sw_tid_wait),
4250  [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4251  			    access_sw_send_schedule),
4252  [C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4253  				      SEND_DMA_DESC_FETCHED_CNT, 0,
4254  				      CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4255  				      dev_access_u32_csr),
4256  [C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4257  			     CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4258  			     access_sde_int_cnt),
4259  [C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4260  			     CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4261  			     access_sde_err_cnt),
4262  [C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4263  				  CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4264  				  access_sde_idle_int_cnt),
4265  [C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4266  				      CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4267  				      access_sde_progress_int_cnt),
4268  /* MISC_ERR_STATUS */
4269  [C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4270  				CNTR_NORMAL,
4271  				access_misc_pll_lock_fail_err_cnt),
4272  [C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4273  				CNTR_NORMAL,
4274  				access_misc_mbist_fail_err_cnt),
4275  [C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4276  				CNTR_NORMAL,
4277  				access_misc_invalid_eep_cmd_err_cnt),
4278  [C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4279  				CNTR_NORMAL,
4280  				access_misc_efuse_done_parity_err_cnt),
4281  [C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4282  				CNTR_NORMAL,
4283  				access_misc_efuse_write_err_cnt),
4284  [C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4285  				0, CNTR_NORMAL,
4286  				access_misc_efuse_read_bad_addr_err_cnt),
4287  [C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4288  				CNTR_NORMAL,
4289  				access_misc_efuse_csr_parity_err_cnt),
4290  [C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4291  				CNTR_NORMAL,
4292  				access_misc_fw_auth_failed_err_cnt),
4293  [C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4294  				CNTR_NORMAL,
4295  				access_misc_key_mismatch_err_cnt),
4296  [C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4297  				CNTR_NORMAL,
4298  				access_misc_sbus_write_failed_err_cnt),
4299  [C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4300  				CNTR_NORMAL,
4301  				access_misc_csr_write_bad_addr_err_cnt),
4302  [C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4303  				CNTR_NORMAL,
4304  				access_misc_csr_read_bad_addr_err_cnt),
4305  [C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4306  				CNTR_NORMAL,
4307  				access_misc_csr_parity_err_cnt),
4308  /* CceErrStatus */
4309  [C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4310  				CNTR_NORMAL,
4311  				access_sw_cce_err_status_aggregated_cnt),
4312  [C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4313  				CNTR_NORMAL,
4314  				access_cce_msix_csr_parity_err_cnt),
4315  [C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4316  				CNTR_NORMAL,
4317  				access_cce_int_map_unc_err_cnt),
4318  [C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4319  				CNTR_NORMAL,
4320  				access_cce_int_map_cor_err_cnt),
4321  [C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4322  				CNTR_NORMAL,
4323  				access_cce_msix_table_unc_err_cnt),
4324  [C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4325  				CNTR_NORMAL,
4326  				access_cce_msix_table_cor_err_cnt),
4327  [C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4328  				0, CNTR_NORMAL,
4329  				access_cce_rxdma_conv_fifo_parity_err_cnt),
4330  [C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4331  				0, CNTR_NORMAL,
4332  				access_cce_rcpl_async_fifo_parity_err_cnt),
4333  [C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4334  				CNTR_NORMAL,
4335  				access_cce_seg_write_bad_addr_err_cnt),
4336  [C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4337  				CNTR_NORMAL,
4338  				access_cce_seg_read_bad_addr_err_cnt),
4339  [C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4340  				CNTR_NORMAL,
4341  				access_la_triggered_cnt),
4342  [C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4343  				CNTR_NORMAL,
4344  				access_cce_trgt_cpl_timeout_err_cnt),
4345  [C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4346  				CNTR_NORMAL,
4347  				access_pcic_receive_parity_err_cnt),
4348  [C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4349  				CNTR_NORMAL,
4350  				access_pcic_transmit_back_parity_err_cnt),
4351  [C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4352  				0, CNTR_NORMAL,
4353  				access_pcic_transmit_front_parity_err_cnt),
4354  [C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4355  				CNTR_NORMAL,
4356  				access_pcic_cpl_dat_q_unc_err_cnt),
4357  [C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4358  				CNTR_NORMAL,
4359  				access_pcic_cpl_hd_q_unc_err_cnt),
4360  [C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4361  				CNTR_NORMAL,
4362  				access_pcic_post_dat_q_unc_err_cnt),
4363  [C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4364  				CNTR_NORMAL,
4365  				access_pcic_post_hd_q_unc_err_cnt),
4366  [C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4367  				CNTR_NORMAL,
4368  				access_pcic_retry_sot_mem_unc_err_cnt),
4369  [C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4370  				CNTR_NORMAL,
4371  				access_pcic_retry_mem_unc_err),
4372  [C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4373  				CNTR_NORMAL,
4374  				access_pcic_n_post_dat_q_parity_err_cnt),
4375  [C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4376  				CNTR_NORMAL,
4377  				access_pcic_n_post_h_q_parity_err_cnt),
4378  [C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4379  				CNTR_NORMAL,
4380  				access_pcic_cpl_dat_q_cor_err_cnt),
4381  [C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4382  				CNTR_NORMAL,
4383  				access_pcic_cpl_hd_q_cor_err_cnt),
4384  [C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4385  				CNTR_NORMAL,
4386  				access_pcic_post_dat_q_cor_err_cnt),
4387  [C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4388  				CNTR_NORMAL,
4389  				access_pcic_post_hd_q_cor_err_cnt),
4390  [C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4391  				CNTR_NORMAL,
4392  				access_pcic_retry_sot_mem_cor_err_cnt),
4393  [C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4394  				CNTR_NORMAL,
4395  				access_pcic_retry_mem_cor_err_cnt),
4396  [C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4397  				"CceCli1AsyncFifoDbgParityError", 0, 0,
4398  				CNTR_NORMAL,
4399  				access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4400  [C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4401  				"CceCli1AsyncFifoRxdmaParityError", 0, 0,
4402  				CNTR_NORMAL,
4403  				access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4404  				),
4405  [C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4406  			"CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4407  			CNTR_NORMAL,
4408  			access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4409  [C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4410  			"CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4411  			CNTR_NORMAL,
4412  			access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4413  [C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4414  			0, CNTR_NORMAL,
4415  			access_cce_cli2_async_fifo_parity_err_cnt),
4416  [C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4417  			CNTR_NORMAL,
4418  			access_cce_csr_cfg_bus_parity_err_cnt),
4419  [C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4420  			0, CNTR_NORMAL,
4421  			access_cce_cli0_async_fifo_parity_err_cnt),
4422  [C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4423  			CNTR_NORMAL,
4424  			access_cce_rspd_data_parity_err_cnt),
4425  [C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4426  			CNTR_NORMAL,
4427  			access_cce_trgt_access_err_cnt),
4428  [C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4429  			0, CNTR_NORMAL,
4430  			access_cce_trgt_async_fifo_parity_err_cnt),
4431  [C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4432  			CNTR_NORMAL,
4433  			access_cce_csr_write_bad_addr_err_cnt),
4434  [C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4435  			CNTR_NORMAL,
4436  			access_cce_csr_read_bad_addr_err_cnt),
4437  [C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4438  			CNTR_NORMAL,
4439  			access_ccs_csr_parity_err_cnt),
4440  
4441  /* RcvErrStatus */
4442  [C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4443  			CNTR_NORMAL,
4444  			access_rx_csr_parity_err_cnt),
4445  [C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4446  			CNTR_NORMAL,
4447  			access_rx_csr_write_bad_addr_err_cnt),
4448  [C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4449  			CNTR_NORMAL,
4450  			access_rx_csr_read_bad_addr_err_cnt),
4451  [C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4452  			CNTR_NORMAL,
4453  			access_rx_dma_csr_unc_err_cnt),
4454  [C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4455  			CNTR_NORMAL,
4456  			access_rx_dma_dq_fsm_encoding_err_cnt),
4457  [C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4458  			CNTR_NORMAL,
4459  			access_rx_dma_eq_fsm_encoding_err_cnt),
4460  [C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4461  			CNTR_NORMAL,
4462  			access_rx_dma_csr_parity_err_cnt),
4463  [C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4464  			CNTR_NORMAL,
4465  			access_rx_rbuf_data_cor_err_cnt),
4466  [C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4467  			CNTR_NORMAL,
4468  			access_rx_rbuf_data_unc_err_cnt),
4469  [C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4470  			CNTR_NORMAL,
4471  			access_rx_dma_data_fifo_rd_cor_err_cnt),
4472  [C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4473  			CNTR_NORMAL,
4474  			access_rx_dma_data_fifo_rd_unc_err_cnt),
4475  [C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4476  			CNTR_NORMAL,
4477  			access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4478  [C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4479  			CNTR_NORMAL,
4480  			access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4481  [C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4482  			CNTR_NORMAL,
4483  			access_rx_rbuf_desc_part2_cor_err_cnt),
4484  [C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4485  			CNTR_NORMAL,
4486  			access_rx_rbuf_desc_part2_unc_err_cnt),
4487  [C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4488  			CNTR_NORMAL,
4489  			access_rx_rbuf_desc_part1_cor_err_cnt),
4490  [C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4491  			CNTR_NORMAL,
4492  			access_rx_rbuf_desc_part1_unc_err_cnt),
4493  [C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4494  			CNTR_NORMAL,
4495  			access_rx_hq_intr_fsm_err_cnt),
4496  [C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4497  			CNTR_NORMAL,
4498  			access_rx_hq_intr_csr_parity_err_cnt),
4499  [C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4500  			CNTR_NORMAL,
4501  			access_rx_lookup_csr_parity_err_cnt),
4502  [C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4503  			CNTR_NORMAL,
4504  			access_rx_lookup_rcv_array_cor_err_cnt),
4505  [C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4506  			CNTR_NORMAL,
4507  			access_rx_lookup_rcv_array_unc_err_cnt),
4508  [C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4509  			0, CNTR_NORMAL,
4510  			access_rx_lookup_des_part2_parity_err_cnt),
4511  [C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4512  			0, CNTR_NORMAL,
4513  			access_rx_lookup_des_part1_unc_cor_err_cnt),
4514  [C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4515  			CNTR_NORMAL,
4516  			access_rx_lookup_des_part1_unc_err_cnt),
4517  [C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4518  			CNTR_NORMAL,
4519  			access_rx_rbuf_next_free_buf_cor_err_cnt),
4520  [C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4521  			CNTR_NORMAL,
4522  			access_rx_rbuf_next_free_buf_unc_err_cnt),
4523  [C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4524  			"RxRbufFlInitWrAddrParityErr", 0, 0,
4525  			CNTR_NORMAL,
4526  			access_rbuf_fl_init_wr_addr_parity_err_cnt),
4527  [C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4528  			0, CNTR_NORMAL,
4529  			access_rx_rbuf_fl_initdone_parity_err_cnt),
4530  [C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4531  			0, CNTR_NORMAL,
4532  			access_rx_rbuf_fl_write_addr_parity_err_cnt),
4533  [C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4534  			CNTR_NORMAL,
4535  			access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4536  [C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4537  			CNTR_NORMAL,
4538  			access_rx_rbuf_empty_err_cnt),
4539  [C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4540  			CNTR_NORMAL,
4541  			access_rx_rbuf_full_err_cnt),
4542  [C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4543  			CNTR_NORMAL,
4544  			access_rbuf_bad_lookup_err_cnt),
4545  [C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4546  			CNTR_NORMAL,
4547  			access_rbuf_ctx_id_parity_err_cnt),
4548  [C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4549  			CNTR_NORMAL,
4550  			access_rbuf_csr_qeopdw_parity_err_cnt),
4551  [C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4552  			"RxRbufCsrQNumOfPktParityErr", 0, 0,
4553  			CNTR_NORMAL,
4554  			access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4555  [C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4556  			"RxRbufCsrQTlPtrParityErr", 0, 0,
4557  			CNTR_NORMAL,
4558  			access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4559  [C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4560  			0, CNTR_NORMAL,
4561  			access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4562  [C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4563  			0, CNTR_NORMAL,
4564  			access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4565  [C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4566  			0, 0, CNTR_NORMAL,
4567  			access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4568  [C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4569  			0, CNTR_NORMAL,
4570  			access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4571  [C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4572  			"RxRbufCsrQHeadBufNumParityErr", 0, 0,
4573  			CNTR_NORMAL,
4574  			access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4575  [C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4576  			0, CNTR_NORMAL,
4577  			access_rx_rbuf_block_list_read_cor_err_cnt),
4578  [C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4579  			0, CNTR_NORMAL,
4580  			access_rx_rbuf_block_list_read_unc_err_cnt),
4581  [C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4582  			CNTR_NORMAL,
4583  			access_rx_rbuf_lookup_des_cor_err_cnt),
4584  [C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4585  			CNTR_NORMAL,
4586  			access_rx_rbuf_lookup_des_unc_err_cnt),
4587  [C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4588  			"RxRbufLookupDesRegUncCorErr", 0, 0,
4589  			CNTR_NORMAL,
4590  			access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4591  [C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4592  			CNTR_NORMAL,
4593  			access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4594  [C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4595  			CNTR_NORMAL,
4596  			access_rx_rbuf_free_list_cor_err_cnt),
4597  [C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4598  			CNTR_NORMAL,
4599  			access_rx_rbuf_free_list_unc_err_cnt),
4600  [C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4601  			CNTR_NORMAL,
4602  			access_rx_rcv_fsm_encoding_err_cnt),
4603  [C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4604  			CNTR_NORMAL,
4605  			access_rx_dma_flag_cor_err_cnt),
4606  [C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4607  			CNTR_NORMAL,
4608  			access_rx_dma_flag_unc_err_cnt),
4609  [C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4610  			CNTR_NORMAL,
4611  			access_rx_dc_sop_eop_parity_err_cnt),
4612  [C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4613  			CNTR_NORMAL,
4614  			access_rx_rcv_csr_parity_err_cnt),
4615  [C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4616  			CNTR_NORMAL,
4617  			access_rx_rcv_qp_map_table_cor_err_cnt),
4618  [C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4619  			CNTR_NORMAL,
4620  			access_rx_rcv_qp_map_table_unc_err_cnt),
4621  [C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4622  			CNTR_NORMAL,
4623  			access_rx_rcv_data_cor_err_cnt),
4624  [C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4625  			CNTR_NORMAL,
4626  			access_rx_rcv_data_unc_err_cnt),
4627  [C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4628  			CNTR_NORMAL,
4629  			access_rx_rcv_hdr_cor_err_cnt),
4630  [C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4631  			CNTR_NORMAL,
4632  			access_rx_rcv_hdr_unc_err_cnt),
4633  [C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4634  			CNTR_NORMAL,
4635  			access_rx_dc_intf_parity_err_cnt),
4636  [C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4637  			CNTR_NORMAL,
4638  			access_rx_dma_csr_cor_err_cnt),
4639  /* SendPioErrStatus */
4640  [C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4641  			CNTR_NORMAL,
4642  			access_pio_pec_sop_head_parity_err_cnt),
4643  [C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4644  			CNTR_NORMAL,
4645  			access_pio_pcc_sop_head_parity_err_cnt),
4646  [C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4647  			0, 0, CNTR_NORMAL,
4648  			access_pio_last_returned_cnt_parity_err_cnt),
4649  [C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4650  			0, CNTR_NORMAL,
4651  			access_pio_current_free_cnt_parity_err_cnt),
4652  [C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4653  			CNTR_NORMAL,
4654  			access_pio_reserved_31_err_cnt),
4655  [C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4656  			CNTR_NORMAL,
4657  			access_pio_reserved_30_err_cnt),
4658  [C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4659  			CNTR_NORMAL,
4660  			access_pio_ppmc_sop_len_err_cnt),
4661  [C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4662  			CNTR_NORMAL,
4663  			access_pio_ppmc_bqc_mem_parity_err_cnt),
4664  [C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4665  			CNTR_NORMAL,
4666  			access_pio_vl_fifo_parity_err_cnt),
4667  [C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4668  			CNTR_NORMAL,
4669  			access_pio_vlf_sop_parity_err_cnt),
4670  [C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4671  			CNTR_NORMAL,
4672  			access_pio_vlf_v1_len_parity_err_cnt),
4673  [C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4674  			CNTR_NORMAL,
4675  			access_pio_block_qw_count_parity_err_cnt),
4676  [C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4677  			CNTR_NORMAL,
4678  			access_pio_write_qw_valid_parity_err_cnt),
4679  [C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4680  			CNTR_NORMAL,
4681  			access_pio_state_machine_err_cnt),
4682  [C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4683  			CNTR_NORMAL,
4684  			access_pio_write_data_parity_err_cnt),
4685  [C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4686  			CNTR_NORMAL,
4687  			access_pio_host_addr_mem_cor_err_cnt),
4688  [C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4689  			CNTR_NORMAL,
4690  			access_pio_host_addr_mem_unc_err_cnt),
4691  [C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4692  			CNTR_NORMAL,
4693  			access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4694  [C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4695  			CNTR_NORMAL,
4696  			access_pio_init_sm_in_err_cnt),
4697  [C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4698  			CNTR_NORMAL,
4699  			access_pio_ppmc_pbl_fifo_err_cnt),
4700  [C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4701  			0, CNTR_NORMAL,
4702  			access_pio_credit_ret_fifo_parity_err_cnt),
4703  [C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4704  			CNTR_NORMAL,
4705  			access_pio_v1_len_mem_bank1_cor_err_cnt),
4706  [C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4707  			CNTR_NORMAL,
4708  			access_pio_v1_len_mem_bank0_cor_err_cnt),
4709  [C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4710  			CNTR_NORMAL,
4711  			access_pio_v1_len_mem_bank1_unc_err_cnt),
4712  [C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4713  			CNTR_NORMAL,
4714  			access_pio_v1_len_mem_bank0_unc_err_cnt),
4715  [C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4716  			CNTR_NORMAL,
4717  			access_pio_sm_pkt_reset_parity_err_cnt),
4718  [C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4719  			CNTR_NORMAL,
4720  			access_pio_pkt_evict_fifo_parity_err_cnt),
4721  [C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4722  			"PioSbrdctrlCrrelFifoParityErr", 0, 0,
4723  			CNTR_NORMAL,
4724  			access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4725  [C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4726  			CNTR_NORMAL,
4727  			access_pio_sbrdctl_crrel_parity_err_cnt),
4728  [C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4729  			CNTR_NORMAL,
4730  			access_pio_pec_fifo_parity_err_cnt),
4731  [C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4732  			CNTR_NORMAL,
4733  			access_pio_pcc_fifo_parity_err_cnt),
4734  [C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4735  			CNTR_NORMAL,
4736  			access_pio_sb_mem_fifo1_err_cnt),
4737  [C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4738  			CNTR_NORMAL,
4739  			access_pio_sb_mem_fifo0_err_cnt),
4740  [C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4741  			CNTR_NORMAL,
4742  			access_pio_csr_parity_err_cnt),
4743  [C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4744  			CNTR_NORMAL,
4745  			access_pio_write_addr_parity_err_cnt),
4746  [C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4747  			CNTR_NORMAL,
4748  			access_pio_write_bad_ctxt_err_cnt),
4749  /* SendDmaErrStatus */
4750  [C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4751  			0, CNTR_NORMAL,
4752  			access_sdma_pcie_req_tracking_cor_err_cnt),
4753  [C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4754  			0, CNTR_NORMAL,
4755  			access_sdma_pcie_req_tracking_unc_err_cnt),
4756  [C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4757  			CNTR_NORMAL,
4758  			access_sdma_csr_parity_err_cnt),
4759  [C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4760  			CNTR_NORMAL,
4761  			access_sdma_rpy_tag_err_cnt),
4762  /* SendEgressErrStatus */
4763  [C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4764  			CNTR_NORMAL,
4765  			access_tx_read_pio_memory_csr_unc_err_cnt),
4766  [C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4767  			0, CNTR_NORMAL,
4768  			access_tx_read_sdma_memory_csr_err_cnt),
4769  [C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4770  			CNTR_NORMAL,
4771  			access_tx_egress_fifo_cor_err_cnt),
4772  [C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4773  			CNTR_NORMAL,
4774  			access_tx_read_pio_memory_cor_err_cnt),
4775  [C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4776  			CNTR_NORMAL,
4777  			access_tx_read_sdma_memory_cor_err_cnt),
4778  [C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4779  			CNTR_NORMAL,
4780  			access_tx_sb_hdr_cor_err_cnt),
4781  [C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4782  			CNTR_NORMAL,
4783  			access_tx_credit_overrun_err_cnt),
4784  [C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4785  			CNTR_NORMAL,
4786  			access_tx_launch_fifo8_cor_err_cnt),
4787  [C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4788  			CNTR_NORMAL,
4789  			access_tx_launch_fifo7_cor_err_cnt),
4790  [C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4791  			CNTR_NORMAL,
4792  			access_tx_launch_fifo6_cor_err_cnt),
4793  [C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4794  			CNTR_NORMAL,
4795  			access_tx_launch_fifo5_cor_err_cnt),
4796  [C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4797  			CNTR_NORMAL,
4798  			access_tx_launch_fifo4_cor_err_cnt),
4799  [C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4800  			CNTR_NORMAL,
4801  			access_tx_launch_fifo3_cor_err_cnt),
4802  [C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4803  			CNTR_NORMAL,
4804  			access_tx_launch_fifo2_cor_err_cnt),
4805  [C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4806  			CNTR_NORMAL,
4807  			access_tx_launch_fifo1_cor_err_cnt),
4808  [C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4809  			CNTR_NORMAL,
4810  			access_tx_launch_fifo0_cor_err_cnt),
4811  [C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4812  			CNTR_NORMAL,
4813  			access_tx_credit_return_vl_err_cnt),
4814  [C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4815  			CNTR_NORMAL,
4816  			access_tx_hcrc_insertion_err_cnt),
4817  [C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4818  			CNTR_NORMAL,
4819  			access_tx_egress_fifo_unc_err_cnt),
4820  [C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4821  			CNTR_NORMAL,
4822  			access_tx_read_pio_memory_unc_err_cnt),
4823  [C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4824  			CNTR_NORMAL,
4825  			access_tx_read_sdma_memory_unc_err_cnt),
4826  [C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4827  			CNTR_NORMAL,
4828  			access_tx_sb_hdr_unc_err_cnt),
4829  [C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4830  			CNTR_NORMAL,
4831  			access_tx_credit_return_partiy_err_cnt),
4832  [C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4833  			0, 0, CNTR_NORMAL,
4834  			access_tx_launch_fifo8_unc_or_parity_err_cnt),
4835  [C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4836  			0, 0, CNTR_NORMAL,
4837  			access_tx_launch_fifo7_unc_or_parity_err_cnt),
4838  [C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4839  			0, 0, CNTR_NORMAL,
4840  			access_tx_launch_fifo6_unc_or_parity_err_cnt),
4841  [C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4842  			0, 0, CNTR_NORMAL,
4843  			access_tx_launch_fifo5_unc_or_parity_err_cnt),
4844  [C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4845  			0, 0, CNTR_NORMAL,
4846  			access_tx_launch_fifo4_unc_or_parity_err_cnt),
4847  [C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4848  			0, 0, CNTR_NORMAL,
4849  			access_tx_launch_fifo3_unc_or_parity_err_cnt),
4850  [C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4851  			0, 0, CNTR_NORMAL,
4852  			access_tx_launch_fifo2_unc_or_parity_err_cnt),
4853  [C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4854  			0, 0, CNTR_NORMAL,
4855  			access_tx_launch_fifo1_unc_or_parity_err_cnt),
4856  [C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4857  			0, 0, CNTR_NORMAL,
4858  			access_tx_launch_fifo0_unc_or_parity_err_cnt),
4859  [C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4860  			0, 0, CNTR_NORMAL,
4861  			access_tx_sdma15_disallowed_packet_err_cnt),
4862  [C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4863  			0, 0, CNTR_NORMAL,
4864  			access_tx_sdma14_disallowed_packet_err_cnt),
4865  [C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4866  			0, 0, CNTR_NORMAL,
4867  			access_tx_sdma13_disallowed_packet_err_cnt),
4868  [C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4869  			0, 0, CNTR_NORMAL,
4870  			access_tx_sdma12_disallowed_packet_err_cnt),
4871  [C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4872  			0, 0, CNTR_NORMAL,
4873  			access_tx_sdma11_disallowed_packet_err_cnt),
4874  [C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4875  			0, 0, CNTR_NORMAL,
4876  			access_tx_sdma10_disallowed_packet_err_cnt),
4877  [C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4878  			0, 0, CNTR_NORMAL,
4879  			access_tx_sdma9_disallowed_packet_err_cnt),
4880  [C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4881  			0, 0, CNTR_NORMAL,
4882  			access_tx_sdma8_disallowed_packet_err_cnt),
4883  [C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4884  			0, 0, CNTR_NORMAL,
4885  			access_tx_sdma7_disallowed_packet_err_cnt),
4886  [C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4887  			0, 0, CNTR_NORMAL,
4888  			access_tx_sdma6_disallowed_packet_err_cnt),
4889  [C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4890  			0, 0, CNTR_NORMAL,
4891  			access_tx_sdma5_disallowed_packet_err_cnt),
4892  [C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4893  			0, 0, CNTR_NORMAL,
4894  			access_tx_sdma4_disallowed_packet_err_cnt),
4895  [C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4896  			0, 0, CNTR_NORMAL,
4897  			access_tx_sdma3_disallowed_packet_err_cnt),
4898  [C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4899  			0, 0, CNTR_NORMAL,
4900  			access_tx_sdma2_disallowed_packet_err_cnt),
4901  [C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4902  			0, 0, CNTR_NORMAL,
4903  			access_tx_sdma1_disallowed_packet_err_cnt),
4904  [C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4905  			0, 0, CNTR_NORMAL,
4906  			access_tx_sdma0_disallowed_packet_err_cnt),
4907  [C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4908  			CNTR_NORMAL,
4909  			access_tx_config_parity_err_cnt),
4910  [C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4911  			CNTR_NORMAL,
4912  			access_tx_sbrd_ctl_csr_parity_err_cnt),
4913  [C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4914  			CNTR_NORMAL,
4915  			access_tx_launch_csr_parity_err_cnt),
4916  [C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4917  			CNTR_NORMAL,
4918  			access_tx_illegal_vl_err_cnt),
4919  [C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4920  			"TxSbrdCtlStateMachineParityErr", 0, 0,
4921  			CNTR_NORMAL,
4922  			access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4923  [C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4924  			CNTR_NORMAL,
4925  			access_egress_reserved_10_err_cnt),
4926  [C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4927  			CNTR_NORMAL,
4928  			access_egress_reserved_9_err_cnt),
4929  [C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4930  			0, 0, CNTR_NORMAL,
4931  			access_tx_sdma_launch_intf_parity_err_cnt),
4932  [C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4933  			CNTR_NORMAL,
4934  			access_tx_pio_launch_intf_parity_err_cnt),
4935  [C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4936  			CNTR_NORMAL,
4937  			access_egress_reserved_6_err_cnt),
4938  [C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4939  			CNTR_NORMAL,
4940  			access_tx_incorrect_link_state_err_cnt),
4941  [C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4942  			CNTR_NORMAL,
4943  			access_tx_linkdown_err_cnt),
4944  [C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4945  			"EgressFifoUnderrunOrParityErr", 0, 0,
4946  			CNTR_NORMAL,
4947  			access_tx_egress_fifi_underrun_or_parity_err_cnt),
4948  [C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4949  			CNTR_NORMAL,
4950  			access_egress_reserved_2_err_cnt),
4951  [C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4952  			CNTR_NORMAL,
4953  			access_tx_pkt_integrity_mem_unc_err_cnt),
4954  [C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4955  			CNTR_NORMAL,
4956  			access_tx_pkt_integrity_mem_cor_err_cnt),
4957  /* SendErrStatus */
4958  [C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4959  			CNTR_NORMAL,
4960  			access_send_csr_write_bad_addr_err_cnt),
4961  [C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4962  			CNTR_NORMAL,
4963  			access_send_csr_read_bad_addr_err_cnt),
4964  [C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4965  			CNTR_NORMAL,
4966  			access_send_csr_parity_cnt),
4967  /* SendCtxtErrStatus */
4968  [C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4969  			CNTR_NORMAL,
4970  			access_pio_write_out_of_bounds_err_cnt),
4971  [C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4972  			CNTR_NORMAL,
4973  			access_pio_write_overflow_err_cnt),
4974  [C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4975  			0, 0, CNTR_NORMAL,
4976  			access_pio_write_crosses_boundary_err_cnt),
4977  [C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4978  			CNTR_NORMAL,
4979  			access_pio_disallowed_packet_err_cnt),
4980  [C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4981  			CNTR_NORMAL,
4982  			access_pio_inconsistent_sop_err_cnt),
4983  /* SendDmaEngErrStatus */
4984  [C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4985  			0, 0, CNTR_NORMAL,
4986  			access_sdma_header_request_fifo_cor_err_cnt),
4987  [C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4988  			CNTR_NORMAL,
4989  			access_sdma_header_storage_cor_err_cnt),
4990  [C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4991  			CNTR_NORMAL,
4992  			access_sdma_packet_tracking_cor_err_cnt),
4993  [C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4994  			CNTR_NORMAL,
4995  			access_sdma_assembly_cor_err_cnt),
4996  [C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4997  			CNTR_NORMAL,
4998  			access_sdma_desc_table_cor_err_cnt),
4999  [C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
5000  			0, 0, CNTR_NORMAL,
5001  			access_sdma_header_request_fifo_unc_err_cnt),
5002  [C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
5003  			CNTR_NORMAL,
5004  			access_sdma_header_storage_unc_err_cnt),
5005  [C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
5006  			CNTR_NORMAL,
5007  			access_sdma_packet_tracking_unc_err_cnt),
5008  [C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
5009  			CNTR_NORMAL,
5010  			access_sdma_assembly_unc_err_cnt),
5011  [C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
5012  			CNTR_NORMAL,
5013  			access_sdma_desc_table_unc_err_cnt),
5014  [C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
5015  			CNTR_NORMAL,
5016  			access_sdma_timeout_err_cnt),
5017  [C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
5018  			CNTR_NORMAL,
5019  			access_sdma_header_length_err_cnt),
5020  [C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
5021  			CNTR_NORMAL,
5022  			access_sdma_header_address_err_cnt),
5023  [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
5024  			CNTR_NORMAL,
5025  			access_sdma_header_select_err_cnt),
5026  [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
5027  			CNTR_NORMAL,
5028  			access_sdma_reserved_9_err_cnt),
5029  [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
5030  			CNTR_NORMAL,
5031  			access_sdma_packet_desc_overflow_err_cnt),
5032  [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
5033  			CNTR_NORMAL,
5034  			access_sdma_length_mismatch_err_cnt),
5035  [C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
5036  			CNTR_NORMAL,
5037  			access_sdma_halt_err_cnt),
5038  [C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
5039  			CNTR_NORMAL,
5040  			access_sdma_mem_read_err_cnt),
5041  [C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
5042  			CNTR_NORMAL,
5043  			access_sdma_first_desc_err_cnt),
5044  [C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
5045  			CNTR_NORMAL,
5046  			access_sdma_tail_out_of_bounds_err_cnt),
5047  [C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
5048  			CNTR_NORMAL,
5049  			access_sdma_too_long_err_cnt),
5050  [C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
5051  			CNTR_NORMAL,
5052  			access_sdma_gen_mismatch_err_cnt),
5053  [C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
5054  			CNTR_NORMAL,
5055  			access_sdma_wrong_dw_err_cnt),
5056  };
5057  
5058  static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
5059  [C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
5060  			CNTR_NORMAL),
5061  [C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
5062  			CNTR_NORMAL),
5063  [C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
5064  			CNTR_NORMAL),
5065  [C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
5066  			CNTR_NORMAL),
5067  [C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
5068  			CNTR_NORMAL),
5069  [C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
5070  			CNTR_NORMAL),
5071  [C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
5072  			CNTR_NORMAL),
5073  [C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
5074  [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
5075  [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
5076  [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
5077  				      CNTR_SYNTH | CNTR_VL),
5078  [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
5079  				     CNTR_SYNTH | CNTR_VL),
5080  [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
5081  				      CNTR_SYNTH | CNTR_VL),
5082  [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
5083  [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
5084  [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5085  			     access_sw_link_dn_cnt),
5086  [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5087  			   access_sw_link_up_cnt),
5088  [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
5089  				 access_sw_unknown_frame_cnt),
5090  [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5091  			     access_sw_xmit_discards),
5092  [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
5093  				CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
5094  				access_sw_xmit_discards),
5095  [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
5096  				 access_xmit_constraint_errs),
5097  [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
5098  				access_rcv_constraint_errs),
5099  [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
5100  [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
5101  [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
5102  [C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
5103  [C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
5104  [C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
5105  [C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
5106  [C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
5107  [C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
5108  [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5109  [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5110  [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5111  [C_SW_IBP_RC_CRWAITS] = SW_IBP_CNTR(RcCrWait, rc_crwaits),
5112  [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5113  			       access_sw_cpu_rc_acks),
5114  [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
5115  				access_sw_cpu_rc_qacks),
5116  [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
5117  				       access_sw_cpu_rc_delayed_comp),
5118  [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5119  [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5120  [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5121  [OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5122  [OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5123  [OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5124  [OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5125  [OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5126  [OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5127  [OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5128  [OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5129  [OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5130  [OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5131  [OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5132  [OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5133  [OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5134  [OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5135  [OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5136  [OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5137  [OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5138  [OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5139  [OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5140  [OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5141  [OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5142  [OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5143  [OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5144  [OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5145  [OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5146  [OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5147  [OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5148  [OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5149  [OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5150  [OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5151  [OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5152  [OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5153  [OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5154  [OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5155  [OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5156  [OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5157  [OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5158  [OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5159  [OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5160  [OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5161  [OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5162  [OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5163  [OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5164  [OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5165  [OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5166  [OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5167  [OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5168  [OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5169  [OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5170  [OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5171  [OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5172  [OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5173  [OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5174  [OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5175  [OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5176  [OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5177  [OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5178  [OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5179  [OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5180  [OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5181  [OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5182  [OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5183  [OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5184  [OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5185  [OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5186  [OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5187  [OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5188  [OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5189  [OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5190  [OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5191  [OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5192  [OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5193  [OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5194  [OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5195  [OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5196  [OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5197  [OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5198  };
5199  
5200  /* ======================================================================== */
5201  
5202  /* return true if this is chip revision A */
5203  int is_ax(struct hfi1_devdata *dd)
5204  {
5205  	u8 chip_rev_minor =
5206  		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5207  			& CCE_REVISION_CHIP_REV_MINOR_MASK;
5208  	return (chip_rev_minor & 0xf0) == 0;
5209  }
5210  
5211  /* return true if this is chip revision B */
5212  int is_bx(struct hfi1_devdata *dd)
5213  {
5214  	u8 chip_rev_minor =
5215  		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5216  			& CCE_REVISION_CHIP_REV_MINOR_MASK;
5217  	return (chip_rev_minor & 0xF0) == 0x10;
5218  }
5219  
5220  /* return true if the kernel receive urgent interrupt is masked for rcd */
5221  bool is_urg_masked(struct hfi1_ctxtdata *rcd)
5222  {
5223  	u64 mask;
5224  	u32 is = IS_RCVURGENT_START + rcd->ctxt;
5225  	u8 bit = is % 64;
5226  
5227  	mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64)));
5228  	return !(mask & BIT_ULL(bit));
5229  }
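/*
 * Illustration (derived from the code above): interrupt sources are grouped
 * 64 per CCE_INT_MASK CSR, with consecutive mask CSRs 8 bytes apart.  For a
 * receive context ctxt, the urgent-interrupt source number is
 * IS_RCVURGENT_START + ctxt; the register index is source / 64 and the bit
 * position is source % 64.  The urgent interrupt is considered masked when
 * that bit is clear in the mask register.
 */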
5230  
5231  /*
5232   * Append string s to buffer buf.  Arguments curp and len are the current
5233   * position and remaining length, respectively.
5234   *
5235   * return 0 on success, 1 on out of room
5236   */
5237  static int append_str(char *buf, char **curp, int *lenp, const char *s)
5238  {
5239  	char *p = *curp;
5240  	int len = *lenp;
5241  	int result = 0; /* success */
5242  	char c;
5243  
5244  	/* add a comma, if not first in the buffer */
5245  	if (p != buf) {
5246  		if (len == 0) {
5247  			result = 1; /* out of room */
5248  			goto done;
5249  		}
5250  		*p++ = ',';
5251  		len--;
5252  	}
5253  
5254  	/* copy the string */
5255  	while ((c = *s++) != 0) {
5256  		if (len == 0) {
5257  			result = 1; /* out of room */
5258  			goto done;
5259  		}
5260  		*p++ = c;
5261  		len--;
5262  	}
5263  
5264  done:
5265  	/* write return values */
5266  	*curp = p;
5267  	*lenp = len;
5268  
5269  	return result;
5270  }
5271  
5272  /*
5273   * Using the given flag table, print a comma separated string into
5274   * the buffer.  End in '*' if the buffer is too short.
5275   */
5276  static char *flag_string(char *buf, int buf_len, u64 flags,
5277  			 struct flag_table *table, int table_size)
5278  {
5279  	char extra[32];
5280  	char *p = buf;
5281  	int len = buf_len;
5282  	int no_room = 0;
5283  	int i;
5284  
5285  	/* make sure there are at least 2 bytes so we can form "*" */
5286  	if (len < 2)
5287  		return "";
5288  
5289  	len--;	/* leave room for a nul */
5290  	for (i = 0; i < table_size; i++) {
5291  		if (flags & table[i].flag) {
5292  			no_room = append_str(buf, &p, &len, table[i].str);
5293  			if (no_room)
5294  				break;
5295  			flags &= ~table[i].flag;
5296  		}
5297  	}
5298  
5299  	/* any undocumented bits left? */
5300  	if (!no_room && flags) {
5301  		snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5302  		no_room = append_str(buf, &p, &len, extra);
5303  	}
5304  
5305  	/* add * if ran out of room */
5306  	if (no_room) {
5307  		/* may need to back up to add space for a '*' */
5308  		if (len == 0)
5309  			--p;
5310  		*p++ = '*';
5311  	}
5312  
5313  	/* add final nul - space already allocated above */
5314  	*p = 0;
5315  	return buf;
5316  }
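/*
 * Example (hypothetical flag names): with a table containing "FooErr"
 * (bit 0) and "BarErr" (bit 2), flags == 0x15 would produce
 * "FooErr,BarErr,bits 0x10" -- named bits first, then any bits not in the
 * table reported as a raw hex value.  A trailing '*' is appended instead
 * when the buffer runs out of room.
 */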
5317  
5318  /* first 8 CCE error interrupt source names */
5319  static const char * const cce_misc_names[] = {
5320  	"CceErrInt",		/* 0 */
5321  	"RxeErrInt",		/* 1 */
5322  	"MiscErrInt",		/* 2 */
5323  	"Reserved3",		/* 3 */
5324  	"PioErrInt",		/* 4 */
5325  	"SDmaErrInt",		/* 5 */
5326  	"EgressErrInt",		/* 6 */
5327  	"TxeErrInt"		/* 7 */
5328  };
5329  
5330  /*
5331   * Return the miscellaneous error interrupt name.
5332   */
5333  static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5334  {
5335  	if (source < ARRAY_SIZE(cce_misc_names))
5336  		strncpy(buf, cce_misc_names[source], bsize);
5337  	else
5338  		snprintf(buf, bsize, "Reserved%u",
5339  			 source + IS_GENERAL_ERR_START);
5340  
5341  	return buf;
5342  }
5343  
5344  /*
5345   * Return the SDMA engine error interrupt name.
5346   */
5347  static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5348  {
5349  	snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5350  	return buf;
5351  }
5352  
5353  /*
5354   * Return the send context error interrupt name.
5355   */
5356  static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5357  {
5358  	snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5359  	return buf;
5360  }
5361  
5362  static const char * const various_names[] = {
5363  	"PbcInt",
5364  	"GpioAssertInt",
5365  	"Qsfp1Int",
5366  	"Qsfp2Int",
5367  	"TCritInt"
5368  };
5369  
5370  /*
5371   * Return the various interrupt name.
5372   */
5373  static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5374  {
5375  	if (source < ARRAY_SIZE(various_names))
5376  		strncpy(buf, various_names[source], bsize);
5377  	else
5378  		snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5379  	return buf;
5380  }
5381  
5382  /*
5383   * Return the DC interrupt name.
5384   */
5385  static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5386  {
5387  	static const char * const dc_int_names[] = {
5388  		"common",
5389  		"lcb",
5390  		"8051",
5391  		"lbm"	/* local block merge */
5392  	};
5393  
5394  	if (source < ARRAY_SIZE(dc_int_names))
5395  		snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5396  	else
5397  		snprintf(buf, bsize, "DCInt%u", source);
5398  	return buf;
5399  }
5400  
5401  static const char * const sdma_int_names[] = {
5402  	"SDmaInt",
5403  	"SdmaIdleInt",
5404  	"SdmaProgressInt",
5405  };
5406  
5407  /*
5408   * Return the SDMA engine interrupt name.
5409   */
5410  static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5411  {
5412  	/* what interrupt */
5413  	unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
5414  	/* which engine */
5415  	unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5416  
5417  	if (likely(what < 3))
5418  		snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5419  	else
5420  		snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5421  	return buf;
5422  }
5423  
5424  /*
5425   * Return the receive available interrupt name.
5426   */
5427  static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5428  {
5429  	snprintf(buf, bsize, "RcvAvailInt%u", source);
5430  	return buf;
5431  }
5432  
5433  /*
5434   * Return the receive urgent interrupt name.
5435   */
5436  static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5437  {
5438  	snprintf(buf, bsize, "RcvUrgentInt%u", source);
5439  	return buf;
5440  }
5441  
5442  /*
5443   * Return the send credit interrupt name.
5444   */
5445  static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5446  {
5447  	snprintf(buf, bsize, "SendCreditInt%u", source);
5448  	return buf;
5449  }
5450  
5451  /*
5452   * Return the reserved interrupt name.
5453   */
5454  static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5455  {
5456  	snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5457  	return buf;
5458  }
5459  
5460  static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5461  {
5462  	return flag_string(buf, buf_len, flags,
5463  			   cce_err_status_flags,
5464  			   ARRAY_SIZE(cce_err_status_flags));
5465  }
5466  
5467  static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5468  {
5469  	return flag_string(buf, buf_len, flags,
5470  			   rxe_err_status_flags,
5471  			   ARRAY_SIZE(rxe_err_status_flags));
5472  }
5473  
5474  static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5475  {
5476  	return flag_string(buf, buf_len, flags, misc_err_status_flags,
5477  			   ARRAY_SIZE(misc_err_status_flags));
5478  }
5479  
5480  static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5481  {
5482  	return flag_string(buf, buf_len, flags,
5483  			   pio_err_status_flags,
5484  			   ARRAY_SIZE(pio_err_status_flags));
5485  }
5486  
5487  static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5488  {
5489  	return flag_string(buf, buf_len, flags,
5490  			   sdma_err_status_flags,
5491  			   ARRAY_SIZE(sdma_err_status_flags));
5492  }
5493  
5494  static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5495  {
5496  	return flag_string(buf, buf_len, flags,
5497  			   egress_err_status_flags,
5498  			   ARRAY_SIZE(egress_err_status_flags));
5499  }
5500  
5501  static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5502  {
5503  	return flag_string(buf, buf_len, flags,
5504  			   egress_err_info_flags,
5505  			   ARRAY_SIZE(egress_err_info_flags));
5506  }
5507  
5508  static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5509  {
5510  	return flag_string(buf, buf_len, flags,
5511  			   send_err_status_flags,
5512  			   ARRAY_SIZE(send_err_status_flags));
5513  }
5514  
5515  static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5516  {
5517  	char buf[96];
5518  	int i = 0;
5519  
5520  	/*
5521  	 * For most of these errors, there is nothing that can be done except
5522  	 * report or record it.
5523  	 */
5524  	dd_dev_info(dd, "CCE Error: %s\n",
5525  		    cce_err_status_string(buf, sizeof(buf), reg));
5526  
5527  	if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5528  	    is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5529  		/* this error requires a manual drop into SPC freeze mode */
5530  		/* then a fix up */
5531  		start_freeze_handling(dd->pport, FREEZE_SELF);
5532  	}
5533  
5534  	for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5535  		if (reg & (1ull << i)) {
5536  			incr_cntr64(&dd->cce_err_status_cnt[i]);
5537  			/* maintain a counter over all cce_err_status errors */
5538  			incr_cntr64(&dd->sw_cce_err_status_aggregate);
5539  		}
5540  	}
5541  }
5542  
5543  /*
5544   * Check counters for receive errors that do not have an interrupt
5545   * associated with them.
5546   */
5547  #define RCVERR_CHECK_TIME 10
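/*
 * RCVERR_CHECK_TIME is in seconds: the timer below re-arms itself every
 * RCVERR_CHECK_TIME seconds (jiffies + HZ * RCVERR_CHECK_TIME).
 */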
5548  static void update_rcverr_timer(struct timer_list *t)
5549  {
5550  	struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer);
5551  	struct hfi1_pportdata *ppd = dd->pport;
5552  	u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5553  
5554  	if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5555  	    ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5556  		dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5557  		set_link_down_reason(
5558  		ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5559  		OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5560  		queue_work(ppd->link_wq, &ppd->link_bounce_work);
5561  	}
5562  	dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5563  
5564  	mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5565  }
5566  
5567  static int init_rcverr(struct hfi1_devdata *dd)
5568  {
5569  	timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0);
5570  	/* Assume the hardware counter has been reset */
5571  	dd->rcv_ovfl_cnt = 0;
5572  	return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5573  }
5574  
5575  static void free_rcverr(struct hfi1_devdata *dd)
5576  {
5577  	if (dd->rcverr_timer.function)
5578  		del_timer_sync(&dd->rcverr_timer);
5579  }
5580  
5581  static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5582  {
5583  	char buf[96];
5584  	int i = 0;
5585  
5586  	dd_dev_info(dd, "Receive Error: %s\n",
5587  		    rxe_err_status_string(buf, sizeof(buf), reg));
5588  
5589  	if (reg & ALL_RXE_FREEZE_ERR) {
5590  		int flags = 0;
5591  
5592  		/*
5593  		 * Freeze mode recovery is disabled for the errors
5594  		 * in RXE_FREEZE_ABORT_MASK
5595  		 */
5596  		if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5597  			flags = FREEZE_ABORT;
5598  
5599  		start_freeze_handling(dd->pport, flags);
5600  	}
5601  
5602  	for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5603  		if (reg & (1ull << i))
5604  			incr_cntr64(&dd->rcv_err_status_cnt[i]);
5605  	}
5606  }
5607  
5608  static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5609  {
5610  	char buf[96];
5611  	int i = 0;
5612  
5613  	dd_dev_info(dd, "Misc Error: %s",
5614  		    misc_err_status_string(buf, sizeof(buf), reg));
5615  	for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5616  		if (reg & (1ull << i))
5617  			incr_cntr64(&dd->misc_err_status_cnt[i]);
5618  	}
5619  }
5620  
5621  static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5622  {
5623  	char buf[96];
5624  	int i = 0;
5625  
5626  	dd_dev_info(dd, "PIO Error: %s\n",
5627  		    pio_err_status_string(buf, sizeof(buf), reg));
5628  
5629  	if (reg & ALL_PIO_FREEZE_ERR)
5630  		start_freeze_handling(dd->pport, 0);
5631  
5632  	for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5633  		if (reg & (1ull << i))
5634  			incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5635  	}
5636  }
5637  
5638  static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5639  {
5640  	char buf[96];
5641  	int i = 0;
5642  
5643  	dd_dev_info(dd, "SDMA Error: %s\n",
5644  		    sdma_err_status_string(buf, sizeof(buf), reg));
5645  
5646  	if (reg & ALL_SDMA_FREEZE_ERR)
5647  		start_freeze_handling(dd->pport, 0);
5648  
5649  	for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5650  		if (reg & (1ull << i))
5651  			incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5652  	}
5653  }
5654  
5655  static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5656  {
5657  	incr_cntr64(&ppd->port_xmit_discards);
5658  }
5659  
5660  static void count_port_inactive(struct hfi1_devdata *dd)
5661  {
5662  	__count_port_discards(dd->pport);
5663  }
5664  
5665  /*
5666   * We have had a "disallowed packet" error during egress. Determine the
5667   * integrity check which failed, and update relevant error counter, etc.
5668   *
5669   * Note that the SEND_EGRESS_ERR_INFO register has only a single
5670   * bit of state per integrity check, and so we can miss the reason for an
5671   * egress error if more than one packet fails the same integrity check
5672   * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5673   */
5674  static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5675  					int vl)
5676  {
5677  	struct hfi1_pportdata *ppd = dd->pport;
5678  	u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5679  	u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5680  	char buf[96];
5681  
5682  	/* clear down all observed info as quickly as possible after read */
5683  	write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5684  
5685  	dd_dev_info(dd,
5686  		    "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5687  		    info, egress_err_info_string(buf, sizeof(buf), info), src);
5688  
5689  	/* Eventually add other counters for each bit */
5690  	if (info & PORT_DISCARD_EGRESS_ERRS) {
5691  		int weight, i;
5692  
5693  		/*
5694  		 * Count all applicable bits as individual errors and
5695  		 * attribute them to the packet that triggered this handler.
5696  		 * This may not be completely accurate due to limitations
5697  		 * on the available hardware error information.  There is
5698  		 * a single information register and any number of error
5699  		 * packets may have occurred and contributed to it before
5700  		 * this routine is called.  This means that:
5701  		 * a) If multiple packets with the same error occur before
5702  		 *    this routine is called, earlier packets are missed.
5703  		 *    There is only a single bit for each error type.
5704  		 * b) Errors may not be attributed to the correct VL.
5705  		 *    The driver is attributing all bits in the info register
5706  		 *    to the packet that triggered this call, but bits
5707  		 *    could be an accumulation of different packets with
5708  		 *    different VLs.
5709  		 * c) A single error packet may have multiple counts attached
5710  		 *    to it.  There is no way for the driver to know if
5711  		 *    multiple bits set in the info register are due to a
5712  		 *    single packet or multiple packets.  The driver assumes
5713  		 *    multiple packets.
5714  		 */
5715  		weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5716  		for (i = 0; i < weight; i++) {
5717  			__count_port_discards(ppd);
5718  			if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5719  				incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5720  			else if (vl == 15)
5721  				incr_cntr64(&ppd->port_xmit_discards_vl
5722  					    [C_VL_15]);
5723  		}
5724  	}
5725  }
5726  
5727  /*
5728   * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5729   * register. Does it represent a 'port inactive' error?
5730   */
5731  static inline int port_inactive_err(u64 posn)
5732  {
5733  	return (posn >= SEES(TX_LINKDOWN) &&
5734  		posn <= SEES(TX_INCORRECT_LINK_STATE));
5735  }
5736  
5737  /*
5738   * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5739   * register. Does it represent a 'disallowed packet' error?
5740   */
5741  static inline int disallowed_pkt_err(int posn)
5742  {
5743  	return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5744  		posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5745  }
5746  
5747  /*
5748   * Input value is a bit position of one of the SDMA engine disallowed
5749   * packet errors.  Return which engine.  Use of this must be guarded by
5750   * disallowed_pkt_err().
5751   */
5752  static inline int disallowed_pkt_engine(int posn)
5753  {
5754  	return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5755  }
5756  
5757  /*
5758   * Translate an SDMA engine to a VL.  Return -1 if the translation cannot
5759   * be done.
5760   */
5761  static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5762  {
5763  	struct sdma_vl_map *m;
5764  	int vl;
5765  
5766  	/* range check */
5767  	if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5768  		return -1;
5769  
5770  	rcu_read_lock();
5771  	m = rcu_dereference(dd->sdma_map);
5772  	vl = m->engine_to_vl[engine];
5773  	rcu_read_unlock();
5774  
5775  	return vl;
5776  }
5777  
5778  /*
5779   * Translate the send context (software index) into a VL.  Return -1 if the
5780   * translation cannot be done.
5781   */
5782  static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5783  {
5784  	struct send_context_info *sci;
5785  	struct send_context *sc;
5786  	int i;
5787  
5788  	sci = &dd->send_contexts[sw_index];
5789  
5790  	/* there is no information for user (PSM) and ack contexts */
5791  	if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
5792  		return -1;
5793  
5794  	sc = sci->sc;
5795  	if (!sc)
5796  		return -1;
5797  	if (dd->vld[15].sc == sc)
5798  		return 15;
5799  	for (i = 0; i < num_vls; i++)
5800  		if (dd->vld[i].sc == sc)
5801  			return i;
5802  
5803  	return -1;
5804  }
5805  
5806  static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5807  {
5808  	u64 reg_copy = reg, handled = 0;
5809  	char buf[96];
5810  	int i = 0;
5811  
5812  	if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5813  		start_freeze_handling(dd->pport, 0);
5814  	else if (is_ax(dd) &&
5815  		 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5816  		 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5817  		start_freeze_handling(dd->pport, 0);
5818  
5819  	while (reg_copy) {
5820  		int posn = fls64(reg_copy);
5821  		/* fls64() returns a 1-based offset, we want it zero based */
5822  		int shift = posn - 1;
5823  		u64 mask = 1ULL << shift;
5824  
5825  		if (port_inactive_err(shift)) {
5826  			count_port_inactive(dd);
5827  			handled |= mask;
5828  		} else if (disallowed_pkt_err(shift)) {
5829  			int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5830  
5831  			handle_send_egress_err_info(dd, vl);
5832  			handled |= mask;
5833  		}
5834  		reg_copy &= ~mask;
5835  	}
5836  
5837  	reg &= ~handled;
5838  
5839  	if (reg)
5840  		dd_dev_info(dd, "Egress Error: %s\n",
5841  			    egress_err_status_string(buf, sizeof(buf), reg));
5842  
5843  	for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5844  		if (reg & (1ull << i))
5845  			incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5846  	}
5847  }
5848  
5849  static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5850  {
5851  	char buf[96];
5852  	int i = 0;
5853  
5854  	dd_dev_info(dd, "Send Error: %s\n",
5855  		    send_err_status_string(buf, sizeof(buf), reg));
5856  
5857  	for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5858  		if (reg & (1ull << i))
5859  			incr_cntr64(&dd->send_err_status_cnt[i]);
5860  	}
5861  }
5862  
5863  /*
5864   * The maximum number of times the error clear down will loop before
5865   * blocking a repeating error.  This value is arbitrary.
5866   */
5867  #define MAX_CLEAR_COUNT 20
5868  
5869  /*
5870   * Clear and handle an error register.  All error interrupts are funneled
5871   * through here to have a central location to correctly handle single-
5872   * or multi-shot errors.
5873   *
5874   * For non per-context registers, call this routine with a context value
5875   * of 0 so the per-context offset is zero.
5876   *
5877   * If the handler loops too many times, assume that something is wrong
5878   * and can't be fixed, so mask the error bits.
5879   */
5880  static void interrupt_clear_down(struct hfi1_devdata *dd,
5881  				 u32 context,
5882  				 const struct err_reg_info *eri)
5883  {
5884  	u64 reg;
5885  	u32 count;
5886  
5887  	/* read in a loop until no more errors are seen */
5888  	count = 0;
5889  	while (1) {
5890  		reg = read_kctxt_csr(dd, context, eri->status);
5891  		if (reg == 0)
5892  			break;
5893  		write_kctxt_csr(dd, context, eri->clear, reg);
5894  		if (likely(eri->handler))
5895  			eri->handler(dd, context, reg);
5896  		count++;
5897  		if (count > MAX_CLEAR_COUNT) {
5898  			u64 mask;
5899  
5900  			dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5901  				   eri->desc, reg);
5902  			/*
5903  			 * Read-modify-write so any other masked bits
5904  			 * remain masked.
5905  			 */
5906  			mask = read_kctxt_csr(dd, context, eri->mask);
5907  			mask &= ~reg;
5908  			write_kctxt_csr(dd, context, eri->mask, mask);
5909  			break;
5910  		}
5911  	}
5912  }
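/*
 * Sketch of how the handlers above are wired in (the err_reg_info tables
 * themselves live elsewhere in this file): each entry names a status CSR,
 * its clear and mask CSRs, a description string, and an optional handler
 * (e.g. one of the handle_*_err() routines above).  interrupt_clear_down()
 * re-reads the status CSR until it is clean, clearing it and calling the
 * handler for every non-zero snapshot, and masks the bits if they keep
 * re-asserting beyond MAX_CLEAR_COUNT iterations.
 */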
5913  
5914  /*
5915   * CCE block "misc" interrupt.  Source is < 16.
5916   */
5917  static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5918  {
5919  	const struct err_reg_info *eri = &misc_errs[source];
5920  
5921  	if (eri->handler) {
5922  		interrupt_clear_down(dd, 0, eri);
5923  	} else {
5924  		dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5925  			   source);
5926  	}
5927  }
5928  
5929  static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5930  {
5931  	return flag_string(buf, buf_len, flags,
5932  			   sc_err_status_flags,
5933  			   ARRAY_SIZE(sc_err_status_flags));
5934  }
5935  
5936  /*
5937   * Send context error interrupt.  Source (hw_context) is < 160.
5938   *
5939   * All send context errors cause the send context to halt.  The normal
5940   * clear-down mechanism cannot be used because we cannot clear the
5941   * error bits until several other long-running items are done first.
5942   * This is OK because with the context halted, nothing else is going
5943   * to happen on it anyway.
5944   */
5945  static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5946  				unsigned int hw_context)
5947  {
5948  	struct send_context_info *sci;
5949  	struct send_context *sc;
5950  	char flags[96];
5951  	u64 status;
5952  	u32 sw_index;
5953  	int i = 0;
5954  	unsigned long irq_flags;
5955  
5956  	sw_index = dd->hw_to_sw[hw_context];
5957  	if (sw_index >= dd->num_send_contexts) {
5958  		dd_dev_err(dd,
5959  			   "out of range sw index %u for send context %u\n",
5960  			   sw_index, hw_context);
5961  		return;
5962  	}
5963  	sci = &dd->send_contexts[sw_index];
5964  	spin_lock_irqsave(&dd->sc_lock, irq_flags);
5965  	sc = sci->sc;
5966  	if (!sc) {
5967  		dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5968  			   sw_index, hw_context);
5969  		spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5970  		return;
5971  	}
5972  
5973  	/* tell the software that a halt has begun */
5974  	sc_stop(sc, SCF_HALTED);
5975  
5976  	status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5977  
5978  	dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5979  		    send_context_err_status_string(flags, sizeof(flags),
5980  						   status));
5981  
5982  	if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5983  		handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5984  
5985  	/*
5986  	 * Automatically restart halted kernel contexts out of interrupt
5987  	 * context.  User contexts must ask the driver to restart the context.
5988  	 */
5989  	if (sc->type != SC_USER)
5990  		queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5991  	spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5992  
5993  	/*
5994  	 * Update the counters for the corresponding status bits.
5995  	 * Note that these particular counters are aggregated over all
5996  	 * 160 contexts.
5997  	 */
5998  	for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5999  		if (status & (1ull << i))
6000  			incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
6001  	}
6002  }
6003  
6004  static void handle_sdma_eng_err(struct hfi1_devdata *dd,
6005  				unsigned int source, u64 status)
6006  {
6007  	struct sdma_engine *sde;
6008  	int i = 0;
6009  
6010  	sde = &dd->per_sdma[source];
6011  #ifdef CONFIG_SDMA_VERBOSITY
6012  	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6013  		   slashstrip(__FILE__), __LINE__, __func__);
6014  	dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
6015  		   sde->this_idx, source, (unsigned long long)status);
6016  #endif
6017  	sde->err_cnt++;
6018  	sdma_engine_error(sde, status);
6019  
6020  	/*
6021  	 * Update the counters for the corresponding status bits.
6022  	 * Note that these particular counters are aggregated over
6023  	 * all 16 DMA engines.
6024  	 */
6025  	for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
6026  		if (status & (1ull << i))
6027  			incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
6028  	}
6029  }
6030  
6031  /*
6032   * CCE block SDMA error interrupt.  Source is < 16.
6033   */
6034  static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
6035  {
6036  #ifdef CONFIG_SDMA_VERBOSITY
6037  	struct sdma_engine *sde = &dd->per_sdma[source];
6038  
6039  	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6040  		   slashstrip(__FILE__), __LINE__, __func__);
6041  	dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
6042  		   source);
6043  	sdma_dumpstate(sde);
6044  #endif
6045  	interrupt_clear_down(dd, source, &sdma_eng_err);
6046  }
6047  
6048  /*
6049   * CCE block "various" interrupt.  Source is < 8.
6050   */
6051  static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
6052  {
6053  	const struct err_reg_info *eri = &various_err[source];
6054  
6055  	/*
6056  	 * TCritInt cannot go through interrupt_clear_down()
6057  	 * because it is not a second tier interrupt. The handler
6058  	 * should be called directly.
6059  	 */
6060  	if (source == TCRIT_INT_SOURCE)
6061  		handle_temp_err(dd);
6062  	else if (eri->handler)
6063  		interrupt_clear_down(dd, 0, eri);
6064  	else
6065  		dd_dev_info(dd,
6066  			    "%s: Unimplemented/reserved interrupt %d\n",
6067  			    __func__, source);
6068  }
6069  
6070  static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
6071  {
6072  	/* src_ctx is always zero */
6073  	struct hfi1_pportdata *ppd = dd->pport;
6074  	unsigned long flags;
6075  	u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
6076  
6077  	if (reg & QSFP_HFI0_MODPRST_N) {
6078  		if (!qsfp_mod_present(ppd)) {
6079  			dd_dev_info(dd, "%s: QSFP module removed\n",
6080  				    __func__);
6081  
6082  			ppd->driver_link_ready = 0;
6083  			/*
6084  			 * Cable removed, reset all our information about the
6085  			 * cache and cable capabilities
6086  			 */
6087  
6088  			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6089  			/*
6090  			 * We don't set cache_refresh_required here as we expect
6091  			 * an interrupt when a cable is inserted
6092  			 */
6093  			ppd->qsfp_info.cache_valid = 0;
6094  			ppd->qsfp_info.reset_needed = 0;
6095  			ppd->qsfp_info.limiting_active = 0;
6096  			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6097  					       flags);
6098  			/* Invert the ModPresent pin now to detect plug-in */
6099  			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6100  				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6101  
6102  			if ((ppd->offline_disabled_reason >
6103  			  HFI1_ODR_MASK(
6104  			  OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
6105  			  (ppd->offline_disabled_reason ==
6106  			  HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6107  				ppd->offline_disabled_reason =
6108  				HFI1_ODR_MASK(
6109  				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
6110  
6111  			if (ppd->host_link_state == HLS_DN_POLL) {
6112  				/*
6113  				 * The link is still in POLL. This means
6114  				 * that the normal link down processing
6115  				 * will not happen. We have to do it here
6116  				 * before turning the DC off.
6117  				 */
6118  				queue_work(ppd->link_wq, &ppd->link_down_work);
6119  			}
6120  		} else {
6121  			dd_dev_info(dd, "%s: QSFP module inserted\n",
6122  				    __func__);
6123  
6124  			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6125  			ppd->qsfp_info.cache_valid = 0;
6126  			ppd->qsfp_info.cache_refresh_required = 1;
6127  			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6128  					       flags);
6129  
6130  			/*
6131  			 * Stop inversion of ModPresent pin to detect
6132  			 * removal of the cable
6133  			 */
6134  			qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6135  			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6136  				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6137  
6138  			ppd->offline_disabled_reason =
6139  				HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6140  		}
6141  	}
6142  
6143  	if (reg & QSFP_HFI0_INT_N) {
6144  		dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6145  			    __func__);
6146  		spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6147  		ppd->qsfp_info.check_interrupt_flags = 1;
6148  		spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6149  	}
6150  
6151  	/* Schedule the QSFP work only if there is a cable attached. */
6152  	if (qsfp_mod_present(ppd))
6153  		queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
6154  }
6155  
6156  static int request_host_lcb_access(struct hfi1_devdata *dd)
6157  {
6158  	int ret;
6159  
6160  	ret = do_8051_command(dd, HCMD_MISC,
6161  			      (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6162  			      LOAD_DATA_FIELD_ID_SHIFT, NULL);
6163  	if (ret != HCMD_SUCCESS) {
6164  		dd_dev_err(dd, "%s: command failed with error %d\n",
6165  			   __func__, ret);
6166  	}
6167  	return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6168  }
6169  
6170  static int request_8051_lcb_access(struct hfi1_devdata *dd)
6171  {
6172  	int ret;
6173  
6174  	ret = do_8051_command(dd, HCMD_MISC,
6175  			      (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6176  			      LOAD_DATA_FIELD_ID_SHIFT, NULL);
6177  	if (ret != HCMD_SUCCESS) {
6178  		dd_dev_err(dd, "%s: command failed with error %d\n",
6179  			   __func__, ret);
6180  	}
6181  	return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6182  }
6183  
6184  /*
6185   * Set the LCB selector - allow host access.  The DCC selector always
6186   * points to the host.
6187   */
6188  static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6189  {
6190  	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6191  		  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6192  		  DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6193  }
6194  
6195  /*
6196   * Clear the LCB selector - allow 8051 access.  The DCC selector always
6197   * points to the host.
6198   */
6199  static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6200  {
6201  	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6202  		  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6203  }
6204  
6205  /*
6206   * Acquire LCB access from the 8051.  If the host already has access,
6207   * just increment a counter.  Otherwise, inform the 8051 that the
6208   * host is taking access.
6209   *
6210   * Returns:
6211   *	0 on success
6212   *	-EBUSY if the 8051 has control and cannot be disturbed
6213   *	-errno if unable to acquire access from the 8051
6214   */
6215  int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6216  {
6217  	struct hfi1_pportdata *ppd = dd->pport;
6218  	int ret = 0;
6219  
6220  	/*
6221  	 * Use the host link state lock so the operation of this routine
6222  	 * { link state check, selector change, count increment } can occur
6223  	 * as a unit against a link state change.  Otherwise there is a
6224  	 * race between the state change and the count increment.
6225  	 */
6226  	if (sleep_ok) {
6227  		mutex_lock(&ppd->hls_lock);
6228  	} else {
6229  		while (!mutex_trylock(&ppd->hls_lock))
6230  			udelay(1);
6231  	}
6232  
6233  	/* this access is valid only when the link is up */
6234  	if (ppd->host_link_state & HLS_DOWN) {
6235  		dd_dev_info(dd, "%s: link state %s not up\n",
6236  			    __func__, link_state_name(ppd->host_link_state));
6237  		ret = -EBUSY;
6238  		goto done;
6239  	}
6240  
6241  	if (dd->lcb_access_count == 0) {
6242  		ret = request_host_lcb_access(dd);
6243  		if (ret) {
6244  			dd_dev_err(dd,
6245  				   "%s: unable to acquire LCB access, err %d\n",
6246  				   __func__, ret);
6247  			goto done;
6248  		}
6249  		set_host_lcb_access(dd);
6250  	}
6251  	dd->lcb_access_count++;
6252  done:
6253  	mutex_unlock(&ppd->hls_lock);
6254  	return ret;
6255  }
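/*
 * Typical usage sketch (hypothetical caller):
 *
 *	if (!acquire_lcb_access(dd, 1)) {
 *		... read/write DC_LCB_* CSRs directly ...
 *		release_lcb_access(dd, 1);
 *	}
 *
 * Pass sleep_ok = 0 from contexts that cannot sleep; the routines then
 * busy-wait on the host link state mutex instead of blocking.
 */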
6256  
6257  /*
6258   * Release LCB access by decrementing the use count.  If the count is moving
6259   * from 1 to 0, inform 8051 that it has control back.
6260   *
6261   * Returns:
6262   *	0 on success
6263   *	-errno if unable to release access to the 8051
6264   */
6265  int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6266  {
6267  	int ret = 0;
6268  
6269  	/*
6270  	 * Use the host link state lock because the acquire needed it.
6271  	 * Here, we only need to keep { selector change, count decrement }
6272  	 * as a unit.
6273  	 */
6274  	if (sleep_ok) {
6275  		mutex_lock(&dd->pport->hls_lock);
6276  	} else {
6277  		while (!mutex_trylock(&dd->pport->hls_lock))
6278  			udelay(1);
6279  	}
6280  
6281  	if (dd->lcb_access_count == 0) {
6282  		dd_dev_err(dd, "%s: LCB access count is zero.  Skipping.\n",
6283  			   __func__);
6284  		goto done;
6285  	}
6286  
6287  	if (dd->lcb_access_count == 1) {
6288  		set_8051_lcb_access(dd);
6289  		ret = request_8051_lcb_access(dd);
6290  		if (ret) {
6291  			dd_dev_err(dd,
6292  				   "%s: unable to release LCB access, err %d\n",
6293  				   __func__, ret);
6294  			/* restore host access if the grant didn't work */
6295  			set_host_lcb_access(dd);
6296  			goto done;
6297  		}
6298  	}
6299  	dd->lcb_access_count--;
6300  done:
6301  	mutex_unlock(&dd->pport->hls_lock);
6302  	return ret;
6303  }
6304  
6305  /*
6306   * Initialize LCB access variables and state.  Called during driver load,
6307   * after most of the initialization is finished.
6308   *
6309   * The DC default is LCB access on for the host.  The driver defaults to
6310   * leaving access to the 8051.  Assign access now - this constrains the call
6311   * to this routine to be after all LCB set-up is done.  In particular, after
6312   * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6313   */
6314  static void init_lcb_access(struct hfi1_devdata *dd)
6315  {
6316  	dd->lcb_access_count = 0;
6317  }
6318  
6319  /*
6320   * Write a response back to an 8051 request.
6321   */
6322  static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6323  {
6324  	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6325  		  DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6326  		  (u64)return_code <<
6327  		  DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6328  		  (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6329  }
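/*
 * Layout written above (from the CSR field shifts): the COMPLETED bit is
 * set, the 8-bit return code goes in the RETURN_CODE field, and the 16-bit
 * response data goes in the RSP_DATA field of DC_DC8051_CFG_EXT_DEV_0.
 */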
6330  
6331  /*
6332   * Handle host requests from the 8051.
6333   */
6334  static void handle_8051_request(struct hfi1_pportdata *ppd)
6335  {
6336  	struct hfi1_devdata *dd = ppd->dd;
6337  	u64 reg;
6338  	u16 data = 0;
6339  	u8 type;
6340  
6341  	reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6342  	if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6343  		return;	/* no request */
6344  
6345  	/* zero out COMPLETED so the response is seen */
6346  	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6347  
6348  	/* extract request details */
6349  	type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6350  			& DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6351  	data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6352  			& DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6353  
6354  	switch (type) {
6355  	case HREQ_LOAD_CONFIG:
6356  	case HREQ_SAVE_CONFIG:
6357  	case HREQ_READ_CONFIG:
6358  	case HREQ_SET_TX_EQ_ABS:
6359  	case HREQ_SET_TX_EQ_REL:
6360  	case HREQ_ENABLE:
6361  		dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6362  			    type);
6363  		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6364  		break;
6365  	case HREQ_LCB_RESET:
6366  		/* Put the LCB, RX FPE and TX FPE into reset */
6367  		write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET);
6368  		/* Make sure the write completed */
6369  		(void)read_csr(dd, DCC_CFG_RESET);
6370  		/* Hold the reset long enough to take effect */
6371  		udelay(1);
6372  		/* Take the LCB, RX FPE and TX FPE out of reset */
6373  		write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
6374  		hreq_response(dd, HREQ_SUCCESS, 0);
6375  
6376  		break;
6377  	case HREQ_CONFIG_DONE:
6378  		hreq_response(dd, HREQ_SUCCESS, 0);
6379  		break;
6380  
6381  	case HREQ_INTERFACE_TEST:
6382  		hreq_response(dd, HREQ_SUCCESS, data);
6383  		break;
6384  	default:
6385  		dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6386  		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6387  		break;
6388  	}
6389  }
6390  
6391  /*
6392   * Set up the allocation unit value.
6393   */
6394  void set_up_vau(struct hfi1_devdata *dd, u8 vau)
6395  {
6396  	u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6397  
6398  	/* do not modify other values in the register */
6399  	reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
6400  	reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
6401  	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6402  }
6403  
6404  /*
6405   * Set up initial VL15 credits of the remote.  Assumes the rest of
6406   * the CM credit registers are zero from a previous global or credit reset.
6407   * Shared limit for VL15 will always be 0.
6408   */
6409  void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
6410  {
6411  	u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6412  
6413  	/* set initial values for total and shared credit limit */
6414  	reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
6415  		 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
6416  
6417  	/*
6418  	 * Set total limit to be equal to VL15 credits.
6419  	 * Leave shared limit at 0.
6420  	 */
6421  	reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
6422  	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6423  
6424  	write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6425  		  << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6426  }
6427  
6428  /*
6429   * Zero all credit details from the previous connection and
6430   * reset the CM manager's internal counters.
6431   */
6432  void reset_link_credits(struct hfi1_devdata *dd)
6433  {
6434  	int i;
6435  
6436  	/* remove all previous VL credit limits */
6437  	for (i = 0; i < TXE_NUM_DATA_VL; i++)
6438  		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6439  	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6440  	write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
6441  	/* reset the CM block */
6442  	pio_send_control(dd, PSC_CM_RESET);
6443  	/* reset cached value */
6444  	dd->vl15buf_cached = 0;
6445  }
6446  
6447  /* convert a vCU to a CU */
6448  static u32 vcu_to_cu(u8 vcu)
6449  {
6450  	return 1 << vcu;
6451  }
6452  
6453  /* convert a CU to a vCU */
6454  static u8 cu_to_vcu(u32 cu)
6455  {
6456  	return ilog2(cu);
6457  }
6458  
6459  /* convert a vAU to an AU */
6460  static u32 vau_to_au(u8 vau)
6461  {
6462  	return 8 * (1 << vau);
6463  }
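/*
 * Worked examples for the conversions above: vcu_to_cu(3) == 8 credit
 * units, cu_to_vcu(8) == 3, and vau_to_au(2) == 32 bytes per allocation
 * unit (8 * 2^vau).
 */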
6464  
6465  static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6466  {
6467  	ppd->sm_trap_qp = 0x0;
6468  	ppd->sa_qp = 0x1;
6469  }
6470  
6471  /*
6472   * Graceful LCB shutdown.  This leaves the LCB FIFOs in reset.
6473   */
6474  static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6475  {
6476  	u64 reg;
6477  
6478  	/* clear lcb run: LCB_CFG_RUN.EN = 0 */
6479  	write_csr(dd, DC_LCB_CFG_RUN, 0);
6480  	/* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6481  	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6482  		  1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6483  	/* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6484  	dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6485  	reg = read_csr(dd, DCC_CFG_RESET);
6486  	write_csr(dd, DCC_CFG_RESET, reg |
6487  		  DCC_CFG_RESET_RESET_LCB | DCC_CFG_RESET_RESET_RX_FPE);
6488  	(void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6489  	if (!abort) {
6490  		udelay(1);    /* must hold for the longer of 16cclks or 20ns */
6491  		write_csr(dd, DCC_CFG_RESET, reg);
6492  		write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6493  	}
6494  }
6495  
6496  /*
6497   * This routine should be called after the link has been transitioned to
6498   * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6499   * reset).
6500   *
6501   * The expectation is that the caller of this routine would have taken
6502   * care of properly transitioning the link into the correct state.
6503   * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6504   *       before calling this function.
6505   */
6506  static void _dc_shutdown(struct hfi1_devdata *dd)
6507  {
6508  	lockdep_assert_held(&dd->dc8051_lock);
6509  
6510  	if (dd->dc_shutdown)
6511  		return;
6512  
6513  	dd->dc_shutdown = 1;
6514  	/* Shutdown the LCB */
6515  	lcb_shutdown(dd, 1);
6516  	/*
6517  	 * Going to OFFLINE would have caused the 8051 to put the
6518  	 * SerDes into reset already. Just need to shut down the
6519  	 * 8051 itself.
6520  	 */
6521  	write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6522  }
6523  
6524  static void dc_shutdown(struct hfi1_devdata *dd)
6525  {
6526  	mutex_lock(&dd->dc8051_lock);
6527  	_dc_shutdown(dd);
6528  	mutex_unlock(&dd->dc8051_lock);
6529  }
6530  
6531  /*
6532   * Calling this after the DC has been brought out of reset should not
6533   * do any damage.
6534   * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6535   *       before calling this function.
6536   */
6537  static void _dc_start(struct hfi1_devdata *dd)
6538  {
6539  	lockdep_assert_held(&dd->dc8051_lock);
6540  
6541  	if (!dd->dc_shutdown)
6542  		return;
6543  
6544  	/* Take the 8051 out of reset */
6545  	write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6546  	/* Wait until 8051 is ready */
6547  	if (wait_fm_ready(dd, TIMEOUT_8051_START))
6548  		dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6549  			   __func__);
6550  
6551  	/* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6552  	write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
6553  	/* lcb_shutdown() with abort=1 does not restore these */
6554  	write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6555  	dd->dc_shutdown = 0;
6556  }
6557  
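/* Locked wrapper: restart the 8051/DC while holding dc8051_lock. */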
6558  static void dc_start(struct hfi1_devdata *dd)
6559  {
6560  	mutex_lock(&dd->dc8051_lock);
6561  	_dc_start(dd);
6562  	mutex_unlock(&dd->dc8051_lock);
6563  }
6564  
6565  /*
6566   * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6567   */
6568  static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6569  {
6570  	u64 rx_radr, tx_radr;
6571  	u32 version;
6572  
6573  	if (dd->icode != ICODE_FPGA_EMULATION)
6574  		return;
6575  
6576  	/*
6577  	 * These LCB defaults on emulator _s are good, nothing to do here:
6578  	 *	LCB_CFG_TX_FIFOS_RADR
6579  	 *	LCB_CFG_RX_FIFOS_RADR
6580  	 *	LCB_CFG_LN_DCLK
6581  	 *	LCB_CFG_IGNORE_LOST_RCLK
6582  	 */
6583  	if (is_emulator_s(dd))
6584  		return;
6585  	/* else this is _p */
6586  
6587  	version = emulator_rev(dd);
6588  	if (!is_ax(dd))
6589  		version = 0x2d;	/* all B0 use 0x2d or higher settings */
6590  
6591  	if (version <= 0x12) {
6592  		/* release 0x12 and below */
6593  
6594  		/*
6595  		 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6596  		 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6597  		 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6598  		 */
6599  		rx_radr =
6600  		      0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6601  		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6602  		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6603  		/*
6604  		 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6605  		 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6606  		 */
6607  		tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6608  	} else if (version <= 0x18) {
6609  		/* release 0x13 up to 0x18 */
6610  		/* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6611  		rx_radr =
6612  		      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6613  		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6614  		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6615  		tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6616  	} else if (version == 0x19) {
6617  		/* release 0x19 */
6618  		/* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6619  		rx_radr =
6620  		      0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6621  		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6622  		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6623  		tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6624  	} else if (version == 0x1a) {
6625  		/* release 0x1a */
6626  		/* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6627  		rx_radr =
6628  		      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6629  		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6630  		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6631  		tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6632  		write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6633  	} else {
6634  		/* release 0x1b and higher */
6635  		/* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6636  		rx_radr =
6637  		      0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6638  		    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6639  		    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6640  		tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6641  	}
6642  
6643  	write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6644  	/* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6645  	write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6646  		  DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6647  	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6648  }
6649  
6650  /*
6651   * Handle a SMA idle message
6652   *
6653   * This is a work-queue function outside of the interrupt.
6654   */
6655  void handle_sma_message(struct work_struct *work)
6656  {
6657  	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6658  							sma_message_work);
6659  	struct hfi1_devdata *dd = ppd->dd;
6660  	u64 msg;
6661  	int ret;
6662  
6663  	/*
6664  	 * msg is bytes 1-4 of the 40-bit idle message - the command code
6665  	 * is stripped off
6666  	 */
6667  	ret = read_idle_sma(dd, &msg);
6668  	if (ret)
6669  		return;
6670  	dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6671  	/*
6672  	 * React to the SMA message.  Byte[1] (0 for us) is the command.
6673  	 */
6674  	switch (msg & 0xff) {
6675  	case SMA_IDLE_ARM:
6676  		/*
6677  		 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6678  		 * State Transitions
6679  		 *
6680  		 * Only expected in INIT or ARMED, discard otherwise.
6681  		 */
6682  		if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6683  			ppd->neighbor_normal = 1;
6684  		break;
6685  	case SMA_IDLE_ACTIVE:
6686  		/*
6687  		 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6688  		 * State Transitions
6689  		 *
6690  		 * Can activate the node.  Discard otherwise.
6691  		 */
6692  		if (ppd->host_link_state == HLS_UP_ARMED &&
6693  		    ppd->is_active_optimize_enabled) {
6694  			ppd->neighbor_normal = 1;
6695  			ret = set_link_state(ppd, HLS_UP_ACTIVE);
6696  			if (ret)
6697  				dd_dev_err(
6698  					dd,
6699  					"%s: received Active SMA idle message, couldn't set link to Active\n",
6700  					__func__);
6701  		}
6702  		break;
6703  	default:
6704  		dd_dev_err(dd,
6705  			   "%s: received unexpected SMA idle message 0x%llx\n",
6706  			   __func__, msg);
6707  		break;
6708  	}
6709  }
6710  
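/*
 * Atomically update the RCV_CTRL CSR under rcvctrl_lock: set the bits in
 * 'add', then clear the bits in 'clear'.
 */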
6711  static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6712  {
6713  	u64 rcvctrl;
6714  	unsigned long flags;
6715  
6716  	spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6717  	rcvctrl = read_csr(dd, RCV_CTRL);
6718  	rcvctrl |= add;
6719  	rcvctrl &= ~clear;
6720  	write_csr(dd, RCV_CTRL, rcvctrl);
6721  	spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6722  }
6723  
6724  static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6725  {
6726  	adjust_rcvctrl(dd, add, 0);
6727  }
6728  
6729  static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6730  {
6731  	adjust_rcvctrl(dd, 0, clear);
6732  }
6733  
6734  /*
6735   * Called from all interrupt handlers to start handling an SPC freeze.
6736   */
6737  void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6738  {
6739  	struct hfi1_devdata *dd = ppd->dd;
6740  	struct send_context *sc;
6741  	int i;
6742  	int sc_flags;
6743  
6744  	if (flags & FREEZE_SELF)
6745  		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6746  
6747  	/* enter frozen mode */
6748  	dd->flags |= HFI1_FROZEN;
6749  
6750  	/* notify all SDMA engines that they are going into a freeze */
6751  	sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6752  
6753  	sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
6754  					      SCF_LINK_DOWN : 0);
6755  	/* do halt pre-handling on all enabled send contexts */
6756  	for (i = 0; i < dd->num_send_contexts; i++) {
6757  		sc = dd->send_contexts[i].sc;
6758  		if (sc && (sc->flags & SCF_ENABLED))
6759  			sc_stop(sc, sc_flags);
6760  	}
6761  
6762  	/* Send contexts are frozen. Notify user space */
6763  	hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6764  
6765  	if (flags & FREEZE_ABORT) {
6766  		dd_dev_err(dd,
6767  			   "Aborted freeze recovery. Please REBOOT system\n");
6768  		return;
6769  	}
6770  	/* queue non-interrupt handler */
6771  	queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6772  }
6773  
6774  /*
6775   * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6776   * depending on the "freeze" parameter.
6777   *
6778   * No need to return an error if it times out, our only option
6779   * is to proceed anyway.
6780   */
6781  static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6782  {
6783  	unsigned long timeout;
6784  	u64 reg;
6785  
6786  	timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6787  	while (1) {
6788  		reg = read_csr(dd, CCE_STATUS);
6789  		if (freeze) {
6790  			/* waiting until all indicators are set */
6791  			if ((reg & ALL_FROZE) == ALL_FROZE)
6792  				return;	/* all done */
6793  		} else {
6794  			/* waiting until all indicators are clear */
6795  			if ((reg & ALL_FROZE) == 0)
6796  				return; /* all done */
6797  		}
6798  
6799  		if (time_after(jiffies, timeout)) {
6800  			dd_dev_err(dd,
6801  				   "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6802  				   freeze ? "" : "un", reg & ALL_FROZE,
6803  				   freeze ? ALL_FROZE : 0ull);
6804  			return;
6805  		}
6806  		usleep_range(80, 120);
6807  	}
6808  }
6809  
6810  /*
6811   * Do all freeze handling for the RXE block.
6812   */
6813  static void rxe_freeze(struct hfi1_devdata *dd)
6814  {
6815  	int i;
6816  	struct hfi1_ctxtdata *rcd;
6817  
6818  	/* disable port */
6819  	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6820  
6821  	/* disable all receive contexts */
6822  	for (i = 0; i < dd->num_rcv_contexts; i++) {
6823  		rcd = hfi1_rcd_get_by_index(dd, i);
6824  		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
6825  		hfi1_rcd_put(rcd);
6826  	}
6827  }
6828  
6829  /*
6830   * Unfreeze handling for the RXE block - kernel contexts only.
6831   * This will also enable the port.  User contexts will do unfreeze
6832   * handling on a per-context basis as they call into the driver.
6833   *
6834   */
6835  static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6836  {
6837  	u32 rcvmask;
6838  	u16 i;
6839  	struct hfi1_ctxtdata *rcd;
6840  
6841  	/* enable all kernel contexts */
6842  	for (i = 0; i < dd->num_rcv_contexts; i++) {
6843  		rcd = hfi1_rcd_get_by_index(dd, i);
6844  
6845  		/* Ensure all non-user contexts (including vnic) are enabled */
6846  		if (!rcd ||
6847  		    (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) {
6848  			hfi1_rcd_put(rcd);
6849  			continue;
6850  		}
6851  		rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6852  		/* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6853  		rcvmask |= hfi1_rcvhdrtail_kvaddr(rcd) ?
6854  			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6855  		hfi1_rcvctrl(dd, rcvmask, rcd);
6856  		hfi1_rcd_put(rcd);
6857  	}
6858  
6859  	/* enable port */
6860  	add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6861  }
6862  
6863  /*
6864   * Non-interrupt SPC freeze handling.
6865   *
6866   * This is a work-queue function outside of the triggering interrupt.
6867   */
6868  void handle_freeze(struct work_struct *work)
6869  {
6870  	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6871  								freeze_work);
6872  	struct hfi1_devdata *dd = ppd->dd;
6873  
6874  	/* wait for freeze indicators on all affected blocks */
6875  	wait_for_freeze_status(dd, 1);
6876  
6877  	/* SPC is now frozen */
6878  
6879  	/* do send PIO freeze steps */
6880  	pio_freeze(dd);
6881  
6882  	/* do send DMA freeze steps */
6883  	sdma_freeze(dd);
6884  
6885  	/* do send egress freeze steps - nothing to do */
6886  
6887  	/* do receive freeze steps */
6888  	rxe_freeze(dd);
6889  
6890  	/*
6891  	 * Unfreeze the hardware - clear the freeze, wait for each
6892  	 * block's frozen bit to clear, then clear the frozen flag.
6893  	 */
6894  	write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6895  	wait_for_freeze_status(dd, 0);
6896  
6897  	if (is_ax(dd)) {
6898  		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6899  		wait_for_freeze_status(dd, 1);
6900  		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6901  		wait_for_freeze_status(dd, 0);
6902  	}
6903  
6904  	/* do send PIO unfreeze steps for kernel contexts */
6905  	pio_kernel_unfreeze(dd);
6906  
6907  	/* do send DMA unfreeze steps */
6908  	sdma_unfreeze(dd);
6909  
6910  	/* do send egress unfreeze steps - nothing to do */
6911  
6912  	/* do receive unfreeze steps for kernel contexts */
6913  	rxe_kernel_unfreeze(dd);
6914  
6915  	/*
6916  	 * The unfreeze procedure touches global device registers when
6917  	 * it disables and re-enables RXE. Mark the device unfrozen
6918  	 * after all that is done so other parts of the driver waiting
6919  	 * for the device to unfreeze don't do things out of order.
6920  	 *
6921  	 * The above implies that the meaning of HFI1_FROZEN flag is
6922  	 * "Device has gone into freeze mode and freeze mode handling
6923  	 * is still in progress."
6924  	 *
6925  	 * The flag will be removed when freeze mode processing has
6926  	 * completed.
6927  	 */
6928  	dd->flags &= ~HFI1_FROZEN;
6929  	wake_up(&dd->event_queue);
6930  
6931  	/* no longer frozen */
6932  }
6933  
6934  /**
6935   * update_xmit_counters - update PortXmitWait/PortVlXmitWait
6936   * counters.
6937   * @ppd: info of physical Hfi port
6938   * @link_width: new link width after link up or downgrade
6939   *
6940   * Update the PortXmitWait and PortVlXmitWait counters after
6941   * a link up or downgrade event to reflect a link width change.
6942   */
6943  static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width)
6944  {
6945  	int i;
6946  	u16 tx_width;
6947  	u16 link_speed;
6948  
6949  	tx_width = tx_link_width(link_width);
6950  	link_speed = get_link_speed(ppd->link_speed_active);
6951  
6952  	/*
6953  	 * There are C_VL_COUNT PortVLXmitWait counters.
6954  	 * Add 1 to C_VL_COUNT to include the PortXmitWait counter.
6955  	 */
6956  	for (i = 0; i < C_VL_COUNT + 1; i++)
6957  		get_xmit_wait_counters(ppd, tx_width, link_speed, i);
6958  }
6959  
6960  /*
6961   * Handle a link up interrupt from the 8051.
6962   *
6963   * This is a work-queue function outside of the interrupt.
6964   */
6965  void handle_link_up(struct work_struct *work)
6966  {
6967  	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6968  						  link_up_work);
6969  	struct hfi1_devdata *dd = ppd->dd;
6970  
6971  	set_link_state(ppd, HLS_UP_INIT);
6972  
6973  	/* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6974  	read_ltp_rtt(dd);
6975  	/*
6976  	 * OPA specifies that certain counters are cleared on a transition
6977  	 * to link up, so do that.
6978  	 */
6979  	clear_linkup_counters(dd);
6980  	/*
6981  	 * And (re)set link up default values.
6982  	 */
6983  	set_linkup_defaults(ppd);
6984  
6985  	/*
6986  	 * Set VL15 credits. Use cached value from verify cap interrupt.
6987  	 * In case of quick linkup or simulator, vl15 value will be set by
6988  	 * handle_linkup_change. VerifyCap interrupt handler will not be
6989  	 * called in those scenarios.
6990  	 */
6991  	if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
6992  		set_up_vl15(dd, dd->vl15buf_cached);
6993  
6994  	/* enforce link speed enabled */
6995  	if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6996  		/* oops - current speed is not enabled, bounce */
6997  		dd_dev_err(dd,
6998  			   "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6999  			   ppd->link_speed_active, ppd->link_speed_enabled);
7000  		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
7001  				     OPA_LINKDOWN_REASON_SPEED_POLICY);
7002  		set_link_state(ppd, HLS_DN_OFFLINE);
7003  		start_link(ppd);
7004  	}
7005  }
7006  
7007  /*
7008   * Several pieces of LNI information were cached for SMA in ppd.
7009   * Reset these on link down
7010   */
7011  static void reset_neighbor_info(struct hfi1_pportdata *ppd)
7012  {
7013  	ppd->neighbor_guid = 0;
7014  	ppd->neighbor_port_number = 0;
7015  	ppd->neighbor_type = 0;
7016  	ppd->neighbor_fm_security = 0;
7017  }
7018  
7019  static const char * const link_down_reason_strs[] = {
7020  	[OPA_LINKDOWN_REASON_NONE] = "None",
7021  	[OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
7022  	[OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
7023  	[OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
7024  	[OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
7025  	[OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
7026  	[OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
7027  	[OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
7028  	[OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
7029  	[OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
7030  	[OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
7031  	[OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
7032  	[OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
7033  	[OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
7034  	[OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
7035  	[OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
7036  	[OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
7037  	[OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
7038  	[OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
7039  	[OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
7040  	[OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
7041  	[OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
7042  	[OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
7043  	[OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
7044  	[OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
7045  	[OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
7046  	[OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
7047  	[OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
7048  	[OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
7049  	[OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
7050  	[OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
7051  	[OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
7052  	[OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
7053  					"Excessive buffer overrun",
7054  	[OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
7055  	[OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
7056  	[OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
7057  	[OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
7058  	[OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
7059  	[OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
7060  	[OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
7061  	[OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
7062  					"Local media not installed",
7063  	[OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
7064  	[OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
7065  	[OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
7066  					"End to end not installed",
7067  	[OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
7068  	[OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
7069  	[OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
7070  	[OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
7071  	[OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
7072  	[OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
7073  };
7074  
7075  /* return the neighbor link down reason string */
7076  static const char *link_down_reason_str(u8 reason)
7077  {
7078  	const char *str = NULL;
7079  
7080  	if (reason < ARRAY_SIZE(link_down_reason_strs))
7081  		str = link_down_reason_strs[reason];
7082  	if (!str)
7083  		str = "(invalid)";
7084  
7085  	return str;
7086  }
7087  
7088  /*
7089   * Handle a link down interrupt from the 8051.
7090   *
7091   * This is a work-queue function outside of the interrupt.
7092   */
7093  void handle_link_down(struct work_struct *work)
7094  {
7095  	u8 lcl_reason, neigh_reason = 0;
7096  	u8 link_down_reason;
7097  	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7098  						  link_down_work);
7099  	int was_up;
7100  	static const char ldr_str[] = "Link down reason: ";
7101  
7102  	if ((ppd->host_link_state &
7103  	     (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
7104  	     ppd->port_type == PORT_TYPE_FIXED)
7105  		ppd->offline_disabled_reason =
7106  			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
7107  
7108  	/* Go offline first, then deal with reading/writing through 8051 */
7109  	was_up = !!(ppd->host_link_state & HLS_UP);
7110  	set_link_state(ppd, HLS_DN_OFFLINE);
7111  	xchg(&ppd->is_link_down_queued, 0);
7112  
7113  	if (was_up) {
7114  		lcl_reason = 0;
7115  		/* link down reason is only valid if the link was up */
7116  		read_link_down_reason(ppd->dd, &link_down_reason);
7117  		switch (link_down_reason) {
7118  		case LDR_LINK_TRANSFER_ACTIVE_LOW:
7119  			/* the link went down, no idle message reason */
7120  			dd_dev_info(ppd->dd, "%sUnexpected link down\n",
7121  				    ldr_str);
7122  			break;
7123  		case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
7124  			/*
7125  			 * The neighbor reason is only valid if an idle message
7126  			 * was received for it.
7127  			 */
7128  			read_planned_down_reason_code(ppd->dd, &neigh_reason);
7129  			dd_dev_info(ppd->dd,
7130  				    "%sNeighbor link down message %d, %s\n",
7131  				    ldr_str, neigh_reason,
7132  				    link_down_reason_str(neigh_reason));
7133  			break;
7134  		case LDR_RECEIVED_HOST_OFFLINE_REQ:
7135  			dd_dev_info(ppd->dd,
7136  				    "%sHost requested link to go offline\n",
7137  				    ldr_str);
7138  			break;
7139  		default:
7140  			dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
7141  				    ldr_str, link_down_reason);
7142  			break;
7143  		}
7144  
7145  		/*
7146  		 * If no reason, assume peer-initiated but missed
7147  		 * LinkGoingDown idle flits.
7148  		 */
7149  		if (neigh_reason == 0)
7150  			lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
7151  	} else {
7152  		/* went down while polling or going up */
7153  		lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
7154  	}
7155  
7156  	set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
7157  
7158  	/* inform the SMA when the link transitions from up to down */
7159  	if (was_up && ppd->local_link_down_reason.sma == 0 &&
7160  	    ppd->neigh_link_down_reason.sma == 0) {
7161  		ppd->local_link_down_reason.sma =
7162  					ppd->local_link_down_reason.latest;
7163  		ppd->neigh_link_down_reason.sma =
7164  					ppd->neigh_link_down_reason.latest;
7165  	}
7166  
7167  	reset_neighbor_info(ppd);
7168  
7169  	/* disable the port */
7170  	clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
7171  
7172  	/*
7173  	 * If there is no cable attached, turn the DC off. Otherwise,
7174  	 * start the link bring up.
7175  	 */
7176  	if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
7177  		dc_shutdown(ppd->dd);
7178  	else
7179  		start_link(ppd);
7180  }
7181  
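/*
 * Bounce the link: take it offline and restart the link bring-up.
 *
 * This is a work-queue function outside of the interrupt.
 */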
7182  void handle_link_bounce(struct work_struct *work)
7183  {
7184  	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7185  							link_bounce_work);
7186  
7187  	/*
7188  	 * Only do something if the link is currently up.
7189  	 */
7190  	if (ppd->host_link_state & HLS_UP) {
7191  		set_link_state(ppd, HLS_DN_OFFLINE);
7192  		start_link(ppd);
7193  	} else {
7194  		dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
7195  			    __func__, link_state_name(ppd->host_link_state));
7196  	}
7197  }
7198  
7199  /*
7200   * Mask conversion: Capability exchange to Port LTP.  The capability
7201   * exchange has an implicit 16b CRC that is mandatory.
7202   */
7203  static int cap_to_port_ltp(int cap)
7204  {
7205  	int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7206  
7207  	if (cap & CAP_CRC_14B)
7208  		port_ltp |= PORT_LTP_CRC_MODE_14;
7209  	if (cap & CAP_CRC_48B)
7210  		port_ltp |= PORT_LTP_CRC_MODE_48;
7211  	if (cap & CAP_CRC_12B_16B_PER_LANE)
7212  		port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7213  
7214  	return port_ltp;
7215  }
7216  
7217  /*
7218   * Convert an OPA Port LTP mask to capability mask
7219   */
7220  int port_ltp_to_cap(int port_ltp)
7221  {
7222  	int cap_mask = 0;
7223  
7224  	if (port_ltp & PORT_LTP_CRC_MODE_14)
7225  		cap_mask |= CAP_CRC_14B;
7226  	if (port_ltp & PORT_LTP_CRC_MODE_48)
7227  		cap_mask |= CAP_CRC_48B;
7228  	if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7229  		cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7230  
7231  	return cap_mask;
7232  }
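/*
 * For example, a capability mask of CAP_CRC_14B maps to
 * PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14 via cap_to_port_ltp();
 * converting that back with port_ltp_to_cap() yields only CAP_CRC_14B,
 * since the mandatory 16b mode has no capability bit.
 */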
7233  
7234  /*
7235   * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7236   */
7237  static int lcb_to_port_ltp(int lcb_crc)
7238  {
7239  	int port_ltp = 0;
7240  
7241  	if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7242  		port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7243  	else if (lcb_crc == LCB_CRC_48B)
7244  		port_ltp = PORT_LTP_CRC_MODE_48;
7245  	else if (lcb_crc == LCB_CRC_14B)
7246  		port_ltp = PORT_LTP_CRC_MODE_14;
7247  	else
7248  		port_ltp = PORT_LTP_CRC_MODE_16;
7249  
7250  	return port_ltp;
7251  }
7252  
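/*
 * Clear the full management P_Key kept in pkey table slot 2, update the
 * pkey configuration, and signal the pkey change event.
 */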
7253  static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7254  {
7255  	if (ppd->pkeys[2] != 0) {
7256  		ppd->pkeys[2] = 0;
7257  		(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7258  		hfi1_event_pkey_change(ppd->dd, ppd->port);
7259  	}
7260  }
7261  
7262  /*
7263   * Convert the given link width to the OPA link width bitmask.
7264   */
7265  static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7266  {
7267  	switch (width) {
7268  	case 0:
7269  		/*
7270  		 * Simulator and quick linkup do not set the width.
7271  		 * Just set it to 4x without complaint.
7272  		 */
7273  		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7274  			return OPA_LINK_WIDTH_4X;
7275  		return 0; /* no lanes up */
7276  	case 1: return OPA_LINK_WIDTH_1X;
7277  	case 2: return OPA_LINK_WIDTH_2X;
7278  	case 3: return OPA_LINK_WIDTH_3X;
7279  	case 4: return OPA_LINK_WIDTH_4X;
7280  	default:
7281  		dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7282  			    __func__, width);
7283  		return OPA_LINK_WIDTH_4X;
7284  	}
7285  }
7286  
7287  /*
7288   * Do a population count on the bottom nibble.
7289   */
7290  static const u8 bit_counts[16] = {
7291  	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7292  };
7293  
7294  static inline u8 nibble_to_count(u8 nibble)
7295  {
7296  	return bit_counts[nibble & 0xf];
7297  }
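/* For example, an enable mask of 0xb (lanes 0, 1, and 3) counts as 3 lanes. */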
7298  
7299  /*
7300   * Read the active lane information from the 8051 registers and return
7301   * their widths.
7302   *
7303   * Active lane information is found in these 8051 registers:
7304   *	enable_lane_tx
7305   *	enable_lane_rx
7306   */
7307  static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7308  			    u16 *rx_width)
7309  {
7310  	u16 tx, rx;
7311  	u8 enable_lane_rx;
7312  	u8 enable_lane_tx;
7313  	u8 tx_polarity_inversion;
7314  	u8 rx_polarity_inversion;
7315  	u8 max_rate;
7316  
7317  	/* read the active lanes */
7318  	read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7319  			 &rx_polarity_inversion, &max_rate);
7320  	read_local_lni(dd, &enable_lane_rx);
7321  
7322  	/* convert to counts */
7323  	tx = nibble_to_count(enable_lane_tx);
7324  	rx = nibble_to_count(enable_lane_rx);
7325  
7326  	/*
7327  	 * Set link_speed_active here, overriding what was set in
7328  	 * handle_verify_cap().  The ASIC 8051 firmware does not correctly
7329  	 * set the max_rate field in handle_verify_cap until v0.19.
7330  	 */
7331  	if ((dd->icode == ICODE_RTL_SILICON) &&
7332  	    (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
7333  		/* max_rate: 0 = 12.5G, 1 = 25G */
7334  		switch (max_rate) {
7335  		case 0:
7336  			dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7337  			break;
7338  		case 1:
7339  			dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7340  			break;
7341  		default:
7342  			dd_dev_err(dd,
7343  				   "%s: unexpected max rate %d, using 25Gb\n",
7344  				   __func__, (int)max_rate);
7345  			dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7346  			break;
7347  		}
7348  	}
7349  
7350  	dd_dev_info(dd,
7351  		    "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7352  		    enable_lane_tx, tx, enable_lane_rx, rx);
7353  	*tx_width = link_width_to_bits(dd, tx);
7354  	*rx_width = link_width_to_bits(dd, rx);
7355  }
7356  
7357  /*
7358   * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7359   * Valid after the end of VerifyCap and during LinkUp.  Does not change
7360   * after link up.  I.e. look elsewhere for downgrade information.
7361   *
7362   * Bits are:
7363   *	+ bits [7:4] contain the number of active transmitters
7364   *	+ bits [3:0] contain the number of active receivers
7365   * These are numbers 1 through 4 and can be different values if the
7366   * link is asymmetric.
7367   *
7368   * verify_cap_local_fm_link_width[0] retains its original value.
7369   */
7370  static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7371  			      u16 *rx_width)
7372  {
7373  	u16 widths, tx, rx;
7374  	u8 misc_bits, local_flags;
7375  	u16 active_tx, active_rx;
7376  
7377  	read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths);
7378  	tx = widths >> 12;
7379  	rx = (widths >> 8) & 0xf;
7380  
7381  	*tx_width = link_width_to_bits(dd, tx);
7382  	*rx_width = link_width_to_bits(dd, rx);
7383  
7384  	/* print the active widths */
7385  	get_link_widths(dd, &active_tx, &active_rx);
7386  }
7387  
7388  /*
7389   * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7390   * hardware information when the link first comes up.
7391   *
7392   * The link width is not available until after VerifyCap.AllFramesReceived
7393   * (the trigger for handle_verify_cap), so this is outside that routine
7394   * and should be called when the 8051 signals linkup.
7395   */
7396  void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7397  {
7398  	u16 tx_width, rx_width;
7399  
7400  	/* get end-of-LNI link widths */
7401  	get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7402  
7403  	/* use tx_width as the link is supposed to be symmetric on link up */
7404  	ppd->link_width_active = tx_width;
7405  	/* link width downgrade active (LWD.A) starts out matching LW.A */
7406  	ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7407  	ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7408  	/* per OPA spec, on link up LWD.E resets to LWD.S */
7409  	ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7410  	/* cache the active egress rate (units of 10^6 bits/sec) */
7411  	ppd->current_egress_rate = active_egress_rate(ppd);
7412  }
7413  
7414  /*
7415   * Handle a verify capabilities interrupt from the 8051.
7416   *
7417   * This is a work-queue function outside of the interrupt.
7418   */
7419  void handle_verify_cap(struct work_struct *work)
7420  {
7421  	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7422  								link_vc_work);
7423  	struct hfi1_devdata *dd = ppd->dd;
7424  	u64 reg;
7425  	u8 power_management;
7426  	u8 continuous;
7427  	u8 vcu;
7428  	u8 vau;
7429  	u8 z;
7430  	u16 vl15buf;
7431  	u16 link_widths;
7432  	u16 crc_mask;
7433  	u16 crc_val;
7434  	u16 device_id;
7435  	u16 active_tx, active_rx;
7436  	u8 partner_supported_crc;
7437  	u8 remote_tx_rate;
7438  	u8 device_rev;
7439  
7440  	set_link_state(ppd, HLS_VERIFY_CAP);
7441  
7442  	lcb_shutdown(dd, 0);
7443  	adjust_lcb_for_fpga_serdes(dd);
7444  
7445  	read_vc_remote_phy(dd, &power_management, &continuous);
7446  	read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7447  			      &partner_supported_crc);
7448  	read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7449  	read_remote_device_id(dd, &device_id, &device_rev);
7450  
7451  	/* print the active widths */
7452  	get_link_widths(dd, &active_tx, &active_rx);
7453  	dd_dev_info(dd,
7454  		    "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7455  		    (int)power_management, (int)continuous);
7456  	dd_dev_info(dd,
7457  		    "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7458  		    (int)vau, (int)z, (int)vcu, (int)vl15buf,
7459  		    (int)partner_supported_crc);
7460  	dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7461  		    (u32)remote_tx_rate, (u32)link_widths);
7462  	dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7463  		    (u32)device_id, (u32)device_rev);
7464  	/*
7465  	 * The peer vAU value just read is the peer receiver value.  HFI does
7466  	 * not support a transmit vAU of 0 (AU == 8).  We advertised that
7467  	 * with Z=1 in the fabric capabilities sent to the peer.  The peer
7468  	 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7469  	 * receive to vAU of 1 (AU == 16).  Do the same here.  We do not care
7470  	 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7471  	 * subject to the Z value exception.
7472  	 */
7473  	if (vau == 0)
7474  		vau = 1;
7475  	set_up_vau(dd, vau);
7476  
7477  	/*
7478  	 * Set VL15 credits to 0 in global credit register. Cache remote VL15
7479  	 * credits value and wait for the link-up interrupt to set it.
7480  	 */
7481  	set_up_vl15(dd, 0);
7482  	dd->vl15buf_cached = vl15buf;
7483  
7484  	/* set up the LCB CRC mode */
7485  	crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7486  
7487  	/* order is important: use the lowest bit in common */
7488  	if (crc_mask & CAP_CRC_14B)
7489  		crc_val = LCB_CRC_14B;
7490  	else if (crc_mask & CAP_CRC_48B)
7491  		crc_val = LCB_CRC_48B;
7492  	else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7493  		crc_val = LCB_CRC_12B_16B_PER_LANE;
7494  	else
7495  		crc_val = LCB_CRC_16B;
7496  
7497  	dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7498  	write_csr(dd, DC_LCB_CFG_CRC_MODE,
7499  		  (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7500  
7501  	/* set (14b only) or clear sideband credit */
7502  	reg = read_csr(dd, SEND_CM_CTRL);
7503  	if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7504  		write_csr(dd, SEND_CM_CTRL,
7505  			  reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7506  	} else {
7507  		write_csr(dd, SEND_CM_CTRL,
7508  			  reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7509  	}
7510  
7511  	ppd->link_speed_active = 0;	/* invalid value */
7512  	if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
7513  		/* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7514  		switch (remote_tx_rate) {
7515  		case 0:
7516  			ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7517  			break;
7518  		case 1:
7519  			ppd->link_speed_active = OPA_LINK_SPEED_25G;
7520  			break;
7521  		}
7522  	} else {
7523  		/* actual rate is highest bit of the ANDed rates */
7524  		u8 rate = remote_tx_rate & ppd->local_tx_rate;
7525  
7526  		if (rate & 2)
7527  			ppd->link_speed_active = OPA_LINK_SPEED_25G;
7528  		else if (rate & 1)
7529  			ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7530  	}
7531  	if (ppd->link_speed_active == 0) {
7532  		dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7533  			   __func__, (int)remote_tx_rate);
7534  		ppd->link_speed_active = OPA_LINK_SPEED_25G;
7535  	}
7536  
7537  	/*
7538  	 * Cache the values of the supported, enabled, and active
7539  	 * LTP CRC modes to return in 'portinfo' queries. But the bit
7540  	 * flags that are returned in the portinfo query differ from
7541  	 * what's in the link_crc_mask, port_crc_mode_enabled, and crc_val
7542  	 * variables. Convert these here.
7543  	 */
7544  	ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7545  		/* supported crc modes */
7546  	ppd->port_ltp_crc_mode |=
7547  		cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7548  		/* enabled crc modes */
7549  	ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7550  		/* active crc mode */
7551  
7552  	/* set up the remote credit return table */
7553  	assign_remote_cm_au_table(dd, vcu);
7554  
7555  	/*
7556  	 * The LCB is reset on entry to handle_verify_cap(), so this must
7557  	 * be applied on every link up.
7558  	 *
7559  	 * Adjust LCB error kill enable to kill the link if
7560  	 * these RBUF errors are seen:
7561  	 *	REPLAY_BUF_MBE_SMASK
7562  	 *	FLIT_INPUT_BUF_MBE_SMASK
7563  	 */
7564  	if (is_ax(dd)) {			/* fixed in B0 */
7565  		reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7566  		reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7567  			| DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7568  		write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7569  	}
7570  
7571  	/* pull LCB fifos out of reset - all fifo clocks must be stable */
7572  	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7573  
7574  	/* give 8051 access to the LCB CSRs */
7575  	write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7576  	set_8051_lcb_access(dd);
7577  
7578  	/* tell the 8051 to go to LinkUp */
7579  	set_link_state(ppd, HLS_GOING_UP);
7580  }
7581  
7582  /**
7583   * apply_link_downgrade_policy - Apply the link width downgrade enabled
7584   * policy against the current active link widths.
7585   * @ppd: info of physical Hfi port
7586   * @refresh_widths: True indicates link downgrade event
7587   * @return: True indicates a successful link downgrade. False indicates
7588   *	    link downgrade event failed and the link will bounce back to
7589   *	    default link width.
7590   *
7591   * Called when the enabled policy changes or the active link widths
7592   * change.
7593   * Refresh_widths indicates that a link downgrade occurred. The
7594   * link_downgraded variable is set by refresh_widths and
7595   * determines the success/failure of the policy application.
7596   */
7597  bool apply_link_downgrade_policy(struct hfi1_pportdata *ppd,
7598  				 bool refresh_widths)
7599  {
7600  	int do_bounce = 0;
7601  	int tries;
7602  	u16 lwde;
7603  	u16 tx, rx;
7604  	bool link_downgraded = refresh_widths;
7605  
7606  	/* use the hls lock to avoid a race with actual link up */
7607  	tries = 0;
7608  retry:
7609  	mutex_lock(&ppd->hls_lock);
7610  	/* only apply if the link is up */
7611  	if (ppd->host_link_state & HLS_DOWN) {
7612  		/* still going up - wait and retry */
7613  		if (ppd->host_link_state & HLS_GOING_UP) {
7614  			if (++tries < 1000) {
7615  				mutex_unlock(&ppd->hls_lock);
7616  				usleep_range(100, 120); /* arbitrary */
7617  				goto retry;
7618  			}
7619  			dd_dev_err(ppd->dd,
7620  				   "%s: giving up waiting for link state change\n",
7621  				   __func__);
7622  		}
7623  		goto done;
7624  	}
7625  
7626  	lwde = ppd->link_width_downgrade_enabled;
7627  
7628  	if (refresh_widths) {
7629  		get_link_widths(ppd->dd, &tx, &rx);
7630  		ppd->link_width_downgrade_tx_active = tx;
7631  		ppd->link_width_downgrade_rx_active = rx;
7632  	}
7633  
7634  	if (ppd->link_width_downgrade_tx_active == 0 ||
7635  	    ppd->link_width_downgrade_rx_active == 0) {
7636  		/* the 8051 reported a dead link as a downgrade */
7637  		dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7638  		link_downgraded = false;
7639  	} else if (lwde == 0) {
7640  		/* downgrade is disabled */
7641  
7642  		/* bounce if not at starting active width */
7643  		if ((ppd->link_width_active !=
7644  		     ppd->link_width_downgrade_tx_active) ||
7645  		    (ppd->link_width_active !=
7646  		     ppd->link_width_downgrade_rx_active)) {
7647  			dd_dev_err(ppd->dd,
7648  				   "Link downgrade is disabled and link has downgraded, downing link\n");
7649  			dd_dev_err(ppd->dd,
7650  				   "  original 0x%x, tx active 0x%x, rx active 0x%x\n",
7651  				   ppd->link_width_active,
7652  				   ppd->link_width_downgrade_tx_active,
7653  				   ppd->link_width_downgrade_rx_active);
7654  			do_bounce = 1;
7655  			link_downgraded = false;
7656  		}
7657  	} else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7658  		   (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7659  		/* Tx or Rx is outside the enabled policy */
7660  		dd_dev_err(ppd->dd,
7661  			   "Link is outside of downgrade allowed, downing link\n");
7662  		dd_dev_err(ppd->dd,
7663  			   "  enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7664  			   lwde, ppd->link_width_downgrade_tx_active,
7665  			   ppd->link_width_downgrade_rx_active);
7666  		do_bounce = 1;
7667  		link_downgraded = false;
7668  	}
7669  
7670  done:
7671  	mutex_unlock(&ppd->hls_lock);
7672  
7673  	if (do_bounce) {
7674  		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7675  				     OPA_LINKDOWN_REASON_WIDTH_POLICY);
7676  		set_link_state(ppd, HLS_DN_OFFLINE);
7677  		start_link(ppd);
7678  	}
7679  
7680  	return link_downgraded;
7681  }
7682  
7683  /*
7684   * Handle a link downgrade interrupt from the 8051.
7685   *
7686   * This is a work-queue function outside of the interrupt.
7687   */
7688  void handle_link_downgrade(struct work_struct *work)
7689  {
7690  	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7691  							link_downgrade_work);
7692  
7693  	dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7694  	if (apply_link_downgrade_policy(ppd, true))
7695  		update_xmit_counters(ppd, ppd->link_width_downgrade_tx_active);
7696  }
7697  
7698  static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7699  {
7700  	return flag_string(buf, buf_len, flags, dcc_err_flags,
7701  		ARRAY_SIZE(dcc_err_flags));
7702  }
7703  
7704  static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7705  {
7706  	return flag_string(buf, buf_len, flags, lcb_err_flags,
7707  		ARRAY_SIZE(lcb_err_flags));
7708  }
7709  
7710  static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7711  {
7712  	return flag_string(buf, buf_len, flags, dc8051_err_flags,
7713  		ARRAY_SIZE(dc8051_err_flags));
7714  }
7715  
7716  static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7717  {
7718  	return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7719  		ARRAY_SIZE(dc8051_info_err_flags));
7720  }
7721  
7722  static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7723  {
7724  	return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7725  		ARRAY_SIZE(dc8051_info_host_msg_flags));
7726  }
7727  
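/*
 * Handle a DC8051 error interrupt: decode the SET_BY_8051 information
 * into error and host message flags, queue the appropriate link work
 * (SMA message, link up, verify cap, downgrade, link down), and report
 * anything left over.
 */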
7728  static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7729  {
7730  	struct hfi1_pportdata *ppd = dd->pport;
7731  	u64 info, err, host_msg;
7732  	int queue_link_down = 0;
7733  	char buf[96];
7734  
7735  	/* look at the flags */
7736  	if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7737  		/* 8051 information set by firmware */
7738  		/* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7739  		info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7740  		err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7741  			& DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7742  		host_msg = (info >>
7743  			DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7744  			& DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7745  
7746  		/*
7747  		 * Handle error flags.
7748  		 */
7749  		if (err & FAILED_LNI) {
7750  			/*
7751  			 * LNI error indications are cleared by the 8051
7752  			 * only when starting polling.  Only pay attention
7753  			 * to them when in the states that occur during
7754  			 * LNI.
7755  			 */
7756  			if (ppd->host_link_state
7757  			    & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7758  				queue_link_down = 1;
7759  				dd_dev_info(dd, "Link error: %s\n",
7760  					    dc8051_info_err_string(buf,
7761  								   sizeof(buf),
7762  								   err &
7763  								   FAILED_LNI));
7764  			}
7765  			err &= ~(u64)FAILED_LNI;
7766  		}
7767  		/* unknown frames can happen during LNI, just count */
7768  		if (err & UNKNOWN_FRAME) {
7769  			ppd->unknown_frame_count++;
7770  			err &= ~(u64)UNKNOWN_FRAME;
7771  		}
7772  		if (err) {
7773  			/* report remaining errors, but do not do anything */
7774  			dd_dev_err(dd, "8051 info error: %s\n",
7775  				   dc8051_info_err_string(buf, sizeof(buf),
7776  							  err));
7777  		}
7778  
7779  		/*
7780  		 * Handle host message flags.
7781  		 */
7782  		if (host_msg & HOST_REQ_DONE) {
7783  			/*
7784  			 * Presently, the driver does a busy wait for
7785  			 * host requests to complete.  This is only an
7786  			 * informational message.
7787  			 * NOTE: The 8051 clears the host message
7788  			 * information *on the next 8051 command*.
7789  			 * Therefore, when linkup is achieved,
7790  			 * this flag will still be set.
7791  			 */
7792  			host_msg &= ~(u64)HOST_REQ_DONE;
7793  		}
7794  		if (host_msg & BC_SMA_MSG) {
7795  			queue_work(ppd->link_wq, &ppd->sma_message_work);
7796  			host_msg &= ~(u64)BC_SMA_MSG;
7797  		}
7798  		if (host_msg & LINKUP_ACHIEVED) {
7799  			dd_dev_info(dd, "8051: Link up\n");
7800  			queue_work(ppd->link_wq, &ppd->link_up_work);
7801  			host_msg &= ~(u64)LINKUP_ACHIEVED;
7802  		}
7803  		if (host_msg & EXT_DEVICE_CFG_REQ) {
7804  			handle_8051_request(ppd);
7805  			host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7806  		}
7807  		if (host_msg & VERIFY_CAP_FRAME) {
7808  			queue_work(ppd->link_wq, &ppd->link_vc_work);
7809  			host_msg &= ~(u64)VERIFY_CAP_FRAME;
7810  		}
7811  		if (host_msg & LINK_GOING_DOWN) {
7812  			const char *extra = "";
7813  			/* no downgrade action needed if going down */
7814  			if (host_msg & LINK_WIDTH_DOWNGRADED) {
7815  				host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7816  				extra = " (ignoring downgrade)";
7817  			}
7818  			dd_dev_info(dd, "8051: Link down%s\n", extra);
7819  			queue_link_down = 1;
7820  			host_msg &= ~(u64)LINK_GOING_DOWN;
7821  		}
7822  		if (host_msg & LINK_WIDTH_DOWNGRADED) {
7823  			queue_work(ppd->link_wq, &ppd->link_downgrade_work);
7824  			host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7825  		}
7826  		if (host_msg) {
7827  			/* report remaining messages, but do not do anything */
7828  			dd_dev_info(dd, "8051 info host message: %s\n",
7829  				    dc8051_info_host_msg_string(buf,
7830  								sizeof(buf),
7831  								host_msg));
7832  		}
7833  
7834  		reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7835  	}
7836  	if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7837  		/*
7838  		 * Lost the 8051 heartbeat.  If this happens, we
7839  		 * receive constant interrupts about it.  Disable
7840  		 * the interrupt after the first.
7841  		 */
7842  		dd_dev_err(dd, "Lost 8051 heartbeat\n");
7843  		write_csr(dd, DC_DC8051_ERR_EN,
7844  			  read_csr(dd, DC_DC8051_ERR_EN) &
7845  			  ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7846  
7847  		reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7848  	}
7849  	if (reg) {
7850  		/* report the error, but do not do anything */
7851  		dd_dev_err(dd, "8051 error: %s\n",
7852  			   dc8051_err_string(buf, sizeof(buf), reg));
7853  	}
7854  
7855  	if (queue_link_down) {
7856  		/*
7857  		 * If the link is already going down or disabled, do not
7858  		 * queue a link down. If a link down request is already
7859  		 * queued, do not queue another one.
7860  		 */
7861  		if ((ppd->host_link_state &
7862  		    (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7863  		    ppd->link_enabled == 0) {
7864  			dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
7865  				    __func__, ppd->host_link_state,
7866  				    ppd->link_enabled);
7867  		} else {
7868  			if (xchg(&ppd->is_link_down_queued, 1) == 1)
7869  				dd_dev_info(dd,
7870  					    "%s: link down request already queued\n",
7871  					    __func__);
7872  			else
7873  				queue_work(ppd->link_wq, &ppd->link_down_work);
7874  		}
7875  	}
7876  }
7877  
7878  static const char * const fm_config_txt[] = {
7879  [0] =
7880  	"BadHeadDist: Distance violation between two head flits",
7881  [1] =
7882  	"BadTailDist: Distance violation between two tail flits",
7883  [2] =
7884  	"BadCtrlDist: Distance violation between two credit control flits",
7885  [3] =
7886  	"BadCrdAck: Credits return for unsupported VL",
7887  [4] =
7888  	"UnsupportedVLMarker: Received VL Marker",
7889  [5] =
7890  	"BadPreempt: Exceeded the preemption nesting level",
7891  [6] =
7892  	"BadControlFlit: Received unsupported control flit",
7893  /* no 7 */
7894  [8] =
7895  	"UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7896  };
7897  
7898  static const char * const port_rcv_txt[] = {
7899  [1] =
7900  	"BadPktLen: Illegal PktLen",
7901  [2] =
7902  	"PktLenTooLong: Packet longer than PktLen",
7903  [3] =
7904  	"PktLenTooShort: Packet shorter than PktLen",
7905  [4] =
7906  	"BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7907  [5] =
7908  	"BadDLID: Illegal DLID (0, doesn't match HFI)",
7909  [6] =
7910  	"BadL2: Illegal L2 opcode",
7911  [7] =
7912  	"BadSC: Unsupported SC",
7913  [9] =
7914  	"BadRC: Illegal RC",
7915  [11] =
7916  	"PreemptError: Preempting with same VL",
7917  [12] =
7918  	"PreemptVL15: Preempting a VL15 packet",
7919  };
7920  
7921  #define OPA_LDR_FMCONFIG_OFFSET 16
7922  #define OPA_LDR_PORTRCV_OFFSET 0
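/*
 * Handle a DCC error interrupt: latch error info for later SMA queries,
 * map FMConfig and PortRcv errors to link down reasons, and bounce the
 * link if the port error action policy requests it.
 */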
7923  static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7924  {
7925  	u64 info, hdr0, hdr1;
7926  	const char *extra;
7927  	char buf[96];
7928  	struct hfi1_pportdata *ppd = dd->pport;
7929  	u8 lcl_reason = 0;
7930  	int do_bounce = 0;
7931  
7932  	if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7933  		if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7934  			info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7935  			dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7936  			/* set status bit */
7937  			dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7938  		}
7939  		reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7940  	}
7941  
7942  	if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7943  		struct hfi1_pportdata *ppd = dd->pport;
7944  		/* this counter saturates at (2^32) - 1 */
7945  		if (ppd->link_downed < (u32)UINT_MAX)
7946  			ppd->link_downed++;
7947  		reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7948  	}
7949  
7950  	if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7951  		u8 reason_valid = 1;
7952  
7953  		info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7954  		if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7955  			dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7956  			/* set status bit */
7957  			dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7958  		}
7959  		switch (info) {
7960  		case 0:
7961  		case 1:
7962  		case 2:
7963  		case 3:
7964  		case 4:
7965  		case 5:
7966  		case 6:
7967  			extra = fm_config_txt[info];
7968  			break;
7969  		case 8:
7970  			extra = fm_config_txt[info];
7971  			if (ppd->port_error_action &
7972  			    OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7973  				do_bounce = 1;
7974  				/*
7975  				 * lcl_reason cannot be derived from info
7976  				 * for this error
7977  				 */
7978  				lcl_reason =
7979  				  OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7980  			}
7981  			break;
7982  		default:
7983  			reason_valid = 0;
7984  			snprintf(buf, sizeof(buf), "reserved%lld", info);
7985  			extra = buf;
7986  			break;
7987  		}
7988  
7989  		if (reason_valid && !do_bounce) {
7990  			do_bounce = ppd->port_error_action &
7991  					(1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7992  			lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7993  		}
7994  
7995  		/* just report this */
7996  		dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
7997  					extra);
7998  		reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7999  	}
8000  
8001  	if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
8002  		u8 reason_valid = 1;
8003  
8004  		info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
8005  		hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
8006  		hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
8007  		if (!(dd->err_info_rcvport.status_and_code &
8008  		      OPA_EI_STATUS_SMASK)) {
8009  			dd->err_info_rcvport.status_and_code =
8010  				info & OPA_EI_CODE_SMASK;
8011  			/* set status bit */
8012  			dd->err_info_rcvport.status_and_code |=
8013  				OPA_EI_STATUS_SMASK;
8014  			/*
8015  			 * save first 2 flits in the packet that caused
8016  			 * the error
8017  			 */
8018  			dd->err_info_rcvport.packet_flit1 = hdr0;
8019  			dd->err_info_rcvport.packet_flit2 = hdr1;
8020  		}
8021  		switch (info) {
8022  		case 1:
8023  		case 2:
8024  		case 3:
8025  		case 4:
8026  		case 5:
8027  		case 6:
8028  		case 7:
8029  		case 9:
8030  		case 11:
8031  		case 12:
8032  			extra = port_rcv_txt[info];
8033  			break;
8034  		default:
8035  			reason_valid = 0;
8036  			snprintf(buf, sizeof(buf), "reserved%lld", info);
8037  			extra = buf;
8038  			break;
8039  		}
8040  
8041  		if (reason_valid && !do_bounce) {
8042  			do_bounce = ppd->port_error_action &
8043  					(1 << (OPA_LDR_PORTRCV_OFFSET + info));
8044  			lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
8045  		}
8046  
8047  		/* just report this */
8048  		dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
8049  					"               hdr0 0x%llx, hdr1 0x%llx\n",
8050  					extra, hdr0, hdr1);
8051  
8052  		reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
8053  	}
8054  
8055  	if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
8056  		/* informative only */
8057  		dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
8058  		reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
8059  	}
8060  	if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
8061  		/* informative only */
8062  		dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
8063  		reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
8064  	}
8065  
8066  	if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
8067  		reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
8068  
8069  	/* report any remaining errors */
8070  	if (reg)
8071  		dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
8072  					dcc_err_string(buf, sizeof(buf), reg));
8073  
8074  	if (lcl_reason == 0)
8075  		lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
8076  
8077  	if (do_bounce) {
8078  		dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
8079  					__func__);
8080  		set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
8081  		queue_work(ppd->link_wq, &ppd->link_bounce_work);
8082  	}
8083  }
8084  
8085  static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
8086  {
8087  	char buf[96];
8088  
8089  	dd_dev_info(dd, "LCB Error: %s\n",
8090  		    lcb_err_string(buf, sizeof(buf), reg));
8091  }
8092  
8093  /*
8094   * CCE block DC interrupt.  Source is < 8.
8095   */
8096  static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
8097  {
8098  	const struct err_reg_info *eri = &dc_errs[source];
8099  
8100  	if (eri->handler) {
8101  		interrupt_clear_down(dd, 0, eri);
8102  	} else if (source == 3 /* dc_lbm_int */) {
8103  		/*
8104  		 * This indicates that a parity error has occurred on the
8105  		 * address/control lines presented to the LBM.  The error
8106  		 * is a single pulse, there is no associated error flag,
8107  		 * and it is non-maskable.  This is because if a parity
8108  		 * error occurs on the request, the request is dropped.
8109  		 * This should never occur, but it is nice to know if it
8110  		 * ever does.
8111  		 */
8112  		dd_dev_err(dd, "Parity error in DC LBM block\n");
8113  	} else {
8114  		dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
8115  	}
8116  }
8117  
8118  /*
8119   * TX block send credit interrupt.  Source is < 160.
8120   */
8121  static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
8122  {
8123  	sc_group_release_update(dd, source);
8124  }
8125  
8126  /*
8127   * TX block SDMA interrupt.  Source is < 48.
8128   *
8129   * SDMA interrupts are grouped by type:
8130   *
8131   *	 0 -  N-1 = SDma
8132   *	 N - 2N-1 = SDmaProgress
8133   *	2N - 3N-1 = SDmaIdle
8134   */
8135  static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
8136  {
8137  	/* what interrupt */
8138  	unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
8139  	/* which engine */
8140  	unsigned int which = source % TXE_NUM_SDMA_ENGINES;
8141  
8142  #ifdef CONFIG_SDMA_VERBOSITY
8143  	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
8144  		   slashstrip(__FILE__), __LINE__, __func__);
8145  	sdma_dumpstate(&dd->per_sdma[which]);
8146  #endif
8147  
8148  	if (likely(what < 3 && which < dd->num_sdma)) {
8149  		sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
8150  	} else {
8151  		/* should not happen */
8152  		dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
8153  	}
8154  }
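/*
 * Illustrative example of the decode above (assuming TXE_NUM_SDMA_ENGINES
 * is 16): source 35 decodes to what = 35 / 16 = 2 (the SDmaIdle group) and
 * which = 35 % 16 = 3, so the idle interrupt is delivered to engine 3 with
 * the single status bit 1ull << 35.
 */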
8155  
8156  /**
8157   * is_rcv_avail_int() - User receive context available IRQ handler
8158   * @dd: valid dd
8159   * @source: logical IRQ source (offset from IS_RCVAVAIL_START)
8160   *
8161   * RX block receive available interrupt.  Source is < 160.
8162   *
8163   * This is the general interrupt handler for user (PSM) receive contexts,
8164   * and can only be used for non-threaded IRQs.
8165   */
8166  static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8167  {
8168  	struct hfi1_ctxtdata *rcd;
8169  	char *err_detail;
8170  
8171  	if (likely(source < dd->num_rcv_contexts)) {
8172  		rcd = hfi1_rcd_get_by_index(dd, source);
8173  		if (rcd) {
8174  			handle_user_interrupt(rcd);
8175  			hfi1_rcd_put(rcd);
8176  			return;	/* OK */
8177  		}
8178  		/* received an interrupt, but no rcd */
8179  		err_detail = "dataless";
8180  	} else {
8181  		/* received an interrupt, but are not using that context */
8182  		err_detail = "out of range";
8183  	}
8184  	dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
8185  		   err_detail, source);
8186  }
8187  
8188  /**
8189   * is_rcv_urgent_int() - User receive context urgent IRQ handler
8190   * @dd: valid dd
8191   * @source: logical IRQ source (offset from IS_RCVURGENT_START)
8192   *
8193   * RX block receive urgent interrupt.  Source is < 160.
8194   *
8195   * NOTE: kernel receive contexts specifically do NOT enable this IRQ.
8196   */
8197  static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8198  {
8199  	struct hfi1_ctxtdata *rcd;
8200  	char *err_detail;
8201  
8202  	if (likely(source < dd->num_rcv_contexts)) {
8203  		rcd = hfi1_rcd_get_by_index(dd, source);
8204  		if (rcd) {
8205  			handle_user_interrupt(rcd);
8206  			hfi1_rcd_put(rcd);
8207  			return;	/* OK */
8208  		}
8209  		/* received an interrupt, but no rcd */
8210  		err_detail = "dataless";
8211  	} else {
8212  		/* received an interrupt, but are not using that context */
8213  		err_detail = "out of range";
8214  	}
8215  	dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
8216  		   err_detail, source);
8217  }
8218  
8219  /*
8220   * Reserved range interrupt.  Should not be called in normal operation.
8221   */
8222  static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8223  {
8224  	char name[64];
8225  
8226  	dd_dev_err(dd, "unexpected %s interrupt\n",
8227  		   is_reserved_name(name, sizeof(name), source));
8228  }
8229  
8230  static const struct is_table is_table[] = {
8231  /*
8232   * start		 end
8233   *				name func		interrupt func
8234   */
8235  { IS_GENERAL_ERR_START,  IS_GENERAL_ERR_END,
8236  				is_misc_err_name,	is_misc_err_int },
8237  { IS_SDMAENG_ERR_START,  IS_SDMAENG_ERR_END,
8238  				is_sdma_eng_err_name,	is_sdma_eng_err_int },
8239  { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8240  				is_sendctxt_err_name,	is_sendctxt_err_int },
8241  { IS_SDMA_START,	     IS_SDMA_IDLE_END,
8242  				is_sdma_eng_name,	is_sdma_eng_int },
8243  { IS_VARIOUS_START,	     IS_VARIOUS_END,
8244  				is_various_name,	is_various_int },
8245  { IS_DC_START,	     IS_DC_END,
8246  				is_dc_name,		is_dc_int },
8247  { IS_RCVAVAIL_START,     IS_RCVAVAIL_END,
8248  				is_rcv_avail_name,	is_rcv_avail_int },
8249  { IS_RCVURGENT_START,    IS_RCVURGENT_END,
8250  				is_rcv_urgent_name,	is_rcv_urgent_int },
8251  { IS_SENDCREDIT_START,   IS_SENDCREDIT_END,
8252  				is_send_credit_name,	is_send_credit_int},
8253  { IS_RESERVED_START,     IS_RESERVED_END,
8254  				is_reserved_name,	is_reserved_int},
8255  };
8256  
8257  /*
8258   * Interrupt source interrupt - called when the given source has an interrupt.
8259   * Source is a bit index into an array of 64-bit integers.
8260   */
8261  static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8262  {
8263  	const struct is_table *entry;
8264  
8265  	/* avoids a double compare by walking the table in-order */
8266  	for (entry = &is_table[0]; entry->is_name; entry++) {
8267  		if (source <= entry->end) {
8268  			trace_hfi1_interrupt(dd, entry, source);
8269  			entry->is_int(dd, source - entry->start);
8270  			return;
8271  		}
8272  	}
8273  	/* fell off the end */
8274  	dd_dev_err(dd, "invalid interrupt source %u\n", source);
8275  }
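/*
 * Example of the table walk above: a source falling in
 * [IS_RCVAVAIL_START, IS_RCVAVAIL_END] matches the RcvAvail entry and is
 * dispatched as is_rcv_avail_int(dd, source - IS_RCVAVAIL_START), i.e. the
 * handler sees the receive context index relative to the start of its
 * range rather than the absolute source number.
 */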
8276  
8277  /**
8278   * general_interrupt -  General interrupt handler
8279   * @irq: MSIx IRQ vector
8280   * @data: hfi1 devdata
8281   *
8282   * This is able to correctly handle all non-threaded interrupts.  Receive
8283   * context DATA IRQs are threaded and are not supported by this handler.
8284   *
8285   */
8286  irqreturn_t general_interrupt(int irq, void *data)
8287  {
8288  	struct hfi1_devdata *dd = data;
8289  	u64 regs[CCE_NUM_INT_CSRS];
8290  	u32 bit;
8291  	int i;
8292  	irqreturn_t handled = IRQ_NONE;
8293  
8294  	this_cpu_inc(*dd->int_counter);
8295  
8296  	/* phase 1: scan and clear all handled interrupts */
8297  	for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8298  		if (dd->gi_mask[i] == 0) {
8299  			regs[i] = 0;	/* used later */
8300  			continue;
8301  		}
8302  		regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8303  				dd->gi_mask[i];
8304  		/* only clear if anything is set */
8305  		if (regs[i])
8306  			write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8307  	}
8308  
8309  	/* phase 2: call the appropriate handler */
8310  	for_each_set_bit(bit, (unsigned long *)&regs[0],
8311  			 CCE_NUM_INT_CSRS * 64) {
8312  		is_interrupt(dd, bit);
8313  		handled = IRQ_HANDLED;
8314  	}
8315  
8316  	return handled;
8317  }
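/*
 * Note on phase 2 above: regs[] is scanned as one contiguous bitmap of
 * CCE_NUM_INT_CSRS * 64 bits, so the bit number returned by
 * for_each_set_bit() is exactly the global interrupt source number passed
 * to is_interrupt().
 */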
8318  
8319  irqreturn_t sdma_interrupt(int irq, void *data)
8320  {
8321  	struct sdma_engine *sde = data;
8322  	struct hfi1_devdata *dd = sde->dd;
8323  	u64 status;
8324  
8325  #ifdef CONFIG_SDMA_VERBOSITY
8326  	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8327  		   slashstrip(__FILE__), __LINE__, __func__);
8328  	sdma_dumpstate(sde);
8329  #endif
8330  
8331  	this_cpu_inc(*dd->int_counter);
8332  
8333  	/* This read_csr is really bad in the hot path */
8334  	status = read_csr(dd,
8335  			  CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8336  			  & sde->imask;
8337  	if (likely(status)) {
8338  		/* clear the interrupt(s) */
8339  		write_csr(dd,
8340  			  CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8341  			  status);
8342  
8343  		/* handle the interrupt(s) */
8344  		sdma_engine_interrupt(sde, status);
8345  	} else {
8346  		dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
8347  					sde->this_idx);
8348  	}
8349  	return IRQ_HANDLED;
8350  }
8351  
8352  /*
8353   * Clear the receive interrupt.  Use a read of the interrupt clear CSR
8354   * to ensure that the write completed.  This does NOT guarantee that
8355   * queued DMA writes to memory from the chip are pushed.
8356   */
8357  static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8358  {
8359  	struct hfi1_devdata *dd = rcd->dd;
8360  	u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8361  
8362  	write_csr(dd, addr, rcd->imask);
8363  	/* force the above write on the chip and get a value back */
8364  	(void)read_csr(dd, addr);
8365  }
8366  
8367  /* force the receive interrupt */
8368  void force_recv_intr(struct hfi1_ctxtdata *rcd)
8369  {
8370  	write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8371  }
8372  
8373  /*
8374   * Return non-zero if a packet is present.
8375   *
8376   * This routine is called when rechecking for packets after the RcvAvail
8377   * interrupt has been cleared down.  First, do a quick check of memory for
8378   * a packet present.  If not found, use an expensive CSR read of the context
8379   * tail to determine the actual tail.  The CSR read is necessary because there
8380   * is no method to push pending DMAs to memory other than an interrupt and we
8381   * are trying to determine if we need to force an interrupt.
8382   */
8383  static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8384  {
8385  	u32 tail;
8386  
8387  	if (hfi1_packet_present(rcd))
8388  		return 1;
8389  
8390  	/* fall back to a CSR read, correct independent of DMA_RTAIL */
8391  	tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8392  	return hfi1_rcd_head(rcd) != tail;
8393  }
8394  
8395  /*
8396   * Common code for receive contexts interrupt handlers.
8397   * Update traces, increment kernel IRQ counter and
8398   * setup ASPM when needed.
8399   */
8400  static void receive_interrupt_common(struct hfi1_ctxtdata *rcd)
8401  {
8402  	struct hfi1_devdata *dd = rcd->dd;
8403  
8404  	trace_hfi1_receive_interrupt(dd, rcd);
8405  	this_cpu_inc(*dd->int_counter);
8406  	aspm_ctx_disable(rcd);
8407  }
8408  
8409  /*
8410   * __hfi1_rcd_eoi_intr() - Make HW issue receive interrupt
8411   * when there are packets present in the queue. When calling
8412   * with interrupts enabled please use hfi1_rcd_eoi_intr.
8413   *
8414   * @rcd: valid receive context
8415   */
8416  static void __hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
8417  {
8418  	if (!rcd->rcvhdrq)
8419  		return;
8420  	clear_recv_intr(rcd);
8421  	if (check_packet_present(rcd))
8422  		force_recv_intr(rcd);
8423  }
8424  
8425  /**
8426   * hfi1_rcd_eoi_intr() - End of Interrupt processing action
8427   *
8428   * @rcd: Ptr to hfi1_ctxtdata of receive context
8429   *
8430   *  Hold IRQs so we can safely clear the interrupt and
8431   *  recheck for a packet that may have arrived after the previous
8432   *  check and the interrupt clear.  If a packet arrived, force another
8433   *  interrupt. This routine can be called at the end of receive packet
8434   *  processing in interrupt service routines, the interrupt service thread,
8435   *  and softirqs.
8436   */
8437  static void hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
8438  {
8439  	unsigned long flags;
8440  
8441  	local_irq_save(flags);
8442  	__hfi1_rcd_eoi_intr(rcd);
8443  	local_irq_restore(flags);
8444  }
8445  
8446  /**
8447   * hfi1_netdev_rx_napi - napi poll function to move eoi inline
8448   * @napi: pointer to napi object
8449   * @budget: netdev budget
8450   */
8451  int hfi1_netdev_rx_napi(struct napi_struct *napi, int budget)
8452  {
8453  	struct hfi1_netdev_rxq *rxq = container_of(napi,
8454  			struct hfi1_netdev_rxq, napi);
8455  	struct hfi1_ctxtdata *rcd = rxq->rcd;
8456  	int work_done = 0;
8457  
8458  	work_done = rcd->do_interrupt(rcd, budget);
8459  
8460  	if (work_done < budget) {
8461  		napi_complete_done(napi, work_done);
8462  		hfi1_rcd_eoi_intr(rcd);
8463  	}
8464  
8465  	return work_done;
8466  }
8467  
8468  /* Receive packet napi handler for netdevs VNIC and AIP  */
8469  irqreturn_t receive_context_interrupt_napi(int irq, void *data)
8470  {
8471  	struct hfi1_ctxtdata *rcd = data;
8472  
8473  	receive_interrupt_common(rcd);
8474  
8475  	if (likely(rcd->napi)) {
8476  		if (likely(napi_schedule_prep(rcd->napi)))
8477  			__napi_schedule_irqoff(rcd->napi);
8478  		else
8479  			__hfi1_rcd_eoi_intr(rcd);
8480  	} else {
8481  		WARN_ONCE(1, "Napi IRQ handler without napi set up ctxt=%d\n",
8482  			  rcd->ctxt);
8483  		__hfi1_rcd_eoi_intr(rcd);
8484  	}
8485  
8486  	return IRQ_HANDLED;
8487  }
8488  
8489  /*
8490   * Receive packet IRQ handler.  This routine expects to be on its own IRQ.
8491   * This routine will try to handle packets immediately (latency), but if
8492   * it finds too many, it will invoke the thread handler (bandwidth).  The
8493   * chip receive interrupt is *not* cleared down until this or the thread (if
8494   * invoked) is finished.  The intent is to avoid extra interrupts while we
8495   * are processing packets anyway.
8496   */
8497  irqreturn_t receive_context_interrupt(int irq, void *data)
8498  {
8499  	struct hfi1_ctxtdata *rcd = data;
8500  	int disposition;
8501  
8502  	receive_interrupt_common(rcd);
8503  
8504  	/* receive interrupt remains blocked while processing packets */
8505  	disposition = rcd->do_interrupt(rcd, 0);
8506  
8507  	/*
8508  	 * Too many packets were seen while processing packets in this
8509  	 * IRQ handler.  Invoke the handler thread.  The receive interrupt
8510  	 * remains blocked.
8511  	 */
8512  	if (disposition == RCV_PKT_LIMIT)
8513  		return IRQ_WAKE_THREAD;
8514  
8515  	__hfi1_rcd_eoi_intr(rcd);
8516  	return IRQ_HANDLED;
8517  }
8518  
8519  /*
8520   * Receive packet thread handler.  This expects to be invoked with the
8521   * receive interrupt still blocked.
8522   */
8523  irqreturn_t receive_context_thread(int irq, void *data)
8524  {
8525  	struct hfi1_ctxtdata *rcd = data;
8526  
8527  	/* receive interrupt is still blocked from the IRQ handler */
8528  	(void)rcd->do_interrupt(rcd, 1);
8529  
8530  	hfi1_rcd_eoi_intr(rcd);
8531  
8532  	return IRQ_HANDLED;
8533  }
8534  
8535  /* ========================================================================= */
8536  
8537  u32 read_physical_state(struct hfi1_devdata *dd)
8538  {
8539  	u64 reg;
8540  
8541  	reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8542  	return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8543  				& DC_DC8051_STS_CUR_STATE_PORT_MASK;
8544  }
8545  
8546  u32 read_logical_state(struct hfi1_devdata *dd)
8547  {
8548  	u64 reg;
8549  
8550  	reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8551  	return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8552  				& DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8553  }
8554  
8555  static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8556  {
8557  	u64 reg;
8558  
8559  	reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8560  	/* clear current state, set new state */
8561  	reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8562  	reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8563  	write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8564  }
8565  
8566  /*
8567   * Use the 8051 to read an LCB CSR.
8568   */
8569  static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8570  {
8571  	u32 regno;
8572  	int ret;
8573  
8574  	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8575  		if (acquire_lcb_access(dd, 0) == 0) {
8576  			*data = read_csr(dd, addr);
8577  			release_lcb_access(dd, 0);
8578  			return 0;
8579  		}
8580  		return -EBUSY;
8581  	}
8582  
8583  	/* register is an index of LCB registers: (offset - base) / 8 */
8584  	regno = (addr - DC_LCB_CFG_RUN) >> 3;
8585  	ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8586  	if (ret != HCMD_SUCCESS)
8587  		return -EBUSY;
8588  	return 0;
8589  }
8590  
8591  /*
8592   * Provide a cache for some of the LCB registers in case the LCB is
8593   * unavailable.
8594   * (The LCB is unavailable in certain link states, for example.)
8595   */
8596  struct lcb_datum {
8597  	u32 off;
8598  	u64 val;
8599  };
8600  
8601  static struct lcb_datum lcb_cache[] = {
8602  	{ DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
8603  	{ DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
8604  	{ DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
8605  };
8606  
8607  static void update_lcb_cache(struct hfi1_devdata *dd)
8608  {
8609  	int i;
8610  	int ret;
8611  	u64 val;
8612  
8613  	for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8614  		ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8615  
8616  		/* Update if we get good data */
8617  		if (likely(ret != -EBUSY))
8618  			lcb_cache[i].val = val;
8619  	}
8620  }
8621  
8622  static int read_lcb_cache(u32 off, u64 *val)
8623  {
8624  	int i;
8625  
8626  	for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8627  		if (lcb_cache[i].off == off) {
8628  			*val = lcb_cache[i].val;
8629  			return 0;
8630  		}
8631  	}
8632  
8633  	pr_warn("%s bad offset 0x%x\n", __func__, off);
8634  	return -1;
8635  }
8636  
8637  /*
8638   * Read an LCB CSR.  Access may not be in host control, so check.
8639   * Return 0 on success, -EBUSY on failure.
8640   */
8641  int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8642  {
8643  	struct hfi1_pportdata *ppd = dd->pport;
8644  
8645  	/* if up, go through the 8051 for the value */
8646  	if (ppd->host_link_state & HLS_UP)
8647  		return read_lcb_via_8051(dd, addr, data);
8648  	/* if going up or down, check the cache, otherwise, no access */
8649  	if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
8650  		if (read_lcb_cache(addr, data))
8651  			return -EBUSY;
8652  		return 0;
8653  	}
8654  
8655  	/* otherwise, host has access */
8656  	*data = read_csr(dd, addr);
8657  	return 0;
8658  }
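/*
 * Summary of the access paths above: with the link up, LCB reads go
 * through the 8051; during link transitions only the small lcb_cache[]
 * snapshot is served; otherwise the host owns the LCB and a direct CSR
 * read is used.
 */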
8659  
8660  /*
8661   * Use the 8051 to write an LCB CSR.
8662   */
8663  static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8664  {
8665  	u32 regno;
8666  	int ret;
8667  
8668  	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8669  	    (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
8670  		if (acquire_lcb_access(dd, 0) == 0) {
8671  			write_csr(dd, addr, data);
8672  			release_lcb_access(dd, 0);
8673  			return 0;
8674  		}
8675  		return -EBUSY;
8676  	}
8677  
8678  	/* register is an index of LCB registers: (offset - base) / 8 */
8679  	regno = (addr - DC_LCB_CFG_RUN) >> 3;
8680  	ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8681  	if (ret != HCMD_SUCCESS)
8682  		return -EBUSY;
8683  	return 0;
8684  }
8685  
8686  /*
8687   * Write an LCB CSR.  Access may not be in host control, so check.
8688   * Return 0 on success, -EBUSY on failure.
8689   */
8690  int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8691  {
8692  	struct hfi1_pportdata *ppd = dd->pport;
8693  
8694  	/* if up, go through the 8051 for the value */
8695  	if (ppd->host_link_state & HLS_UP)
8696  		return write_lcb_via_8051(dd, addr, data);
8697  	/* if going up or down, no access */
8698  	if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8699  		return -EBUSY;
8700  	/* otherwise, host has access */
8701  	write_csr(dd, addr, data);
8702  	return 0;
8703  }
8704  
8705  /*
8706   * Returns:
8707   *	< 0 = Linux error, not able to get access
8708   *	> 0 = 8051 command RETURN_CODE
8709   */
8710  static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
8711  			   u64 *out_data)
8712  {
8713  	u64 reg, completed;
8714  	int return_code;
8715  	unsigned long timeout;
8716  
8717  	hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8718  
8719  	mutex_lock(&dd->dc8051_lock);
8720  
8721  	/* We can't send any commands to the 8051 if it's in reset */
8722  	if (dd->dc_shutdown) {
8723  		return_code = -ENODEV;
8724  		goto fail;
8725  	}
8726  
8727  	/*
8728  	 * If an 8051 host command timed out previously, then the 8051 is
8729  	 * stuck.
8730  	 *
8731  	 * On first timeout, attempt to reset and restart the entire DC
8732  	 * block (including 8051). (Is this too big of a hammer?)
8733  	 *
8734  	 * If the 8051 times out a second time, the reset did not bring it
8735  	 * back to healthy life. In that case, fail any subsequent commands.
8736  	 */
8737  	if (dd->dc8051_timed_out) {
8738  		if (dd->dc8051_timed_out > 1) {
8739  			dd_dev_err(dd,
8740  				   "Previous 8051 host command timed out, skipping command %u\n",
8741  				   type);
8742  			return_code = -ENXIO;
8743  			goto fail;
8744  		}
8745  		_dc_shutdown(dd);
8746  		_dc_start(dd);
8747  	}
8748  
8749  	/*
8750  	 * If there is no timeout, then the 8051 command interface is
8751  	 * waiting for a command.
8752  	 */
8753  
8754  	/*
8755  	 * When writing an LCB CSR, out_data contains the full value to
8756  	 * be written, while in_data contains the relative LCB
8757  	 * address in 7:0.  Do the work of distributing the write data
8758  	 * to where it needs to go here, rather than in the caller:
8759  	 *
8760  	 * Write data
8761  	 *   39:00 -> in_data[47:8]
8762  	 *   47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8763  	 *   63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8764  	 */
8765  	if (type == HCMD_WRITE_LCB_CSR) {
8766  		in_data |= ((*out_data) & 0xffffffffffull) << 8;
8767  		/* must preserve COMPLETED - it is tied to hardware */
8768  		reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
8769  		reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
8770  		reg |= ((((*out_data) >> 40) & 0xff) <<
8771  				DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8772  		      | ((((*out_data) >> 48) & 0xffff) <<
8773  				DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8774  		write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8775  	}
8776  
8777  	/*
8778  	 * Do two writes: the first to stabilize the type and req_data, the
8779  	 * second to activate.
8780  	 */
8781  	reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8782  			<< DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8783  		| (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8784  			<< DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8785  	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8786  	reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8787  	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8788  
8789  	/* wait for completion, alternate: interrupt */
8790  	timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8791  	while (1) {
8792  		reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8793  		completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8794  		if (completed)
8795  			break;
8796  		if (time_after(jiffies, timeout)) {
8797  			dd->dc8051_timed_out++;
8798  			dd_dev_err(dd, "8051 host command %u timeout\n", type);
8799  			if (out_data)
8800  				*out_data = 0;
8801  			return_code = -ETIMEDOUT;
8802  			goto fail;
8803  		}
8804  		udelay(2);
8805  	}
8806  
8807  	if (out_data) {
8808  		*out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8809  				& DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8810  		if (type == HCMD_READ_LCB_CSR) {
8811  			/* top 16 bits are in a different register */
8812  			*out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8813  				& DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8814  				<< (48
8815  				    - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8816  		}
8817  	}
8818  	return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8819  				& DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8820  	dd->dc8051_timed_out = 0;
8821  	/*
8822  	 * Clear command for next user.
8823  	 */
8824  	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8825  
8826  fail:
8827  	mutex_unlock(&dd->dc8051_lock);
8828  	return return_code;
8829  }
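/*
 * Worked example of the HCMD_WRITE_LCB_CSR packing above (the value is
 * hypothetical): to write 0x0123456789abcdef, bits 39:0 (0x6789abcdef) are
 * folded into in_data[47:8] alongside the LCB register index in
 * in_data[7:0], while bits 47:40 (0x45) are placed in
 * DC8051_CFG_EXT_DEV_0.RETURN_CODE and bits 63:48 (0x0123) in
 * DC8051_CFG_EXT_DEV_0.RSP_DATA.
 */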
8830  
8831  static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8832  {
8833  	return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8834  }
8835  
8836  int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8837  		     u8 lane_id, u32 config_data)
8838  {
8839  	u64 data;
8840  	int ret;
8841  
8842  	data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8843  		| (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8844  		| (u64)config_data << LOAD_DATA_DATA_SHIFT;
8845  	ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8846  	if (ret != HCMD_SUCCESS) {
8847  		dd_dev_err(dd,
8848  			   "load 8051 config: field id %d, lane %d, err %d\n",
8849  			   (int)field_id, (int)lane_id, ret);
8850  	}
8851  	return ret;
8852  }
8853  
8854  /*
8855   * Read the 8051 firmware "registers".  Use the RAM directly.  Always
8856   * set the result, even on error.
8857   * Return 0 on success, -errno on failure
8858   */
8859  int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8860  		     u32 *result)
8861  {
8862  	u64 big_data;
8863  	u32 addr;
8864  	int ret;
8865  
8866  	/* address start depends on the lane_id */
8867  	if (lane_id < 4)
8868  		addr = (4 * NUM_GENERAL_FIELDS)
8869  			+ (lane_id * 4 * NUM_LANE_FIELDS);
8870  	else
8871  		addr = 0;
8872  	addr += field_id * 4;
8873  
8874  	/* read is in 8-byte chunks, hardware will truncate the address down */
8875  	ret = read_8051_data(dd, addr, 8, &big_data);
8876  
8877  	if (ret == 0) {
8878  		/* extract the 4 bytes we want */
8879  		if (addr & 0x4)
8880  			*result = (u32)(big_data >> 32);
8881  		else
8882  			*result = (u32)big_data;
8883  	} else {
8884  		*result = 0;
8885  		dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8886  			   __func__, lane_id, field_id);
8887  	}
8888  
8889  	return ret;
8890  }
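/*
 * Note on the addressing above: the 8051 "registers" are 4-byte fields in
 * 8051 RAM, general fields first and then per-lane fields, so addr is a
 * byte offset.  read_8051_data() always returns the enclosing 8-byte
 * chunk; bit 2 of addr selects whether the wanted field is the upper or
 * lower 32 bits of that chunk.
 */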
8891  
8892  static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8893  			      u8 continuous)
8894  {
8895  	u32 frame;
8896  
8897  	frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8898  		| power_management << POWER_MANAGEMENT_SHIFT;
8899  	return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8900  				GENERAL_CONFIG, frame);
8901  }
8902  
8903  static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8904  				 u16 vl15buf, u8 crc_sizes)
8905  {
8906  	u32 frame;
8907  
8908  	frame = (u32)vau << VAU_SHIFT
8909  		| (u32)z << Z_SHIFT
8910  		| (u32)vcu << VCU_SHIFT
8911  		| (u32)vl15buf << VL15BUF_SHIFT
8912  		| (u32)crc_sizes << CRC_SIZES_SHIFT;
8913  	return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8914  				GENERAL_CONFIG, frame);
8915  }
8916  
8917  static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
8918  				    u8 *flag_bits, u16 *link_widths)
8919  {
8920  	u32 frame;
8921  
8922  	read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8923  			 &frame);
8924  	*misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8925  	*flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8926  	*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8927  }
8928  
8929  static int write_vc_local_link_mode(struct hfi1_devdata *dd,
8930  				    u8 misc_bits,
8931  				    u8 flag_bits,
8932  				    u16 link_widths)
8933  {
8934  	u32 frame;
8935  
8936  	frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8937  		| (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8938  		| (u32)link_widths << LINK_WIDTH_SHIFT;
8939  	return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8940  		     frame);
8941  }
8942  
8943  static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8944  				 u8 device_rev)
8945  {
8946  	u32 frame;
8947  
8948  	frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8949  		| ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8950  	return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8951  }
8952  
8953  static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8954  				  u8 *device_rev)
8955  {
8956  	u32 frame;
8957  
8958  	read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8959  	*device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8960  	*device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8961  			& REMOTE_DEVICE_REV_MASK;
8962  }
8963  
8964  int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
8965  {
8966  	u32 frame;
8967  	u32 mask;
8968  
8969  	mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
8970  	read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
8971  	/* Clear, then set field */
8972  	frame &= ~mask;
8973  	frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
8974  	return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
8975  				frame);
8976  }
8977  
8978  void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
8979  		      u8 *ver_patch)
8980  {
8981  	u32 frame;
8982  
8983  	read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8984  	*ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
8985  		STS_FM_VERSION_MAJOR_MASK;
8986  	*ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
8987  		STS_FM_VERSION_MINOR_MASK;
8988  
8989  	read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
8990  	*ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
8991  		STS_FM_VERSION_PATCH_MASK;
8992  }
8993  
8994  static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8995  			       u8 *continuous)
8996  {
8997  	u32 frame;
8998  
8999  	read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
9000  	*power_management = (frame >> POWER_MANAGEMENT_SHIFT)
9001  					& POWER_MANAGEMENT_MASK;
9002  	*continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
9003  					& CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
9004  }
9005  
9006  static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
9007  				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
9008  {
9009  	u32 frame;
9010  
9011  	read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
9012  	*vau = (frame >> VAU_SHIFT) & VAU_MASK;
9013  	*z = (frame >> Z_SHIFT) & Z_MASK;
9014  	*vcu = (frame >> VCU_SHIFT) & VCU_MASK;
9015  	*vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
9016  	*crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
9017  }
9018  
9019  static void read_vc_remote_link_width(struct hfi1_devdata *dd,
9020  				      u8 *remote_tx_rate,
9021  				      u16 *link_widths)
9022  {
9023  	u32 frame;
9024  
9025  	read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
9026  			 &frame);
9027  	*remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
9028  				& REMOTE_TX_RATE_MASK;
9029  	*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
9030  }
9031  
9032  static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
9033  {
9034  	u32 frame;
9035  
9036  	read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
9037  	*enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
9038  }
9039  
9040  static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
9041  {
9042  	read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
9043  }
9044  
9045  static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
9046  {
9047  	read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
9048  }
9049  
9050  void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
9051  {
9052  	u32 frame;
9053  	int ret;
9054  
9055  	*link_quality = 0;
9056  	if (dd->pport->host_link_state & HLS_UP) {
9057  		ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
9058  				       &frame);
9059  		if (ret == 0)
9060  			*link_quality = (frame >> LINK_QUALITY_SHIFT)
9061  						& LINK_QUALITY_MASK;
9062  	}
9063  }
9064  
9065  static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
9066  {
9067  	u32 frame;
9068  
9069  	read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
9070  	*pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
9071  }
9072  
9073  static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
9074  {
9075  	u32 frame;
9076  
9077  	read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
9078  	*ldr = (frame & 0xff);
9079  }
9080  
9081  static int read_tx_settings(struct hfi1_devdata *dd,
9082  			    u8 *enable_lane_tx,
9083  			    u8 *tx_polarity_inversion,
9084  			    u8 *rx_polarity_inversion,
9085  			    u8 *max_rate)
9086  {
9087  	u32 frame;
9088  	int ret;
9089  
9090  	ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
9091  	*enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
9092  				& ENABLE_LANE_TX_MASK;
9093  	*tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
9094  				& TX_POLARITY_INVERSION_MASK;
9095  	*rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
9096  				& RX_POLARITY_INVERSION_MASK;
9097  	*max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
9098  	return ret;
9099  }
9100  
9101  static int write_tx_settings(struct hfi1_devdata *dd,
9102  			     u8 enable_lane_tx,
9103  			     u8 tx_polarity_inversion,
9104  			     u8 rx_polarity_inversion,
9105  			     u8 max_rate)
9106  {
9107  	u32 frame;
9108  
9109  	/* no need to mask, all variable sizes match field widths */
9110  	frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
9111  		| tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
9112  		| rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
9113  		| max_rate << MAX_RATE_SHIFT;
9114  	return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
9115  }
9116  
9117  /*
9118   * Read an idle LCB message.
9119   *
9120   * Returns 0 on success, -EINVAL on error
9121   */
9122  static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
9123  {
9124  	int ret;
9125  
9126  	ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
9127  	if (ret != HCMD_SUCCESS) {
9128  		dd_dev_err(dd, "read idle message: type %d, err %d\n",
9129  			   (u32)type, ret);
9130  		return -EINVAL;
9131  	}
9132  	dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
9133  	/* return only the payload as we already know the type */
9134  	*data_out >>= IDLE_PAYLOAD_SHIFT;
9135  	return 0;
9136  }
9137  
9138  /*
9139   * Read an idle SMA message.  To be done in response to a notification from
9140   * the 8051.
9141   *
9142   * Returns 0 on success, -EINVAL on error
9143   */
9144  static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
9145  {
9146  	return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
9147  				 data);
9148  }
9149  
9150  /*
9151   * Send an idle LCB message.
9152   *
9153   * Returns 0 on success, -EINVAL on error
9154   */
9155  static int send_idle_message(struct hfi1_devdata *dd, u64 data)
9156  {
9157  	int ret;
9158  
9159  	dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
9160  	ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
9161  	if (ret != HCMD_SUCCESS) {
9162  		dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
9163  			   data, ret);
9164  		return -EINVAL;
9165  	}
9166  	return 0;
9167  }
9168  
9169  /*
9170   * Send an idle SMA message.
9171   *
9172   * Returns 0 on success, -EINVAL on error
9173   */
9174  int send_idle_sma(struct hfi1_devdata *dd, u64 message)
9175  {
9176  	u64 data;
9177  
9178  	data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
9179  		((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
9180  	return send_idle_message(dd, data);
9181  }
9182  
9183  /*
9184   * Initialize the LCB then do a quick link up.  This may or may not be
9185   * in loopback.
9186   *
9187   * return 0 on success, -errno on error
9188   */
9189  static int do_quick_linkup(struct hfi1_devdata *dd)
9190  {
9191  	int ret;
9192  
9193  	lcb_shutdown(dd, 0);
9194  
9195  	if (loopback) {
9196  		/* LCB_CFG_LOOPBACK.VAL = 2 */
9197  		/* LCB_CFG_LANE_WIDTH.VAL = 0 */
9198  		write_csr(dd, DC_LCB_CFG_LOOPBACK,
9199  			  IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
9200  		write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
9201  	}
9202  
9203  	/* start the LCBs */
9204  	/* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
9205  	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
9206  
9207  	/* simulator only loopback steps */
9208  	if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9209  		/* LCB_CFG_RUN.EN = 1 */
9210  		write_csr(dd, DC_LCB_CFG_RUN,
9211  			  1ull << DC_LCB_CFG_RUN_EN_SHIFT);
9212  
9213  		ret = wait_link_transfer_active(dd, 10);
9214  		if (ret)
9215  			return ret;
9216  
9217  		write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
9218  			  1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
9219  	}
9220  
9221  	if (!loopback) {
9222  		/*
9223  		 * When doing quick linkup and not in loopback, both
9224  		 * sides must be done with LCB set-up before either
9225  		 * starts the quick linkup.  Put a delay here so that
9226  		 * both sides can be started and have a chance to be
9227  		 * done with LCB set up before resuming.
9228  		 */
9229  		dd_dev_err(dd,
9230  			   "Pausing for peer to be finished with LCB set up\n");
9231  		msleep(5000);
9232  		dd_dev_err(dd, "Continuing with quick linkup\n");
9233  	}
9234  
9235  	write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
9236  	set_8051_lcb_access(dd);
9237  
9238  	/*
9239  	 * State "quick" LinkUp request sets the physical link state to
9240  	 * LinkUp without a verify capability sequence.
9241  	 * This state is in simulator v37 and later.
9242  	 */
9243  	ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
9244  	if (ret != HCMD_SUCCESS) {
9245  		dd_dev_err(dd,
9246  			   "%s: set physical link state to quick LinkUp failed with return %d\n",
9247  			   __func__, ret);
9248  
9249  		set_host_lcb_access(dd);
9250  		write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9251  
9252  		if (ret >= 0)
9253  			ret = -EINVAL;
9254  		return ret;
9255  	}
9256  
9257  	return 0; /* success */
9258  }
9259  
9260  /*
9261   * Do all special steps to set up loopback.
9262   */
9263  static int init_loopback(struct hfi1_devdata *dd)
9264  {
9265  	dd_dev_info(dd, "Entering loopback mode\n");
9266  
9267  	/* all loopbacks should disable self GUID check */
9268  	write_csr(dd, DC_DC8051_CFG_MODE,
9269  		  (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
9270  
9271  	/*
9272  	 * The simulator has only one loopback option - LCB.  Switch
9273  	 * to that option, which includes quick link up.
9274  	 *
9275  	 * Accept all valid loopback values.
9276  	 */
9277  	if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9278  	    (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9279  	     loopback == LOOPBACK_CABLE)) {
9280  		loopback = LOOPBACK_LCB;
9281  		quick_linkup = 1;
9282  		return 0;
9283  	}
9284  
9285  	/*
9286  	 * SerDes loopback init sequence is handled in set_local_link_attributes
9287  	 */
9288  	if (loopback == LOOPBACK_SERDES)
9289  		return 0;
9290  
9291  	/* LCB loopback - handled at poll time */
9292  	if (loopback == LOOPBACK_LCB) {
9293  		quick_linkup = 1; /* LCB is always quick linkup */
9294  
9295  		/* not supported in emulation due to emulation RTL changes */
9296  		if (dd->icode == ICODE_FPGA_EMULATION) {
9297  			dd_dev_err(dd,
9298  				   "LCB loopback not supported in emulation\n");
9299  			return -EINVAL;
9300  		}
9301  		return 0;
9302  	}
9303  
9304  	/* external cable loopback requires no extra steps */
9305  	if (loopback == LOOPBACK_CABLE)
9306  		return 0;
9307  
9308  	dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9309  	return -EINVAL;
9310  }
9311  
9312  /*
9313   * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9314   * used in the Verify Capability link width attribute.
9315   */
9316  static u16 opa_to_vc_link_widths(u16 opa_widths)
9317  {
9318  	int i;
9319  	u16 result = 0;
9320  
9321  	static const struct link_bits {
9322  		u16 from;
9323  		u16 to;
9324  	} opa_link_xlate[] = {
9325  		{ OPA_LINK_WIDTH_1X, 1 << (1 - 1)  },
9326  		{ OPA_LINK_WIDTH_2X, 1 << (2 - 1)  },
9327  		{ OPA_LINK_WIDTH_3X, 1 << (3 - 1)  },
9328  		{ OPA_LINK_WIDTH_4X, 1 << (4 - 1)  },
9329  	};
9330  
9331  	for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9332  		if (opa_widths & opa_link_xlate[i].from)
9333  			result |= opa_link_xlate[i].to;
9334  	}
9335  	return result;
9336  }
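/*
 * Example: opa_widths of OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X translates
 * to bits 0 and 3 set in the Verify Capability encoding, i.e. width N maps
 * to bit (N - 1).
 */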
9337  
9338  /*
9339   * Set link attributes before moving to polling.
9340   */
9341  static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9342  {
9343  	struct hfi1_devdata *dd = ppd->dd;
9344  	u8 enable_lane_tx;
9345  	u8 tx_polarity_inversion;
9346  	u8 rx_polarity_inversion;
9347  	int ret;
9348  	u32 misc_bits = 0;
9349  	/* reset our fabric serdes to clear any lingering problems */
9350  	fabric_serdes_reset(dd);
9351  
9352  	/* set the local tx rate - need to read-modify-write */
9353  	ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
9354  			       &rx_polarity_inversion, &ppd->local_tx_rate);
9355  	if (ret)
9356  		goto set_local_link_attributes_fail;
9357  
9358  	if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
9359  		/* set the tx rate to the fastest enabled */
9360  		if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9361  			ppd->local_tx_rate = 1;
9362  		else
9363  			ppd->local_tx_rate = 0;
9364  	} else {
9365  		/* set the tx rate to all enabled */
9366  		ppd->local_tx_rate = 0;
9367  		if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9368  			ppd->local_tx_rate |= 2;
9369  		if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9370  			ppd->local_tx_rate |= 1;
9371  	}
9372  
9373  	enable_lane_tx = 0xF; /* enable all four lanes */
9374  	ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9375  				rx_polarity_inversion, ppd->local_tx_rate);
9376  	if (ret != HCMD_SUCCESS)
9377  		goto set_local_link_attributes_fail;
9378  
9379  	ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
9380  	if (ret != HCMD_SUCCESS) {
9381  		dd_dev_err(dd,
9382  			   "Failed to set host interface version, return 0x%x\n",
9383  			   ret);
9384  		goto set_local_link_attributes_fail;
9385  	}
9386  
9387  	/*
9388  	 * DC supports continuous updates.
9389  	 */
9390  	ret = write_vc_local_phy(dd,
9391  				 0 /* no power management */,
9392  				 1 /* continuous updates */);
9393  	if (ret != HCMD_SUCCESS)
9394  		goto set_local_link_attributes_fail;
9395  
9396  	/* z=1 in the next call: AU of 0 is not supported by the hardware */
9397  	ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9398  				    ppd->port_crc_mode_enabled);
9399  	if (ret != HCMD_SUCCESS)
9400  		goto set_local_link_attributes_fail;
9401  
9402  	/*
9403  	 * SerDes loopback init sequence requires
9404  	 * setting bit 0 of MISC_CONFIG_BITS
9405  	 */
9406  	if (loopback == LOOPBACK_SERDES)
9407  		misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;
9408  
9409  	/*
9410  	 * An external device configuration request is used to reset the LCB
9411  	 * in order to retry obtaining operational lanes when the first
9412  	 * attempt is unsuccessful.
9413  	 */
9414  	if (dd->dc8051_ver >= dc8051_ver(1, 25, 0))
9415  		misc_bits |= 1 << EXT_CFG_LCB_RESET_SUPPORTED_SHIFT;
9416  
9417  	ret = write_vc_local_link_mode(dd, misc_bits, 0,
9418  				       opa_to_vc_link_widths(
9419  						ppd->link_width_enabled));
9420  	if (ret != HCMD_SUCCESS)
9421  		goto set_local_link_attributes_fail;
9422  
9423  	/* let peer know who we are */
9424  	ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9425  	if (ret == HCMD_SUCCESS)
9426  		return 0;
9427  
9428  set_local_link_attributes_fail:
9429  	dd_dev_err(dd,
9430  		   "Failed to set local link attributes, return 0x%x\n",
9431  		   ret);
9432  	return ret;
9433  }
9434  
9435  /*
9436   * Call this to start the link.
9437   * Do not do anything if the link is disabled.
9438   * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
9439   */
9440  int start_link(struct hfi1_pportdata *ppd)
9441  {
9442  	/*
9443  	 * Tune the SerDes to a ballpark setting for optimal signal and bit
9444  	 * error rate.  Needs to be done before starting the link.
9445  	 */
9446  	tune_serdes(ppd);
9447  
9448  	if (!ppd->driver_link_ready) {
9449  		dd_dev_info(ppd->dd,
9450  			    "%s: stopping link start because driver is not ready\n",
9451  			    __func__);
9452  		return 0;
9453  	}
9454  
9455  	/*
9456  	 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9457  	 * pkey table can be configured properly if the HFI unit is connected
9458  	 * to a switch port with MgmtAllowed=NO.
9459  	 */
9460  	clear_full_mgmt_pkey(ppd);
9461  
9462  	return set_link_state(ppd, HLS_DN_POLL);
9463  }
9464  
9465  static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9466  {
9467  	struct hfi1_devdata *dd = ppd->dd;
9468  	u64 mask;
9469  	unsigned long timeout;
9470  
9471  	/*
9472  	 * Some QSFP cables have a quirk that asserts the IntN line as a side
9473  	 * effect of power up on plug-in. We ignore this false positive
9474  	 * interrupt until the module has finished powering up by waiting for
9475  	 * a minimum timeout of the module inrush initialization time of
9476  	 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9477  	 * module have stabilized.
9478  	 */
9479  	msleep(500);
9480  
9481  	/*
9482  	 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
9483  	 */
9484  	timeout = jiffies + msecs_to_jiffies(2000);
9485  	while (1) {
9486  		mask = read_csr(dd, dd->hfi1_id ?
9487  				ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9488  		if (!(mask & QSFP_HFI0_INT_N))
9489  			break;
9490  		if (time_after(jiffies, timeout)) {
9491  			dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9492  				    __func__);
9493  			break;
9494  		}
9495  		udelay(2);
9496  	}
9497  }
9498  
9499  static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9500  {
9501  	struct hfi1_devdata *dd = ppd->dd;
9502  	u64 mask;
9503  
9504  	mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9505  	if (enable) {
9506  		/*
9507  		 * Clear the status register to avoid an immediate interrupt
9508  		 * when we re-enable the IntN pin
9509  		 */
9510  		write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9511  			  QSFP_HFI0_INT_N);
9512  		mask |= (u64)QSFP_HFI0_INT_N;
9513  	} else {
9514  		mask &= ~(u64)QSFP_HFI0_INT_N;
9515  	}
9516  	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9517  }
9518  
9519  int reset_qsfp(struct hfi1_pportdata *ppd)
9520  {
9521  	struct hfi1_devdata *dd = ppd->dd;
9522  	u64 mask, qsfp_mask;
9523  
9524  	/* Disable INT_N from triggering QSFP interrupts */
9525  	set_qsfp_int_n(ppd, 0);
9526  
9527  	/* Reset the QSFP */
9528  	mask = (u64)QSFP_HFI0_RESET_N;
9529  
9530  	qsfp_mask = read_csr(dd,
9531  			     dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9532  	qsfp_mask &= ~mask;
9533  	write_csr(dd,
9534  		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9535  
9536  	udelay(10);
9537  
9538  	qsfp_mask |= mask;
9539  	write_csr(dd,
9540  		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9541  
9542  	wait_for_qsfp_init(ppd);
9543  
9544  	/*
9545  	 * Allow INT_N to trigger the QSFP interrupt to watch
9546  	 * for alarms and warnings
9547  	 */
9548  	set_qsfp_int_n(ppd, 1);
9549  
9550  	/*
9551  	 * After the reset, AOC transmitters are enabled by default. They need
9552  	 * to be turned off to complete the QSFP setup before they can be
9553  	 * enabled again.
9554  	 */
9555  	return set_qsfp_tx(ppd, 0);
9556  }
9557  
9558  static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9559  					u8 *qsfp_interrupt_status)
9560  {
9561  	struct hfi1_devdata *dd = ppd->dd;
9562  
9563  	if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9564  	    (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9565  		dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
9566  			   __func__);
9567  
9568  	if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9569  	    (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9570  		dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
9571  			   __func__);
9572  
9573  	/*
9574  	 * The remaining alarms/warnings don't matter if the link is down.
9575  	 */
9576  	if (ppd->host_link_state & HLS_DOWN)
9577  		return 0;
9578  
9579  	if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9580  	    (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9581  		dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
9582  			   __func__);
9583  
9584  	if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9585  	    (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9586  		dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
9587  			   __func__);
9588  
9589  	/* Byte 2 is vendor specific */
9590  
9591  	if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9592  	    (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9593  		dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
9594  			   __func__);
9595  
9596  	if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9597  	    (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9598  		dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
9599  			   __func__);
9600  
9601  	if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9602  	    (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9603  		dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
9604  			   __func__);
9605  
9606  	if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9607  	    (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9608  		dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
9609  			   __func__);
9610  
9611  	if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9612  	    (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9613  		dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
9614  			   __func__);
9615  
9616  	if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9617  	    (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9618  		dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
9619  			   __func__);
9620  
9621  	if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9622  	    (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9623  		dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
9624  			   __func__);
9625  
9626  	if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9627  	    (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9628  		dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
9629  			   __func__);
9630  
9631  	if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9632  	    (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9633  		dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
9634  			   __func__);
9635  
9636  	if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9637  	    (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9638  		dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
9639  			   __func__);
9640  
9641  	if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9642  	    (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9643  		dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
9644  			   __func__);
9645  
9646  	if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9647  	    (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9648  		dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
9649  			   __func__);
9650  
9651  	/* Bytes 9-10 and 11-12 are reserved */
9652  	/* Bytes 13-15 are vendor specific */
9653  
9654  	return 0;
9655  }
9656  
9657  /* This routine will only be scheduled if the QSFP module present is asserted */
9658  void qsfp_event(struct work_struct *work)
9659  {
9660  	struct qsfp_data *qd;
9661  	struct hfi1_pportdata *ppd;
9662  	struct hfi1_devdata *dd;
9663  
9664  	qd = container_of(work, struct qsfp_data, qsfp_work);
9665  	ppd = qd->ppd;
9666  	dd = ppd->dd;
9667  
9668  	/* Sanity check */
9669  	if (!qsfp_mod_present(ppd))
9670  		return;
9671  
9672  	if (ppd->host_link_state == HLS_DN_DISABLE) {
9673  		dd_dev_info(ppd->dd,
9674  			    "%s: stopping link start because link is disabled\n",
9675  			    __func__);
9676  		return;
9677  	}
9678  
9679  	/*
9680  	 * Turn DC back on after cable has been re-inserted. Up until
9681  	 * now, the DC has been in reset to save power.
9682  	 */
9683  	dc_start(dd);
9684  
9685  	if (qd->cache_refresh_required) {
9686  		set_qsfp_int_n(ppd, 0);
9687  
9688  		wait_for_qsfp_init(ppd);
9689  
9690  		/*
9691  		 * Allow INT_N to trigger the QSFP interrupt to watch
9692  		 * for alarms and warnings
9693  		 */
9694  		set_qsfp_int_n(ppd, 1);
9695  
9696  		start_link(ppd);
9697  	}
9698  
9699  	if (qd->check_interrupt_flags) {
9700  		u8 qsfp_interrupt_status[16] = {0,};
9701  
9702  		if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9703  				  &qsfp_interrupt_status[0], 16) != 16) {
9704  			dd_dev_info(dd,
9705  				    "%s: Failed to read status of QSFP module\n",
9706  				    __func__);
9707  		} else {
9708  			unsigned long flags;
9709  
9710  			handle_qsfp_error_conditions(
9711  					ppd, qsfp_interrupt_status);
9712  			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9713  			ppd->qsfp_info.check_interrupt_flags = 0;
9714  			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9715  					       flags);
9716  		}
9717  	}
9718  }
9719  
9720  void init_qsfp_int(struct hfi1_devdata *dd)
9721  {
9722  	struct hfi1_pportdata *ppd = dd->pport;
9723  	u64 qsfp_mask;
9724  
9725  	qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9726  	/* Clear current status to avoid spurious interrupts */
9727  	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9728  		  qsfp_mask);
9729  	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9730  		  qsfp_mask);
9731  
9732  	set_qsfp_int_n(ppd, 0);
9733  
9734  	/* Handle active low nature of INT_N and MODPRST_N pins */
9735  	if (qsfp_mod_present(ppd))
9736  		qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9737  	write_csr(dd,
9738  		  dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9739  		  qsfp_mask);
9740  
9741  	/* Enable the appropriate QSFP IRQ source */
9742  	if (!dd->hfi1_id)
9743  		set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true);
9744  	else
9745  		set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true);
9746  }
9747  
9748  /*
9749   * Do a one-time initialize of the LCB block.
9750   */
9751  static void init_lcb(struct hfi1_devdata *dd)
9752  {
9753  	/* simulator does not correctly handle LCB cclk loopback, skip */
9754  	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9755  		return;
9756  
9757  	/* the DC has been reset earlier in the driver load */
9758  
9759  	/* set LCB for cclk loopback on the port */
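	/* hold the TX FIFOs in reset while configuring, then release the reset below */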
9760  	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9761  	write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9762  	write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9763  	write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9764  	write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9765  	write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9766  	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9767  }
9768  
9769  /*
9770   * Perform a test read on the QSFP.  Return 0 on success, -ERRNO
9771   * on error.
9772   */
9773  static int test_qsfp_read(struct hfi1_pportdata *ppd)
9774  {
9775  	int ret;
9776  	u8 status;
9777  
9778  	/*
9779  	 * Report success if this is not a QSFP or if it is a QSFP but the
9780  	 * cable is not present
9781  	 */
9782  	if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
9783  		return 0;
9784  
9785  	/* read byte 2, the status byte */
9786  	ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9787  	if (ret < 0)
9788  		return ret;
9789  	if (ret != 1)
9790  		return -EIO;
9791  
9792  	return 0; /* success */
9793  }
9794  
9795  /*
9796   * Values for QSFP retry.
9797   *
9798   * Give up after 10s (20 x 500ms).  The overall timeout was empirically
9799   * arrived at from experience on a large cluster.
9800   */
9801  #define MAX_QSFP_RETRIES 20
9802  #define QSFP_RETRY_WAIT 500 /* msec */
9803  
9804  /*
9805   * Try a QSFP read.  If it fails, schedule a retry for later.
9806   * Called on first link activation after driver load.
9807   */
9808  static void try_start_link(struct hfi1_pportdata *ppd)
9809  {
9810  	if (test_qsfp_read(ppd)) {
9811  		/* read failed */
9812  		if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9813  			dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9814  			return;
9815  		}
9816  		dd_dev_info(ppd->dd,
9817  			    "QSFP not responding, waiting and retrying %d\n",
9818  			    (int)ppd->qsfp_retry_count);
9819  		ppd->qsfp_retry_count++;
9820  		queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
9821  				   msecs_to_jiffies(QSFP_RETRY_WAIT));
9822  		return;
9823  	}
9824  	ppd->qsfp_retry_count = 0;
9825  
9826  	start_link(ppd);
9827  }
9828  
9829  /*
9830   * Workqueue function to start the link after a delay.
9831   */
9832  void handle_start_link(struct work_struct *work)
9833  {
9834  	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9835  						  start_link_work.work);
9836  	try_start_link(ppd);
9837  }
9838  
9839  int bringup_serdes(struct hfi1_pportdata *ppd)
9840  {
9841  	struct hfi1_devdata *dd = ppd->dd;
9842  	u64 guid;
9843  	int ret;
9844  
9845  	if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9846  		add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9847  
9848  	guid = ppd->guids[HFI1_PORT_GUID_INDEX];
9849  	if (!guid) {
9850  		if (dd->base_guid)
9851  			guid = dd->base_guid + ppd->port - 1;
9852  		ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
9853  	}
9854  
9855  	/* Set linkinit_reason on power up per OPA spec */
9856  	ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9857  
9858  	/* one-time init of the LCB */
9859  	init_lcb(dd);
9860  
9861  	if (loopback) {
9862  		ret = init_loopback(dd);
9863  		if (ret < 0)
9864  			return ret;
9865  	}
9866  
9867  	get_port_type(ppd);
9868  	if (ppd->port_type == PORT_TYPE_QSFP) {
9869  		set_qsfp_int_n(ppd, 0);
9870  		wait_for_qsfp_init(ppd);
9871  		set_qsfp_int_n(ppd, 1);
9872  	}
9873  
9874  	try_start_link(ppd);
9875  	return 0;
9876  }
9877  
9878  void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9879  {
9880  	struct hfi1_devdata *dd = ppd->dd;
9881  
9882  	/*
9883  	 * Shut down the link and keep it down.  First clear the flag that says
9884  	 * the driver wants to allow the link to be up (driver_link_ready).
9885  	 * Then make sure the link is not automatically restarted
9886  	 * (link_enabled).  Cancel any pending restart.  And finally
9887  	 * go offline.
9888  	 */
9889  	ppd->driver_link_ready = 0;
9890  	ppd->link_enabled = 0;
9891  
9892  	ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9893  	flush_delayed_work(&ppd->start_link_work);
9894  	cancel_delayed_work_sync(&ppd->start_link_work);
9895  
9896  	ppd->offline_disabled_reason =
9897  			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_REBOOT);
9898  	set_link_down_reason(ppd, OPA_LINKDOWN_REASON_REBOOT, 0,
9899  			     OPA_LINKDOWN_REASON_REBOOT);
9900  	set_link_state(ppd, HLS_DN_OFFLINE);
9901  
9902  	/* disable the port */
9903  	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9904  	cancel_work_sync(&ppd->freeze_work);
9905  }
9906  
9907  static inline int init_cpu_counters(struct hfi1_devdata *dd)
9908  {
9909  	struct hfi1_pportdata *ppd;
9910  	int i;
9911  
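	/* allocate per-CPU RC ack, queued-ack, and delayed-completion counters for each port */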
9912  	ppd = (struct hfi1_pportdata *)(dd + 1);
9913  	for (i = 0; i < dd->num_pports; i++, ppd++) {
9914  		ppd->ibport_data.rvp.rc_acks = NULL;
9915  		ppd->ibport_data.rvp.rc_qacks = NULL;
9916  		ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9917  		ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9918  		ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9919  		if (!ppd->ibport_data.rvp.rc_acks ||
9920  		    !ppd->ibport_data.rvp.rc_delayed_comp ||
9921  		    !ppd->ibport_data.rvp.rc_qacks)
9922  			return -ENOMEM;
9923  	}
9924  
9925  	return 0;
9926  }
9927  
9928  /*
9929   * index is the index into the receive array
9930   */
9931  void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9932  		  u32 type, unsigned long pa, u16 order)
9933  {
9934  	u64 reg;
9935  
9936  	if (!(dd->flags & HFI1_PRESENT))
9937  		goto done;
9938  
9939  	if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
9940  		pa = 0;
9941  		order = 0;
9942  	} else if (type > PT_INVALID) {
9943  		dd_dev_err(dd,
9944  			   "unexpected receive array type %u for index %u, not handled\n",
9945  			   type, index);
9946  		goto done;
9947  	}
9948  	trace_hfi1_put_tid(dd, index, type, pa, order);
9949  
9950  #define RT_ADDR_SHIFT 12	/* 4KB kernel address boundary */
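	/*
	 * Build the RcvArray entry: write enable, buffer size order, and the
	 * physical address in 4KB units.  For example (hypothetical address),
	 * pa = 0x12345000 with order 0 yields an address field of 0x12345.
	 */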
9951  	reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9952  		| (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9953  		| ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9954  					<< RCV_ARRAY_RT_ADDR_SHIFT;
9955  	trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
9956  	writeq(reg, dd->rcvarray_wc + (index * 8));
9957  
9958  	if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
9959  		/*
9960  		 * Eager entries are written and flushed
9961  		 *
9962  		 * Expected entries are flushed every 4 writes
9963  		 */
9964  		flush_wc();
9965  done:
9966  	return;
9967  }
9968  
9969  void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9970  {
9971  	struct hfi1_devdata *dd = rcd->dd;
9972  	u32 i;
9973  
9974  	/* this could be optimized */
9975  	for (i = rcd->eager_base; i < rcd->eager_base +
9976  		     rcd->egrbufs.alloced; i++)
9977  		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9978  
9979  	for (i = rcd->expected_base;
9980  			i < rcd->expected_base + rcd->expected_count; i++)
9981  		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9982  }
9983  
9984  static const char * const ib_cfg_name_strings[] = {
9985  	"HFI1_IB_CFG_LIDLMC",
9986  	"HFI1_IB_CFG_LWID_DG_ENB",
9987  	"HFI1_IB_CFG_LWID_ENB",
9988  	"HFI1_IB_CFG_LWID",
9989  	"HFI1_IB_CFG_SPD_ENB",
9990  	"HFI1_IB_CFG_SPD",
9991  	"HFI1_IB_CFG_RXPOL_ENB",
9992  	"HFI1_IB_CFG_LREV_ENB",
9993  	"HFI1_IB_CFG_LINKLATENCY",
9994  	"HFI1_IB_CFG_HRTBT",
9995  	"HFI1_IB_CFG_OP_VLS",
9996  	"HFI1_IB_CFG_VL_HIGH_CAP",
9997  	"HFI1_IB_CFG_VL_LOW_CAP",
9998  	"HFI1_IB_CFG_OVERRUN_THRESH",
9999  	"HFI1_IB_CFG_PHYERR_THRESH",
10000  	"HFI1_IB_CFG_LINKDEFAULT",
10001  	"HFI1_IB_CFG_PKEYS",
10002  	"HFI1_IB_CFG_MTU",
10003  	"HFI1_IB_CFG_LSTATE",
10004  	"HFI1_IB_CFG_VL_HIGH_LIMIT",
10005  	"HFI1_IB_CFG_PMA_TICKS",
10006  	"HFI1_IB_CFG_PORT"
10007  };
10008  
10009  static const char *ib_cfg_name(int which)
10010  {
10011  	if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
10012  		return "invalid";
10013  	return ib_cfg_name_strings[which];
10014  }
10015  
10016  int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
10017  {
10018  	struct hfi1_devdata *dd = ppd->dd;
10019  	int val = 0;
10020  
10021  	switch (which) {
10022  	case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
10023  		val = ppd->link_width_enabled;
10024  		break;
10025  	case HFI1_IB_CFG_LWID: /* currently active Link-width */
10026  		val = ppd->link_width_active;
10027  		break;
10028  	case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10029  		val = ppd->link_speed_enabled;
10030  		break;
10031  	case HFI1_IB_CFG_SPD: /* current Link speed */
10032  		val = ppd->link_speed_active;
10033  		break;
10034  
10035  	case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
10036  	case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
10037  	case HFI1_IB_CFG_LINKLATENCY:
10038  		goto unimplemented;
10039  
10040  	case HFI1_IB_CFG_OP_VLS:
10041  		val = ppd->actual_vls_operational;
10042  		break;
10043  	case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
10044  		val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
10045  		break;
10046  	case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
10047  		val = VL_ARB_LOW_PRIO_TABLE_SIZE;
10048  		break;
10049  	case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10050  		val = ppd->overrun_threshold;
10051  		break;
10052  	case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10053  		val = ppd->phy_error_threshold;
10054  		break;
10055  	case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10056  		val = HLS_DEFAULT;
10057  		break;
10058  
10059  	case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
10060  	case HFI1_IB_CFG_PMA_TICKS:
10061  	default:
10062  unimplemented:
10063  		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10064  			dd_dev_info(
10065  				dd,
10066  				"%s: which %s: not implemented\n",
10067  				__func__,
10068  				ib_cfg_name(which));
10069  		break;
10070  	}
10071  
10072  	return val;
10073  }
10074  
10075  /*
10076   * The largest MAD packet size.
10077   */
10078  #define MAX_MAD_PACKET 2048
10079  
10080  /*
10081   * Return the maximum header bytes that can go on the _wire_
10082   * for this device. This count includes the ICRC which is
10083   * not part of the packet held in memory but it is appended
10084   * by the HW.
10085   * This is dependent on the device's receive header entry size.
10086   * HFI allows this to be set per-receive context, but the
10087   * driver presently enforces a global value.
10088   */
10089  u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
10090  {
10091  	/*
10092  	 * The maximum non-payload (MTU) bytes in LRH.PktLen are
10093  	 * the Receive Header Entry Size minus the PBC (or RHF) size
10094  	 * plus one DW for the ICRC appended by HW.
10095  	 *
10096  	 * dd->rcd[0].rcvhdrqentsize is in DW.
10097  	 * We use rcd[0] as all contexts will have the same value. Also,
10098  	 * the first kernel context would have been allocated by now so
10099  	 * we are guaranteed a valid value.
10100  	 */
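	/* e.g. a (hypothetical) hdrqentsize of 32 DW gives (32 - 2 + 1) << 2 = 124 bytes */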
10101  	return (get_hdrqentsize(dd->rcd[0]) - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
10102  }
10103  
10104  /*
10105   * Set Send Length
10106   * @ppd: per port data
10107   *
10108   * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
10109   * registers compare against LRH.PktLen, so use the max bytes included
10110   * in the LRH.
10111   *
10112   * This routine changes all VL values except VL15, which it maintains at
10113   * the same value.
10114   */
10115  static void set_send_length(struct hfi1_pportdata *ppd)
10116  {
10117  	struct hfi1_devdata *dd = ppd->dd;
10118  	u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
10119  	u32 maxvlmtu = dd->vld[15].mtu;
10120  	u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
10121  			      & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
10122  		SEND_LEN_CHECK1_LEN_VL15_SHIFT;
10123  	int i, j;
10124  	u32 thres;
10125  
10126  	for (i = 0; i < ppd->vls_supported; i++) {
10127  		if (dd->vld[i].mtu > maxvlmtu)
10128  			maxvlmtu = dd->vld[i].mtu;
10129  		if (i <= 3)
10130  			len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
10131  				 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
10132  				((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
10133  		else
10134  			len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
10135  				 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
10136  				((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
10137  	}
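	/* VLs 0-3 go into SEND_LEN_CHECK0; VLs 4-7 and VL15 go into SEND_LEN_CHECK1 */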
10138  	write_csr(dd, SEND_LEN_CHECK0, len1);
10139  	write_csr(dd, SEND_LEN_CHECK1, len2);
10140  	/* adjust kernel credit return thresholds based on new MTUs */
10141  	/* all kernel receive contexts have the same hdrqentsize */
10142  	for (i = 0; i < ppd->vls_supported; i++) {
10143  		thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
10144  			    sc_mtu_to_threshold(dd->vld[i].sc,
10145  						dd->vld[i].mtu,
10146  						get_hdrqentsize(dd->rcd[0])));
10147  		for (j = 0; j < INIT_SC_PER_VL; j++)
10148  			sc_set_cr_threshold(
10149  					pio_select_send_context_vl(dd, j, i),
10150  					    thres);
10151  	}
10152  	thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
10153  		    sc_mtu_to_threshold(dd->vld[15].sc,
10154  					dd->vld[15].mtu,
10155  					dd->rcd[0]->rcvhdrqentsize));
10156  	sc_set_cr_threshold(dd->vld[15].sc, thres);
10157  
10158  	/* Adjust maximum MTU for the port in DC */
10159  	dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
10160  		(ilog2(maxvlmtu >> 8) + 1);
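	/* e.g. a (hypothetical) maxvlmtu of 8192 encodes as ilog2(8192 >> 8) + 1 = 6 */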
10161  	len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
10162  	len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
10163  	len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
10164  		DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
10165  	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
10166  }
10167  
10168  static void set_lidlmc(struct hfi1_pportdata *ppd)
10169  {
10170  	int i;
10171  	u64 sreg = 0;
10172  	struct hfi1_devdata *dd = ppd->dd;
10173  	u32 mask = ~((1U << ppd->lmc) - 1);
10174  	u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
10175  	u32 lid;
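	/* the LMC masks off the low bits of the LID, e.g. lmc = 2 gives mask 0xfffffffc */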
10176  
10177  	/*
10178  	 * Program 0 into the CSR if the port LID is extended. This prevents
10179  	 * 9B packets from being sent out for large LIDs.
10180  	 */
10181  	lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
10182  	c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
10183  		| DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
10184  	c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
10185  			<< DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
10186  	      ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
10187  			<< DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
10188  	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
10189  
10190  	/*
10191  	 * Iterate over all the send contexts and set their SLID check
10192  	 */
10193  	sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
10194  			SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
10195  	       (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
10196  			SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
10197  
10198  	for (i = 0; i < chip_send_contexts(dd); i++) {
10199  		hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
10200  			  i, (u32)sreg);
10201  		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
10202  	}
10203  
10204  	/* Now we have to do the same thing for the sdma engines */
10205  	sdma_update_lmc(dd, mask, lid);
10206  }
10207  
10208  static const char *state_completed_string(u32 completed)
10209  {
10210  	static const char * const state_completed[] = {
10211  		"EstablishComm",
10212  		"OptimizeEQ",
10213  		"VerifyCap"
10214  	};
10215  
10216  	if (completed < ARRAY_SIZE(state_completed))
10217  		return state_completed[completed];
10218  
10219  	return "unknown";
10220  }
10221  
10222  static const char all_lanes_dead_timeout_expired[] =
10223  	"All lanes were inactive – was the interconnect media removed?";
10224  static const char tx_out_of_policy[] =
10225  	"Passing lanes on local port do not meet the local link width policy";
10226  static const char no_state_complete[] =
10227  	"State timeout occurred before link partner completed the state";
10228  static const char * const state_complete_reasons[] = {
10229  	[0x00] = "Reason unknown",
10230  	[0x01] = "Link was halted by driver, refer to LinkDownReason",
10231  	[0x02] = "Link partner reported failure",
10232  	[0x10] = "Unable to achieve frame sync on any lane",
10233  	[0x11] =
10234  	  "Unable to find a common bit rate with the link partner",
10235  	[0x12] =
10236  	  "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
10237  	[0x13] =
10238  	  "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
10239  	[0x14] = no_state_complete,
10240  	[0x15] =
10241  	  "State timeout occurred before link partner identified equalization presets",
10242  	[0x16] =
10243  	  "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
10244  	[0x17] = tx_out_of_policy,
10245  	[0x20] = all_lanes_dead_timeout_expired,
10246  	[0x21] =
10247  	  "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
10248  	[0x22] = no_state_complete,
10249  	[0x23] =
10250  	  "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
10251  	[0x24] = tx_out_of_policy,
10252  	[0x30] = all_lanes_dead_timeout_expired,
10253  	[0x31] =
10254  	  "State timeout occurred waiting for host to process received frames",
10255  	[0x32] = no_state_complete,
10256  	[0x33] =
10257  	  "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10258  	[0x34] = tx_out_of_policy,
10259  	[0x35] = "Negotiated link width is mutually exclusive",
10260  	[0x36] =
10261  	  "Timed out before receiving verifycap frames in VerifyCap.Exchange",
10262  	[0x37] = "Unable to resolve secure data exchange",
10263  };
10264  
10265  static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10266  						     u32 code)
10267  {
10268  	const char *str = NULL;
10269  
10270  	if (code < ARRAY_SIZE(state_complete_reasons))
10271  		str = state_complete_reasons[code];
10272  
10273  	if (str)
10274  		return str;
10275  	return "Reserved";
10276  }
10277  
10278  /* describe the given last state complete frame */
10279  static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10280  				  const char *prefix)
10281  {
10282  	struct hfi1_devdata *dd = ppd->dd;
10283  	u32 success;
10284  	u32 state;
10285  	u32 reason;
10286  	u32 lanes;
10287  
10288  	/*
10289  	 * Decode frame:
10290  	 *  [ 0: 0] - success
10291  	 *  [ 3: 1] - state
10292  	 *  [ 7: 4] - next state timeout
10293  	 *  [15: 8] - reason code
10294  	 *  [31:16] - lanes
10295  	 */
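	/*
	 * Example (hypothetical frame): 0x000f2105 decodes as success = 1,
	 * state = 2 (VerifyCap), reason = 0x21, lanes = 0x000f.
	 */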
10296  	success = frame & 0x1;
10297  	state = (frame >> 1) & 0x7;
10298  	reason = (frame >> 8) & 0xff;
10299  	lanes = (frame >> 16) & 0xffff;
10300  
10301  	dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10302  		   prefix, frame);
10303  	dd_dev_err(dd, "    last reported state: %s (0x%x)\n",
10304  		   state_completed_string(state), state);
10305  	dd_dev_err(dd, "    state successfully completed: %s\n",
10306  		   success ? "yes" : "no");
10307  	dd_dev_err(dd, "    fail reason 0x%x: %s\n",
10308  		   reason, state_complete_reason_code_string(ppd, reason));
10309  	dd_dev_err(dd, "    passing lane mask: 0x%x", lanes);
10310  }
10311  
10312  /*
10313   * Read the last state complete frames and explain them.  This routine
10314   * expects to be called if the link went down during link negotiation
10315   * and initialization (LNI).  That is, anywhere between polling and link up.
10316   */
10317  static void check_lni_states(struct hfi1_pportdata *ppd)
10318  {
10319  	u32 last_local_state;
10320  	u32 last_remote_state;
10321  
10322  	read_last_local_state(ppd->dd, &last_local_state);
10323  	read_last_remote_state(ppd->dd, &last_remote_state);
10324  
10325  	/*
10326  	 * Don't report anything if there is nothing to report.  A value of
10327  	 * 0 means the link was taken down while polling and there was no
10328  	 * training in-process.
10329  	 */
10330  	if (last_local_state == 0 && last_remote_state == 0)
10331  		return;
10332  
10333  	decode_state_complete(ppd, last_local_state, "transmitted");
10334  	decode_state_complete(ppd, last_remote_state, "received");
10335  }
10336  
10337  /* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
10338  static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
10339  {
10340  	u64 reg;
10341  	unsigned long timeout;
10342  
10343  	/* watch LCB_STS_LINK_TRANSFER_ACTIVE */
10344  	timeout = jiffies + msecs_to_jiffies(wait_ms);
10345  	while (1) {
10346  		reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
10347  		if (reg)
10348  			break;
10349  		if (time_after(jiffies, timeout)) {
10350  			dd_dev_err(dd,
10351  				   "timeout waiting for LINK_TRANSFER_ACTIVE\n");
10352  			return -ETIMEDOUT;
10353  		}
10354  		udelay(2);
10355  	}
10356  	return 0;
10357  }
10358  
10359  /* called when the logical link state is not down as it should be */
10360  static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
10361  {
10362  	struct hfi1_devdata *dd = ppd->dd;
10363  
10364  	/*
10365  	 * Bring link up in LCB loopback
10366  	 */
10367  	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10368  	write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
10369  		  DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
10370  
10371  	write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
10372  	write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
10373  	write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
10374  	write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
10375  
10376  	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
10377  	(void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
10378  	udelay(3);
10379  	write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
10380  	write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
10381  
10382  	wait_link_transfer_active(dd, 100);
10383  
10384  	/*
10385  	 * Bring the link down again.
10386  	 */
10387  	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10388  	write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
10389  	write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
10390  
10391  	dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
10392  }
10393  
10394  /*
10395   * Helper for set_link_state().  Do not call except from that routine.
10396   * Expects ppd->hls_mutex to be held.
10397   *
10398   * @rem_reason value to be sent to the neighbor
10399   *
10400   * LinkDownReasons only set if transition succeeds.
10401   */
10402  static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10403  {
10404  	struct hfi1_devdata *dd = ppd->dd;
10405  	u32 previous_state;
10406  	int offline_state_ret;
10407  	int ret;
10408  
10409  	update_lcb_cache(dd);
10410  
10411  	previous_state = ppd->host_link_state;
10412  	ppd->host_link_state = HLS_GOING_OFFLINE;
10413  
10414  	/* start offline transition */
10415  	ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
10416  
10417  	if (ret != HCMD_SUCCESS) {
10418  		dd_dev_err(dd,
10419  			   "Failed to transition to Offline link state, return %d\n",
10420  			   ret);
10421  		return -EINVAL;
10422  	}
10423  	if (ppd->offline_disabled_reason ==
10424  			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
10425  		ppd->offline_disabled_reason =
10426  		HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
10427  
10428  	offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
10429  	if (offline_state_ret < 0)
10430  		return offline_state_ret;
10431  
10432  	/* Disabling AOC transmitters */
10433  	if (ppd->port_type == PORT_TYPE_QSFP &&
10434  	    ppd->qsfp_info.limiting_active &&
10435  	    qsfp_mod_present(ppd)) {
10436  		int ret;
10437  
10438  		ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10439  		if (ret == 0) {
10440  			set_qsfp_tx(ppd, 0);
10441  			release_chip_resource(dd, qsfp_resource(dd));
10442  		} else {
10443  			/* not fatal, but should warn */
10444  			dd_dev_err(dd,
10445  				   "Unable to acquire lock to turn off QSFP TX\n");
10446  		}
10447  	}
10448  
10449  	/*
10450  	 * Wait for the offline.Quiet transition if it hasn't happened yet. It
10451  	 * can take a while for the link to go down.
10452  	 */
10453  	if (offline_state_ret != PLS_OFFLINE_QUIET) {
10454  		ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
10455  		if (ret < 0)
10456  			return ret;
10457  	}
10458  
10459  	/*
10460  	 * Now in charge of LCB - must be after the physical state is
10461  	 * offline.quiet and before host_link_state is changed.
10462  	 */
10463  	set_host_lcb_access(dd);
10464  	write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10465  
10466  	/* make sure the logical state is also down */
10467  	ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10468  	if (ret)
10469  		force_logical_link_state_down(ppd);
10470  
10471  	ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10472  	update_statusp(ppd, IB_PORT_DOWN);
10473  
10474  	/*
10475  	 * The LNI has a mandatory wait time after the physical state
10476  	 * moves to Offline.Quiet.  The wait time may be different
10477  	 * depending on how the link went down.  The 8051 firmware
10478  	 * will observe the needed wait time and only move to ready
10479  	 * when that is completed.  The largest of the quiet timeouts
10480  	 * is 6s, so wait that long and then at least 0.5s more for
10481  	 * other transitions, and another 0.5s for a buffer.
10482  	 */
10483  	ret = wait_fm_ready(dd, 7000);
10484  	if (ret) {
10485  		dd_dev_err(dd,
10486  			   "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
10487  		/* state is really offline, so make it so */
10488  		ppd->host_link_state = HLS_DN_OFFLINE;
10489  		return ret;
10490  	}
10491  
10492  	/*
10493  	 * The state is now offline and the 8051 is ready to accept host
10494  	 * requests.
10495  	 *	- change our state
10496  	 *	- notify others if we were previously in a linkup state
10497  	 */
10498  	ppd->host_link_state = HLS_DN_OFFLINE;
10499  	if (previous_state & HLS_UP) {
10500  		/* went down while link was up */
10501  		handle_linkup_change(dd, 0);
10502  	} else if (previous_state
10503  			& (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10504  		/* went down while attempting link up */
10505  		check_lni_states(ppd);
10506  
10507  		/* The QSFP doesn't need to be reset on LNI failure */
10508  		ppd->qsfp_info.reset_needed = 0;
10509  	}
10510  
10511  	/* the active link width (downgrade) is 0 on link down */
10512  	ppd->link_width_active = 0;
10513  	ppd->link_width_downgrade_tx_active = 0;
10514  	ppd->link_width_downgrade_rx_active = 0;
10515  	ppd->current_egress_rate = 0;
10516  	return 0;
10517  }
10518  
10519  /* return the link state name */
10520  static const char *link_state_name(u32 state)
10521  {
10522  	const char *name;
10523  	int n = ilog2(state);
10524  	static const char * const names[] = {
10525  		[__HLS_UP_INIT_BP]	 = "INIT",
10526  		[__HLS_UP_ARMED_BP]	 = "ARMED",
10527  		[__HLS_UP_ACTIVE_BP]	 = "ACTIVE",
10528  		[__HLS_DN_DOWNDEF_BP]	 = "DOWNDEF",
10529  		[__HLS_DN_POLL_BP]	 = "POLL",
10530  		[__HLS_DN_DISABLE_BP]	 = "DISABLE",
10531  		[__HLS_DN_OFFLINE_BP]	 = "OFFLINE",
10532  		[__HLS_VERIFY_CAP_BP]	 = "VERIFY_CAP",
10533  		[__HLS_GOING_UP_BP]	 = "GOING_UP",
10534  		[__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10535  		[__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10536  	};
10537  
10538  	name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10539  	return name ? name : "unknown";
10540  }
10541  
10542  /* return the link state reason name */
10543  static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10544  {
10545  	if (state == HLS_UP_INIT) {
10546  		switch (ppd->linkinit_reason) {
10547  		case OPA_LINKINIT_REASON_LINKUP:
10548  			return "(LINKUP)";
10549  		case OPA_LINKINIT_REASON_FLAPPING:
10550  			return "(FLAPPING)";
10551  		case OPA_LINKINIT_OUTSIDE_POLICY:
10552  			return "(OUTSIDE_POLICY)";
10553  		case OPA_LINKINIT_QUARANTINED:
10554  			return "(QUARANTINED)";
10555  		case OPA_LINKINIT_INSUFIC_CAPABILITY:
10556  			return "(INSUFIC_CAPABILITY)";
10557  		default:
10558  			break;
10559  		}
10560  	}
10561  	return "";
10562  }
10563  
10564  /*
10565   * driver_pstate - convert the driver's notion of a port's
10566   * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10567   * Return -1 (converted to a u32) to indicate error.
10568   */
10569  u32 driver_pstate(struct hfi1_pportdata *ppd)
10570  {
10571  	switch (ppd->host_link_state) {
10572  	case HLS_UP_INIT:
10573  	case HLS_UP_ARMED:
10574  	case HLS_UP_ACTIVE:
10575  		return IB_PORTPHYSSTATE_LINKUP;
10576  	case HLS_DN_POLL:
10577  		return IB_PORTPHYSSTATE_POLLING;
10578  	case HLS_DN_DISABLE:
10579  		return IB_PORTPHYSSTATE_DISABLED;
10580  	case HLS_DN_OFFLINE:
10581  		return OPA_PORTPHYSSTATE_OFFLINE;
10582  	case HLS_VERIFY_CAP:
10583  		return IB_PORTPHYSSTATE_TRAINING;
10584  	case HLS_GOING_UP:
10585  		return IB_PORTPHYSSTATE_TRAINING;
10586  	case HLS_GOING_OFFLINE:
10587  		return OPA_PORTPHYSSTATE_OFFLINE;
10588  	case HLS_LINK_COOLDOWN:
10589  		return OPA_PORTPHYSSTATE_OFFLINE;
10590  	case HLS_DN_DOWNDEF:
10591  	default:
10592  		dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10593  			   ppd->host_link_state);
10594  		return  -1;
10595  	}
10596  }
10597  
10598  /*
10599   * driver_lstate - convert the driver's notion of a port's
10600   * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10601   * (converted to a u32) to indicate error.
10602   */
10603  u32 driver_lstate(struct hfi1_pportdata *ppd)
10604  {
10605  	if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10606  		return IB_PORT_DOWN;
10607  
10608  	switch (ppd->host_link_state & HLS_UP) {
10609  	case HLS_UP_INIT:
10610  		return IB_PORT_INIT;
10611  	case HLS_UP_ARMED:
10612  		return IB_PORT_ARMED;
10613  	case HLS_UP_ACTIVE:
10614  		return IB_PORT_ACTIVE;
10615  	default:
10616  		dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10617  			   ppd->host_link_state);
10618  		return -1;
10619  	}
10620  }
10621  
10622  void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10623  			  u8 neigh_reason, u8 rem_reason)
10624  {
10625  	if (ppd->local_link_down_reason.latest == 0 &&
10626  	    ppd->neigh_link_down_reason.latest == 0) {
10627  		ppd->local_link_down_reason.latest = lcl_reason;
10628  		ppd->neigh_link_down_reason.latest = neigh_reason;
10629  		ppd->remote_link_down_reason = rem_reason;
10630  	}
10631  }
10632  
10633  /**
10634   * data_vls_operational() - Verify if data VL BCT credits and MTU
10635   *			    are both set.
10636   * @ppd: pointer to hfi1_pportdata structure
10637   *
10638   * Return: true - Ok, false - otherwise.
10639   */
10640  static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
10641  {
10642  	int i;
10643  	u64 reg;
10644  
10645  	if (!ppd->actual_vls_operational)
10646  		return false;
10647  
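	/* fail if a VL has BCT credits programmed without an MTU, or vice versa */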
10648  	for (i = 0; i < ppd->vls_supported; i++) {
10649  		reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i));
10650  		if ((reg && !ppd->dd->vld[i].mtu) ||
10651  		    (!reg && ppd->dd->vld[i].mtu))
10652  			return false;
10653  	}
10654  
10655  	return true;
10656  }
10657  
10658  /*
10659   * Change the physical and/or logical link state.
10660   *
10661   * Do not call this routine while inside an interrupt.  It contains
10662   * calls to routines that can take multiple seconds to finish.
10663   *
10664   * Returns 0 on success, -errno on failure.
10665   */
10666  int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10667  {
10668  	struct hfi1_devdata *dd = ppd->dd;
10669  	struct ib_event event = {.device = NULL};
10670  	int ret1, ret = 0;
10671  	int orig_new_state, poll_bounce;
10672  
10673  	mutex_lock(&ppd->hls_lock);
10674  
10675  	orig_new_state = state;
10676  	if (state == HLS_DN_DOWNDEF)
10677  		state = HLS_DEFAULT;
10678  
10679  	/* interpret poll -> poll as a link bounce */
10680  	poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10681  		      state == HLS_DN_POLL;
10682  
10683  	dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10684  		    link_state_name(ppd->host_link_state),
10685  		    link_state_name(orig_new_state),
10686  		    poll_bounce ? "(bounce) " : "",
10687  		    link_state_reason_name(ppd, state));
10688  
10689  	/*
10690  	 * If we're going to a (HLS_*) link state that implies the logical
10691  	 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10692  	 * reset is_sm_config_started to 0.
10693  	 */
10694  	if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10695  		ppd->is_sm_config_started = 0;
10696  
10697  	/*
10698  	 * Do nothing if the states match.  Let a poll to poll link bounce
10699  	 * go through.
10700  	 */
10701  	if (ppd->host_link_state == state && !poll_bounce)
10702  		goto done;
10703  
10704  	switch (state) {
10705  	case HLS_UP_INIT:
10706  		if (ppd->host_link_state == HLS_DN_POLL &&
10707  		    (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10708  			/*
10709  			 * Quick link up jumps from polling to here.
10710  			 *
10711  			 * Whether in normal or loopback mode, the
10712  			 * simulator jumps from polling to link up.
10713  			 * Accept that here.
10714  			 */
10715  			/* OK */
10716  		} else if (ppd->host_link_state != HLS_GOING_UP) {
10717  			goto unexpected;
10718  		}
10719  
10720  		/*
10721  		 * Wait for Link_Up physical state.
10722  		 * Physical and Logical states should already be
10723  		 * transitioned to LinkUp and LinkInit respectively.
10724  		 */
10725  		ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
10726  		if (ret) {
10727  			dd_dev_err(dd,
10728  				   "%s: physical state did not change to LINK-UP\n",
10729  				   __func__);
10730  			break;
10731  		}
10732  
10733  		ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10734  		if (ret) {
10735  			dd_dev_err(dd,
10736  				   "%s: logical state did not change to INIT\n",
10737  				   __func__);
10738  			break;
10739  		}
10740  
10741  		/* clear old transient LINKINIT_REASON code */
10742  		if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10743  			ppd->linkinit_reason =
10744  				OPA_LINKINIT_REASON_LINKUP;
10745  
10746  		/* enable the port */
10747  		add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10748  
10749  		handle_linkup_change(dd, 1);
10750  		pio_kernel_linkup(dd);
10751  
10752  		/*
10753  		 * After link up, a new link width will have been set.
10754  		 * Update the xmit counters with regards to the new
10755  		 * link width.
10756  		 */
10757  		update_xmit_counters(ppd, ppd->link_width_active);
10758  
10759  		ppd->host_link_state = HLS_UP_INIT;
10760  		update_statusp(ppd, IB_PORT_INIT);
10761  		break;
10762  	case HLS_UP_ARMED:
10763  		if (ppd->host_link_state != HLS_UP_INIT)
10764  			goto unexpected;
10765  
10766  		if (!data_vls_operational(ppd)) {
10767  			dd_dev_err(dd,
10768  				   "%s: Invalid data VL credits or mtu\n",
10769  				   __func__);
10770  			ret = -EINVAL;
10771  			break;
10772  		}
10773  
10774  		set_logical_state(dd, LSTATE_ARMED);
10775  		ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10776  		if (ret) {
10777  			dd_dev_err(dd,
10778  				   "%s: logical state did not change to ARMED\n",
10779  				   __func__);
10780  			break;
10781  		}
10782  		ppd->host_link_state = HLS_UP_ARMED;
10783  		update_statusp(ppd, IB_PORT_ARMED);
10784  		/*
10785  		 * The simulator does not currently implement SMA messages,
10786  		 * so neighbor_normal is not set.  Set it here when we first
10787  		 * move to Armed.
10788  		 */
10789  		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10790  			ppd->neighbor_normal = 1;
10791  		break;
10792  	case HLS_UP_ACTIVE:
10793  		if (ppd->host_link_state != HLS_UP_ARMED)
10794  			goto unexpected;
10795  
10796  		set_logical_state(dd, LSTATE_ACTIVE);
10797  		ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10798  		if (ret) {
10799  			dd_dev_err(dd,
10800  				   "%s: logical state did not change to ACTIVE\n",
10801  				   __func__);
10802  		} else {
10803  			/* tell all engines to go running */
10804  			sdma_all_running(dd);
10805  			ppd->host_link_state = HLS_UP_ACTIVE;
10806  			update_statusp(ppd, IB_PORT_ACTIVE);
10807  
10808  			/* Signal the IB layer that the port has gone active */
10809  			event.device = &dd->verbs_dev.rdi.ibdev;
10810  			event.element.port_num = ppd->port;
10811  			event.event = IB_EVENT_PORT_ACTIVE;
10812  		}
10813  		break;
10814  	case HLS_DN_POLL:
10815  		if ((ppd->host_link_state == HLS_DN_DISABLE ||
10816  		     ppd->host_link_state == HLS_DN_OFFLINE) &&
10817  		    dd->dc_shutdown)
10818  			dc_start(dd);
10819  		/* Hand LED control to the DC */
10820  		write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10821  
10822  		if (ppd->host_link_state != HLS_DN_OFFLINE) {
10823  			u8 tmp = ppd->link_enabled;
10824  
10825  			ret = goto_offline(ppd, ppd->remote_link_down_reason);
10826  			if (ret) {
10827  				ppd->link_enabled = tmp;
10828  				break;
10829  			}
10830  			ppd->remote_link_down_reason = 0;
10831  
10832  			if (ppd->driver_link_ready)
10833  				ppd->link_enabled = 1;
10834  		}
10835  
10836  		set_all_slowpath(ppd->dd);
10837  		ret = set_local_link_attributes(ppd);
10838  		if (ret)
10839  			break;
10840  
10841  		ppd->port_error_action = 0;
10842  
10843  		if (quick_linkup) {
10844  			/* quick linkup does not go into polling */
10845  			ret = do_quick_linkup(dd);
10846  		} else {
10847  			ret1 = set_physical_link_state(dd, PLS_POLLING);
10848  			if (!ret1)
10849  				ret1 = wait_phys_link_out_of_offline(ppd,
10850  								     3000);
10851  			if (ret1 != HCMD_SUCCESS) {
10852  				dd_dev_err(dd,
10853  					   "Failed to transition to Polling link state, return 0x%x\n",
10854  					   ret1);
10855  				ret = -EINVAL;
10856  			}
10857  		}
10858  
10859  		/*
10860  		 * Change the host link state after requesting DC8051 to
10861  		 * change its physical state so that we can ignore any
10862  		 * interrupt with stale LNI(XX) error, which will not be
10863  		 * cleared until DC8051 transitions to Polling state.
10864  		 */
10865  		ppd->host_link_state = HLS_DN_POLL;
10866  		ppd->offline_disabled_reason =
10867  			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10868  		/*
10869  		 * If an error occurred above, go back to offline.  The
10870  		 * caller may reschedule another attempt.
10871  		 */
10872  		if (ret)
10873  			goto_offline(ppd, 0);
10874  		else
10875  			log_physical_state(ppd, PLS_POLLING);
10876  		break;
10877  	case HLS_DN_DISABLE:
10878  		/* link is disabled */
10879  		ppd->link_enabled = 0;
10880  
10881  		/* allow any state to transition to disabled */
10882  
10883  		/* must transition to offline first */
10884  		if (ppd->host_link_state != HLS_DN_OFFLINE) {
10885  			ret = goto_offline(ppd, ppd->remote_link_down_reason);
10886  			if (ret)
10887  				break;
10888  			ppd->remote_link_down_reason = 0;
10889  		}
10890  
10891  		if (!dd->dc_shutdown) {
10892  			ret1 = set_physical_link_state(dd, PLS_DISABLED);
10893  			if (ret1 != HCMD_SUCCESS) {
10894  				dd_dev_err(dd,
10895  					   "Failed to transition to Disabled link state, return 0x%x\n",
10896  					   ret1);
10897  				ret = -EINVAL;
10898  				break;
10899  			}
10900  			ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
10901  			if (ret) {
10902  				dd_dev_err(dd,
10903  					   "%s: physical state did not change to DISABLED\n",
10904  					   __func__);
10905  				break;
10906  			}
10907  			dc_shutdown(dd);
10908  		}
10909  		ppd->host_link_state = HLS_DN_DISABLE;
10910  		break;
10911  	case HLS_DN_OFFLINE:
10912  		if (ppd->host_link_state == HLS_DN_DISABLE)
10913  			dc_start(dd);
10914  
10915  		/* allow any state to transition to offline */
10916  		ret = goto_offline(ppd, ppd->remote_link_down_reason);
10917  		if (!ret)
10918  			ppd->remote_link_down_reason = 0;
10919  		break;
10920  	case HLS_VERIFY_CAP:
10921  		if (ppd->host_link_state != HLS_DN_POLL)
10922  			goto unexpected;
10923  		ppd->host_link_state = HLS_VERIFY_CAP;
10924  		log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP);
10925  		break;
10926  	case HLS_GOING_UP:
10927  		if (ppd->host_link_state != HLS_VERIFY_CAP)
10928  			goto unexpected;
10929  
10930  		ret1 = set_physical_link_state(dd, PLS_LINKUP);
10931  		if (ret1 != HCMD_SUCCESS) {
10932  			dd_dev_err(dd,
10933  				   "Failed to transition to link up state, return 0x%x\n",
10934  				   ret1);
10935  			ret = -EINVAL;
10936  			break;
10937  		}
10938  		ppd->host_link_state = HLS_GOING_UP;
10939  		break;
10940  
10941  	case HLS_GOING_OFFLINE:		/* transient within goto_offline() */
10942  	case HLS_LINK_COOLDOWN:		/* transient within goto_offline() */
10943  	default:
10944  		dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10945  			    __func__, state);
10946  		ret = -EINVAL;
10947  		break;
10948  	}
10949  
10950  	goto done;
10951  
10952  unexpected:
10953  	dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10954  		   __func__, link_state_name(ppd->host_link_state),
10955  		   link_state_name(state));
10956  	ret = -EINVAL;
10957  
10958  done:
10959  	mutex_unlock(&ppd->hls_lock);
10960  
10961  	if (event.device)
10962  		ib_dispatch_event(&event);
10963  
10964  	return ret;
10965  }
10966  
10967  int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10968  {
10969  	u64 reg;
10970  	int ret = 0;
10971  
10972  	switch (which) {
10973  	case HFI1_IB_CFG_LIDLMC:
10974  		set_lidlmc(ppd);
10975  		break;
10976  	case HFI1_IB_CFG_VL_HIGH_LIMIT:
10977  		/*
10978  		 * The VL Arbitrator high limit is sent in units of 4k
10979  		 * bytes, while HFI stores it in units of 64 bytes.
10980  		 */
10981  		val *= 4096 / 64;
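		/* e.g. an incoming val of 2 (8 KB) becomes 2 * (4096 / 64) = 128 */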
10982  		reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10983  			<< SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10984  		write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10985  		break;
10986  	case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10987  		/* HFI only supports POLL as the default link down state */
10988  		if (val != HLS_DN_POLL)
10989  			ret = -EINVAL;
10990  		break;
10991  	case HFI1_IB_CFG_OP_VLS:
10992  		if (ppd->vls_operational != val) {
10993  			ppd->vls_operational = val;
10994  			if (!ppd->port)
10995  				ret = -EINVAL;
10996  		}
10997  		break;
10998  	/*
10999  	 * For link width, link width downgrade, and speed enable, always AND
11000  	 * the setting with what is actually supported.  This has two benefits.
11001  	 * First, enabled can't have unsupported values, no matter what the
11002  	 * SM or FM might want.  Second, the ALL_SUPPORTED wildcards that mean
11003  	 * "fill in with your supported value" have all the bits in the
11004  	 * field set, so simply ANDing with supported has the desired result.
11005  	 */
11006  	case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
11007  		ppd->link_width_enabled = val & ppd->link_width_supported;
11008  		break;
11009  	case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
11010  		ppd->link_width_downgrade_enabled =
11011  				val & ppd->link_width_downgrade_supported;
11012  		break;
11013  	case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
11014  		ppd->link_speed_enabled = val & ppd->link_speed_supported;
11015  		break;
11016  	case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
11017  		/*
11018  		 * HFI does not follow IB specs, save this value
11019  		 * so we can report it, if asked.
11020  		 */
11021  		ppd->overrun_threshold = val;
11022  		break;
11023  	case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
11024  		/*
11025  		 * HFI does not follow IB specs, save this value
11026  		 * so we can report it, if asked.
11027  		 */
11028  		ppd->phy_error_threshold = val;
11029  		break;
11030  
11031  	case HFI1_IB_CFG_MTU:
11032  		set_send_length(ppd);
11033  		break;
11034  
11035  	case HFI1_IB_CFG_PKEYS:
11036  		if (HFI1_CAP_IS_KSET(PKEY_CHECK))
11037  			set_partition_keys(ppd);
11038  		break;
11039  
11040  	default:
11041  		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
11042  			dd_dev_info(ppd->dd,
11043  				    "%s: which %s, val 0x%x: not implemented\n",
11044  				    __func__, ib_cfg_name(which), val);
11045  		break;
11046  	}
11047  	return ret;
11048  }
11049  
11050  /* begin functions related to vl arbitration table caching */
11051  static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
11052  {
11053  	int i;
11054  
11055  	BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11056  			VL_ARB_LOW_PRIO_TABLE_SIZE);
11057  	BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11058  			VL_ARB_HIGH_PRIO_TABLE_SIZE);
11059  
11060  	/*
11061  	 * Note that we always return values directly from the
11062  	 * 'vl_arb_cache' (and do no CSR reads) in response to a
11063  	 * 'Get(VLArbTable)'. This is obviously correct after a
11064  	 * 'Set(VLArbTable)', since the cache will then be up to
11065  	 * date. But it's also correct prior to any 'Set(VLArbTable)'
11066  	 * since then both the cache, and the relevant h/w registers
11067  	 * will be zeroed.
11068  	 */
11069  
11070  	for (i = 0; i < MAX_PRIO_TABLE; i++)
11071  		spin_lock_init(&ppd->vl_arb_cache[i].lock);
11072  }
11073  
11074  /*
11075   * vl_arb_lock_cache
11076   *
11077   * All other vl_arb_* functions should be called only after locking
11078   * the cache.
11079   */
11080  static inline struct vl_arb_cache *
11081  vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
11082  {
11083  	if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
11084  		return NULL;
11085  	spin_lock(&ppd->vl_arb_cache[idx].lock);
11086  	return &ppd->vl_arb_cache[idx];
11087  }
11088  
11089  static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
11090  {
11091  	spin_unlock(&ppd->vl_arb_cache[idx].lock);
11092  }
11093  
11094  static void vl_arb_get_cache(struct vl_arb_cache *cache,
11095  			     struct ib_vl_weight_elem *vl)
11096  {
11097  	memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
11098  }
11099  
11100  static void vl_arb_set_cache(struct vl_arb_cache *cache,
11101  			     struct ib_vl_weight_elem *vl)
11102  {
11103  	memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11104  }
11105  
11106  static int vl_arb_match_cache(struct vl_arb_cache *cache,
11107  			      struct ib_vl_weight_elem *vl)
11108  {
11109  	return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11110  }
11111  
11112  /* end functions related to vl arbitration table caching */
11113  
11114  static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
11115  			  u32 size, struct ib_vl_weight_elem *vl)
11116  {
11117  	struct hfi1_devdata *dd = ppd->dd;
11118  	u64 reg;
11119  	unsigned int i, is_up = 0;
11120  	int drain, ret = 0;
11121  
11122  	mutex_lock(&ppd->hls_lock);
11123  
11124  	if (ppd->host_link_state & HLS_UP)
11125  		is_up = 1;
11126  
11127  	drain = !is_ax(dd) && is_up;
11128  
11129  	if (drain)
11130  		/*
11131  		 * Before adjusting VL arbitration weights, empty per-VL
11132  		 * FIFOs, otherwise a packet whose VL weight is being
11133  		 * set to 0 could get stuck in a FIFO with no chance to
11134  		 * egress.
11135  		 */
11136  		ret = stop_drain_data_vls(dd);
11137  
11138  	if (ret) {
11139  		dd_dev_err(
11140  			dd,
11141  			"%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
11142  			__func__);
11143  		goto err;
11144  	}
11145  
11146  	for (i = 0; i < size; i++, vl++) {
11147  		/*
11148  		 * NOTE: The low priority shift and mask are used here, but
11149  		 * they are the same for both the low and high registers.
11150  		 */
11151  		reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
11152  				<< SEND_LOW_PRIORITY_LIST_VL_SHIFT)
11153  		      | (((u64)vl->weight
11154  				& SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
11155  				<< SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
11156  		write_csr(dd, target + (i * 8), reg);
11157  	}
11158  	pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
11159  
11160  	if (drain)
11161  		open_fill_data_vls(dd); /* reopen all VLs */
11162  
11163  err:
11164  	mutex_unlock(&ppd->hls_lock);
11165  
11166  	return ret;
11167  }
11168  
11169  /*
11170   * Read one credit merge VL register.
11171   */
11172  static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
11173  			   struct vl_limit *vll)
11174  {
11175  	u64 reg = read_csr(dd, csr);
11176  
11177  	vll->dedicated = cpu_to_be16(
11178  		(reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
11179  		& SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
11180  	vll->shared = cpu_to_be16(
11181  		(reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
11182  		& SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
11183  }
11184  
11185  /*
11186   * Read the current credit merge limits.
11187   */
11188  static int get_buffer_control(struct hfi1_devdata *dd,
11189  			      struct buffer_control *bc, u16 *overall_limit)
11190  {
11191  	u64 reg;
11192  	int i;
11193  
11194  	/* not all entries are filled in */
11195  	memset(bc, 0, sizeof(*bc));
11196  
11197  	/* OPA and HFI have a 1-1 mapping */
11198  	for (i = 0; i < TXE_NUM_DATA_VL; i++)
11199  		read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
11200  
11201  	/* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
11202  	read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
11203  
11204  	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11205  	bc->overall_shared_limit = cpu_to_be16(
11206  		(reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
11207  		& SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
11208  	if (overall_limit)
11209  		*overall_limit = (reg
11210  			>> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
11211  			& SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
11212  	return sizeof(struct buffer_control);
11213  }
11214  
11215  static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11216  {
11217  	u64 reg;
11218  	int i;
11219  
11220  	/* each register contains 16 SC->VLnt mappings, 4 bits each */
11221  	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
11222  	for (i = 0; i < sizeof(u64); i++) {
11223  		u8 byte = *(((u8 *)&reg) + i);
11224  
11225  		dp->vlnt[2 * i] = byte & 0xf;
11226  		dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
11227  	}
11228  
11229  	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
11230  	for (i = 0; i < sizeof(u64); i++) {
11231  		u8 byte = *(((u8 *)&reg) + i);
11232  
11233  		dp->vlnt[16 + (2 * i)] = byte & 0xf;
11234  		dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
11235  	}
11236  	return sizeof(struct sc2vlnt);
11237  }
11238  
11239  static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
11240  			      struct ib_vl_weight_elem *vl)
11241  {
11242  	unsigned int i;
11243  
11244  	for (i = 0; i < nelems; i++, vl++) {
11245  		vl->vl = 0xf;
11246  		vl->weight = 0;
11247  	}
11248  }
11249  
11250  static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11251  {
11252  	write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
11253  		  DC_SC_VL_VAL(15_0,
11254  			       0, dp->vlnt[0] & 0xf,
11255  			       1, dp->vlnt[1] & 0xf,
11256  			       2, dp->vlnt[2] & 0xf,
11257  			       3, dp->vlnt[3] & 0xf,
11258  			       4, dp->vlnt[4] & 0xf,
11259  			       5, dp->vlnt[5] & 0xf,
11260  			       6, dp->vlnt[6] & 0xf,
11261  			       7, dp->vlnt[7] & 0xf,
11262  			       8, dp->vlnt[8] & 0xf,
11263  			       9, dp->vlnt[9] & 0xf,
11264  			       10, dp->vlnt[10] & 0xf,
11265  			       11, dp->vlnt[11] & 0xf,
11266  			       12, dp->vlnt[12] & 0xf,
11267  			       13, dp->vlnt[13] & 0xf,
11268  			       14, dp->vlnt[14] & 0xf,
11269  			       15, dp->vlnt[15] & 0xf));
11270  	write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
11271  		  DC_SC_VL_VAL(31_16,
11272  			       16, dp->vlnt[16] & 0xf,
11273  			       17, dp->vlnt[17] & 0xf,
11274  			       18, dp->vlnt[18] & 0xf,
11275  			       19, dp->vlnt[19] & 0xf,
11276  			       20, dp->vlnt[20] & 0xf,
11277  			       21, dp->vlnt[21] & 0xf,
11278  			       22, dp->vlnt[22] & 0xf,
11279  			       23, dp->vlnt[23] & 0xf,
11280  			       24, dp->vlnt[24] & 0xf,
11281  			       25, dp->vlnt[25] & 0xf,
11282  			       26, dp->vlnt[26] & 0xf,
11283  			       27, dp->vlnt[27] & 0xf,
11284  			       28, dp->vlnt[28] & 0xf,
11285  			       29, dp->vlnt[29] & 0xf,
11286  			       30, dp->vlnt[30] & 0xf,
11287  			       31, dp->vlnt[31] & 0xf));
11288  }
11289  
11290  static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11291  			u16 limit)
11292  {
11293  	if (limit != 0)
11294  		dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
11295  			    what, (int)limit, idx);
11296  }
11297  
11298  /* change only the shared limit portion of SendCmGlobalCredit */
11299  static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11300  {
11301  	u64 reg;
11302  
11303  	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11304  	reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
11305  	reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
11306  	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11307  }
11308  
11309  /* change only the total credit limit portion of SendCmGlobalCredit */
11310  static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
11311  {
11312  	u64 reg;
11313  
11314  	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11315  	reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
11316  	reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
11317  	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11318  }
11319  
11320  /* set the given per-VL shared limit */
11321  static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
11322  {
11323  	u64 reg;
11324  	u32 addr;
11325  
11326  	if (vl < TXE_NUM_DATA_VL)
11327  		addr = SEND_CM_CREDIT_VL + (8 * vl);
11328  	else
11329  		addr = SEND_CM_CREDIT_VL15;
11330  
11331  	reg = read_csr(dd, addr);
11332  	reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11333  	reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11334  	write_csr(dd, addr, reg);
11335  }
11336  
11337  /* set the given per-VL dedicated limit */
11338  static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11339  {
11340  	u64 reg;
11341  	u32 addr;
11342  
11343  	if (vl < TXE_NUM_DATA_VL)
11344  		addr = SEND_CM_CREDIT_VL + (8 * vl);
11345  	else
11346  		addr = SEND_CM_CREDIT_VL15;
11347  
11348  	reg = read_csr(dd, addr);
11349  	reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11350  	reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11351  	write_csr(dd, addr, reg);
11352  }
11353  
11354  /* spin until the given per-VL status mask bits clear */
11355  static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
11356  				     const char *which)
11357  {
11358  	unsigned long timeout;
11359  	u64 reg;
11360  
11361  	timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
11362  	while (1) {
11363  		reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
11364  
11365  		if (reg == 0)
11366  			return;	/* success */
11367  		if (time_after(jiffies, timeout))
11368  			break;		/* timed out */
11369  		udelay(1);
11370  	}
11371  
11372  	dd_dev_err(dd,
11373  		   "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
11374  		   which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
11375  	/*
11376  	 * If this occurs, it is likely there was a credit loss on the link.
11377  	 * The only recovery from that is a link bounce.
11378  	 */
11379  	dd_dev_err(dd,
11380  		   "Continuing anyway.  A credit loss may occur.  Suggest a link bounce\n");
11381  }
11382  
11383  /*
11384   * The number of credits on the VLs may be changed while everything
11385   * is "live", but the following algorithm must be followed due to
11386   * how the hardware is actually implemented.  In particular,
11387   * Return_Credit_Status[] is the only correct status check.
11388   *
11389   * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11390   *     set Global_Shared_Credit_Limit = 0
11391   *     use_all_vl = 1
11392   * mask0 = all VLs that are changing either dedicated or shared limits
11393   * set Shared_Limit[mask0] = 0
11394   * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11395   * if (changing any dedicated limit)
11396   *     mask1 = all VLs that are lowering dedicated limits
11397   *     lower Dedicated_Limit[mask1]
11398   *     spin until Return_Credit_Status[mask1] == 0
11399   *     raise Dedicated_Limits
11400   * raise Shared_Limits
11401   * raise Global_Shared_Credit_Limit
11402   *
11403   * lower = if the new limit is lower, set the limit to the new value
11404   * raise = if the new limit is higher than the current value (may have been
11405   *	changed earlier in the algorithm), set the limit to the new value
11406   */
11407  int set_buffer_control(struct hfi1_pportdata *ppd,
11408  		       struct buffer_control *new_bc)
11409  {
11410  	struct hfi1_devdata *dd = ppd->dd;
11411  	u64 changing_mask, ld_mask, stat_mask;
11412  	int change_count;
11413  	int i, use_all_mask;
11414  	int this_shared_changing;
11415  	int vl_count = 0, ret;
11416  	/*
11417  	 * A0: add the variable any_shared_limit_changing below and in the
11418  	 * algorithm above.  If removing A0 support, it can be removed.
11419  	 */
11420  	int any_shared_limit_changing;
11421  	struct buffer_control cur_bc;
11422  	u8 changing[OPA_MAX_VLS];
11423  	u8 lowering_dedicated[OPA_MAX_VLS];
11424  	u16 cur_total;
11425  	u32 new_total = 0;
11426  	const u64 all_mask =
11427  	SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11428  	 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11429  	 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11430  	 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11431  	 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11432  	 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11433  	 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11434  	 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11435  	 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11436  
11437  #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11438  #define NUM_USABLE_VLS 16	/* look at VL15 and less */
11439  
11440  	/* find the new total credits, do sanity check on unused VLs */
11441  	for (i = 0; i < OPA_MAX_VLS; i++) {
11442  		if (valid_vl(i)) {
11443  			new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11444  			continue;
11445  		}
11446  		nonzero_msg(dd, i, "dedicated",
11447  			    be16_to_cpu(new_bc->vl[i].dedicated));
11448  		nonzero_msg(dd, i, "shared",
11449  			    be16_to_cpu(new_bc->vl[i].shared));
11450  		new_bc->vl[i].dedicated = 0;
11451  		new_bc->vl[i].shared = 0;
11452  	}
11453  	new_total += be16_to_cpu(new_bc->overall_shared_limit);
11454  
11455  	/* fetch the current values */
11456  	get_buffer_control(dd, &cur_bc, &cur_total);
11457  
11458  	/*
11459  	 * Create the masks we will use.
11460  	 */
11461  	memset(changing, 0, sizeof(changing));
11462  	memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
11463  	/*
11464  	 * NOTE: Assumes that the individual VL bits are adjacent and in
11465  	 * increasing order
11466  	 */
11467  	stat_mask =
11468  		SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11469  	changing_mask = 0;
11470  	ld_mask = 0;
11471  	change_count = 0;
11472  	any_shared_limit_changing = 0;
11473  	for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11474  		if (!valid_vl(i))
11475  			continue;
11476  		this_shared_changing = new_bc->vl[i].shared
11477  						!= cur_bc.vl[i].shared;
11478  		if (this_shared_changing)
11479  			any_shared_limit_changing = 1;
11480  		if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11481  		    this_shared_changing) {
11482  			changing[i] = 1;
11483  			changing_mask |= stat_mask;
11484  			change_count++;
11485  		}
11486  		if (be16_to_cpu(new_bc->vl[i].dedicated) <
11487  					be16_to_cpu(cur_bc.vl[i].dedicated)) {
11488  			lowering_dedicated[i] = 1;
11489  			ld_mask |= stat_mask;
11490  		}
11491  	}
11492  
11493  	/* bracket the credit change with a total adjustment */
11494  	if (new_total > cur_total)
11495  		set_global_limit(dd, new_total);
11496  
11497  	/*
11498  	 * Start the credit change algorithm.
11499  	 */
11500  	use_all_mask = 0;
11501  	if ((be16_to_cpu(new_bc->overall_shared_limit) <
11502  	     be16_to_cpu(cur_bc.overall_shared_limit)) ||
11503  	    (is_ax(dd) && any_shared_limit_changing)) {
11504  		set_global_shared(dd, 0);
11505  		cur_bc.overall_shared_limit = 0;
11506  		use_all_mask = 1;
11507  	}
11508  
11509  	for (i = 0; i < NUM_USABLE_VLS; i++) {
11510  		if (!valid_vl(i))
11511  			continue;
11512  
11513  		if (changing[i]) {
11514  			set_vl_shared(dd, i, 0);
11515  			cur_bc.vl[i].shared = 0;
11516  		}
11517  	}
11518  
11519  	wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
11520  				 "shared");
11521  
11522  	if (change_count > 0) {
11523  		for (i = 0; i < NUM_USABLE_VLS; i++) {
11524  			if (!valid_vl(i))
11525  				continue;
11526  
11527  			if (lowering_dedicated[i]) {
11528  				set_vl_dedicated(dd, i,
11529  						 be16_to_cpu(new_bc->
11530  							     vl[i].dedicated));
11531  				cur_bc.vl[i].dedicated =
11532  						new_bc->vl[i].dedicated;
11533  			}
11534  		}
11535  
11536  		wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11537  
11538  		/* now raise all dedicated that are going up */
11539  		for (i = 0; i < NUM_USABLE_VLS; i++) {
11540  			if (!valid_vl(i))
11541  				continue;
11542  
11543  			if (be16_to_cpu(new_bc->vl[i].dedicated) >
11544  					be16_to_cpu(cur_bc.vl[i].dedicated))
11545  				set_vl_dedicated(dd, i,
11546  						 be16_to_cpu(new_bc->
11547  							     vl[i].dedicated));
11548  		}
11549  	}
11550  
11551  	/* next raise all shared that are going up */
11552  	for (i = 0; i < NUM_USABLE_VLS; i++) {
11553  		if (!valid_vl(i))
11554  			continue;
11555  
11556  		if (be16_to_cpu(new_bc->vl[i].shared) >
11557  				be16_to_cpu(cur_bc.vl[i].shared))
11558  			set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11559  	}
11560  
11561  	/* finally raise the global shared */
11562  	if (be16_to_cpu(new_bc->overall_shared_limit) >
11563  	    be16_to_cpu(cur_bc.overall_shared_limit))
11564  		set_global_shared(dd,
11565  				  be16_to_cpu(new_bc->overall_shared_limit));
11566  
11567  	/* bracket the credit change with a total adjustment */
11568  	if (new_total < cur_total)
11569  		set_global_limit(dd, new_total);
11570  
11571  	/*
11572  	 * Determine the actual number of operational VLs using the number of
11573  	 * dedicated and shared credits for each VL.
11574  	 */
11575  	if (change_count > 0) {
11576  		for (i = 0; i < TXE_NUM_DATA_VL; i++)
11577  			if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11578  			    be16_to_cpu(new_bc->vl[i].shared) > 0)
11579  				vl_count++;
11580  		ppd->actual_vls_operational = vl_count;
11581  		ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11582  				    ppd->actual_vls_operational :
11583  				    ppd->vls_operational,
11584  				    NULL);
11585  		if (ret == 0)
11586  			ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11587  					   ppd->actual_vls_operational :
11588  					   ppd->vls_operational, NULL);
11589  		if (ret)
11590  			return ret;
11591  	}
11592  	return 0;
11593  }
11594  
11595  /*
11596   * Read the given fabric manager table. Return the size of the
11597   * table (in bytes) on success, and a negative error code on
11598   * failure.
11599   */
11600  int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11601  
11602  {
11603  	int size;
11604  	struct vl_arb_cache *vlc;
11605  
11606  	switch (which) {
11607  	case FM_TBL_VL_HIGH_ARB:
11608  		size = 256;
11609  		/*
11610  		 * OPA specifies 128 elements (of 2 bytes each), though
11611  		 * HFI supports only 16 elements in h/w.
11612  		 */
11613  		vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11614  		vl_arb_get_cache(vlc, t);
11615  		vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11616  		break;
11617  	case FM_TBL_VL_LOW_ARB:
11618  		size = 256;
11619  		/*
11620  		 * OPA specifies 128 elements (of 2 bytes each), though
11621  		 * HFI supports only 16 elements in h/w.
11622  		 */
11623  		vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11624  		vl_arb_get_cache(vlc, t);
11625  		vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11626  		break;
11627  	case FM_TBL_BUFFER_CONTROL:
11628  		size = get_buffer_control(ppd->dd, t, NULL);
11629  		break;
11630  	case FM_TBL_SC2VLNT:
11631  		size = get_sc2vlnt(ppd->dd, t);
11632  		break;
11633  	case FM_TBL_VL_PREEMPT_ELEMS:
11634  		size = 256;
11635  		/* OPA specifies 128 elements, of 2 bytes each */
11636  		get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11637  		break;
11638  	case FM_TBL_VL_PREEMPT_MATRIX:
11639  		size = 256;
11640  		/*
11641  		 * OPA specifies that this is the same size as the VL
11642  		 * arbitration tables (i.e., 256 bytes).
11643  		 */
11644  		break;
11645  	default:
11646  		return -EINVAL;
11647  	}
11648  	return size;
11649  }
11650  
11651  /*
11652   * Write the given fabric manager table.
11653   */
11654  int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11655  {
11656  	int ret = 0;
11657  	struct vl_arb_cache *vlc;
11658  
11659  	switch (which) {
11660  	case FM_TBL_VL_HIGH_ARB:
11661  		vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11662  		if (vl_arb_match_cache(vlc, t)) {
11663  			vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11664  			break;
11665  		}
11666  		vl_arb_set_cache(vlc, t);
11667  		vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11668  		ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11669  				     VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11670  		break;
11671  	case FM_TBL_VL_LOW_ARB:
11672  		vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11673  		if (vl_arb_match_cache(vlc, t)) {
11674  			vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11675  			break;
11676  		}
11677  		vl_arb_set_cache(vlc, t);
11678  		vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11679  		ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11680  				     VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11681  		break;
11682  	case FM_TBL_BUFFER_CONTROL:
11683  		ret = set_buffer_control(ppd, t);
11684  		break;
11685  	case FM_TBL_SC2VLNT:
11686  		set_sc2vlnt(ppd->dd, t);
11687  		break;
11688  	default:
11689  		ret = -EINVAL;
11690  	}
11691  	return ret;
11692  }
11693  
11694  /*
11695   * Disable all data VLs.
11696   *
11697   * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11698   */
11699  static int disable_data_vls(struct hfi1_devdata *dd)
11700  {
11701  	if (is_ax(dd))
11702  		return 1;
11703  
11704  	pio_send_control(dd, PSC_DATA_VL_DISABLE);
11705  
11706  	return 0;
11707  }
11708  
11709  /*
11710   * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11711   * Just re-enables all data VLs (the "fill" part happens
11712   * automatically - the name was chosen for symmetry with
11713   * stop_drain_data_vls()).
11714   *
11715   * Return 0 if successful, non-zero if the VLs cannot be enabled.
11716   */
11717  int open_fill_data_vls(struct hfi1_devdata *dd)
11718  {
11719  	if (is_ax(dd))
11720  		return 1;
11721  
11722  	pio_send_control(dd, PSC_DATA_VL_ENABLE);
11723  
11724  	return 0;
11725  }
11726  
11727  /*
11728   * drain_data_vls() - assumes that disable_data_vls() has been called;
11729   * waits for the occupancy of the per-VL FIFOs (for all contexts) and
11730   * the SDMA engines to drop to 0.
11731   */
11732  static void drain_data_vls(struct hfi1_devdata *dd)
11733  {
11734  	sc_wait(dd);
11735  	sdma_wait(dd);
11736  	pause_for_credit_return(dd);
11737  }
11738  
11739  /*
11740   * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11741   *
11742   * Use open_fill_data_vls() to resume using data VLs.  This pair is
11743   * meant to be used like this:
11744   *
11745   * stop_drain_data_vls(dd);
11746   * // do things with per-VL resources
11747   * open_fill_data_vls(dd);
11748   */
11749  int stop_drain_data_vls(struct hfi1_devdata *dd)
11750  {
11751  	int ret;
11752  
11753  	ret = disable_data_vls(dd);
11754  	if (ret == 0)
11755  		drain_data_vls(dd);
11756  
11757  	return ret;
11758  }
11759  
11760  /*
11761   * Convert a nanosecond time to a cclock count.  No matter how slow
11762   * the cclock, a non-zero ns will always have a non-zero result.
11763   */
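/*
 * Worked example (hypothetical cclock period, for illustration only): with a
 * cclock period of 1242 ps, ns_to_cclock(dd, 1) computes (1 * 1000) / 1242 = 0
 * and the final check bumps the result to 1.  cclock_to_ns() below performs
 * the inverse conversion with the same never-return-zero guarantee.
 */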
11764  u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11765  {
11766  	u32 cclocks;
11767  
11768  	if (dd->icode == ICODE_FPGA_EMULATION)
11769  		cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11770  	else  /* simulation pretends to be ASIC */
11771  		cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11772  	if (ns && !cclocks)	/* if ns nonzero, must be at least 1 */
11773  		cclocks = 1;
11774  	return cclocks;
11775  }
11776  
11777  /*
11778   * Convert a cclock count to nanoseconds. No matter how slow
11779   * the cclock, a non-zero cclocks will always have a non-zero result.
11780   */
11781  u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11782  {
11783  	u32 ns;
11784  
11785  	if (dd->icode == ICODE_FPGA_EMULATION)
11786  		ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11787  	else  /* simulation pretends to be ASIC */
11788  		ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11789  	if (cclocks && !ns)
11790  		ns = 1;
11791  	return ns;
11792  }
11793  
11794  /*
11795   * Dynamically adjust the receive interrupt timeout for a context based on
11796   * incoming packet rate.
11797   *
11798   * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11799   */
11800  static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11801  {
11802  	struct hfi1_devdata *dd = rcd->dd;
11803  	u32 timeout = rcd->rcvavail_timeout;
11804  
11805  	/*
11806  	 * This algorithm doubles or halves the timeout depending on whether
11807  	 * the number of packets received in this interrupt was less than,
11808  	 * or greater than or equal to, the interrupt count.
11809  	 *
11810  	 * The calculations below do not allow a steady state to be achieved.
11811  	 * Only at the endpoints is it possible to have an unchanging
11812  	 * timeout.
11813  	 */
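	/*
	 * Example trace (illustrative numbers): with rcv_intr_count = 16 and
	 * a current timeout of 8, an interrupt that handled 4 packets halves
	 * the timeout to 4, while one that handled 20 packets doubles it
	 * toward dd->rcv_intr_timeout_csr, which caps the upward adjustment.
	 */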
11814  	if (npkts < rcv_intr_count) {
11815  		/*
11816  		 * Not enough packets arrived before the timeout, adjust
11817  		 * timeout downward.
11818  		 */
11819  		if (timeout < 2) /* already at minimum? */
11820  			return;
11821  		timeout >>= 1;
11822  	} else {
11823  		/*
11824  		 * More than enough packets arrived before the timeout, adjust
11825  		 * timeout upward.
11826  		 */
11827  		if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11828  			return;
11829  		timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11830  	}
11831  
11832  	rcd->rcvavail_timeout = timeout;
11833  	/*
11834  	 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11835  	 * been verified to be in range
11836  	 */
11837  	write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11838  			(u64)timeout <<
11839  			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11840  }
11841  
11842  void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11843  		    u32 intr_adjust, u32 npkts)
11844  {
11845  	struct hfi1_devdata *dd = rcd->dd;
11846  	u64 reg;
11847  	u32 ctxt = rcd->ctxt;
11848  
11849  	/*
11850  	 * Need to write timeout register before updating RcvHdrHead to ensure
11851  	 * that a new value is used when the HW decides to restart counting.
11852  	 */
11853  	if (intr_adjust)
11854  		adjust_rcv_timeout(rcd, npkts);
11855  	if (updegr) {
11856  		reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11857  			<< RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11858  		write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11859  	}
11860  	reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11861  		(((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11862  			<< RCV_HDR_HEAD_HEAD_SHIFT);
11863  	write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11864  }
11865  
11866  u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11867  {
11868  	u32 head, tail;
11869  
11870  	head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11871  		& RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11872  
11873  	if (hfi1_rcvhdrtail_kvaddr(rcd))
11874  		tail = get_rcvhdrtail(rcd);
11875  	else
11876  		tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11877  
11878  	return head == tail;
11879  }
11880  
11881  /*
11882   * Context Control and Receive Array encoding for buffer size:
11883   *	0x0 invalid
11884   *	0x1   4 KB
11885   *	0x2   8 KB
11886   *	0x3  16 KB
11887   *	0x4  32 KB
11888   *	0x5  64 KB
11889   *	0x6 128 KB
11890   *	0x7 256 KB
11891   *	0x8 512 KB (Receive Array only)
11892   *	0x9   1 MB (Receive Array only)
11893   *	0xa   2 MB (Receive Array only)
11894   *
11895   *	0xB-0xF - reserved (Receive Array only)
11896   *
11897   *
11898   * This routine assumes that the value has already been sanity checked.
11899   */
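/*
 * For example, a buffer size of 64 KB is programmed as 0x5, and an
 * unexpected size falls back to 0x1 (4 KB), the minimum encoding.
 */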
11900  static u32 encoded_size(u32 size)
11901  {
11902  	switch (size) {
11903  	case   4 * 1024: return 0x1;
11904  	case   8 * 1024: return 0x2;
11905  	case  16 * 1024: return 0x3;
11906  	case  32 * 1024: return 0x4;
11907  	case  64 * 1024: return 0x5;
11908  	case 128 * 1024: return 0x6;
11909  	case 256 * 1024: return 0x7;
11910  	case 512 * 1024: return 0x8;
11911  	case   1 * 1024 * 1024: return 0x9;
11912  	case   2 * 1024 * 1024: return 0xa;
11913  	}
11914  	return 0x1;	/* if invalid, go with the minimum size */
11915  }
11916  
11917  /**
11918   * encode_rcv_header_entry_size - return chip specific encoding for size
11919   * @size: size in dwords
11920   *
11921   * Convert a receive header entry size to the encoding used in the CSR.
11922   *
11923   * Return zero if the given size is invalid, otherwise the encoding.
11924   */
11925  u8 encode_rcv_header_entry_size(u8 size)
11926  {
11927  	/* there are only 3 valid receive header entry sizes */
11928  	if (size == 2)
11929  		return 1;
11930  	if (size == 16)
11931  		return 2;
11932  	if (size == 32)
11933  		return 4;
11934  	return 0; /* invalid */
11935  }
11936  
11937  /**
11938   * hfi1_validate_rcvhdrcnt - validate hdrcnt
11939   * @dd: the device data
11940   * @thecnt: the header count
11941   */
11942  int hfi1_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt)
11943  {
11944  	if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
11945  		dd_dev_err(dd, "Receive header queue count too small\n");
11946  		return -EINVAL;
11947  	}
11948  
11949  	if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
11950  		dd_dev_err(dd,
11951  			   "Receive header queue count cannot be greater than %u\n",
11952  			   HFI1_MAX_HDRQ_EGRBUF_CNT);
11953  		return -EINVAL;
11954  	}
11955  
11956  	if (thecnt % HDRQ_INCREMENT) {
11957  		dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n",
11958  			   thecnt, HDRQ_INCREMENT);
11959  		return -EINVAL;
11960  	}
11961  
11962  	return 0;
11963  }
11964  
11965  /**
11966   * set_hdrq_regs - set header queue registers for context
11967   * @dd: the device data
11968   * @ctxt: the context
11969   * @entsize: the dword entry size
11970   * @hdrcnt: the number of header entries
11971   */
11972  void set_hdrq_regs(struct hfi1_devdata *dd, u8 ctxt, u8 entsize, u16 hdrcnt)
11973  {
11974  	u64 reg;
11975  
11976  	reg = (((u64)hdrcnt >> HDRQ_SIZE_SHIFT) & RCV_HDR_CNT_CNT_MASK) <<
11977  	      RCV_HDR_CNT_CNT_SHIFT;
11978  	write_kctxt_csr(dd, ctxt, RCV_HDR_CNT, reg);
11979  	reg = ((u64)encode_rcv_header_entry_size(entsize) &
11980  	       RCV_HDR_ENT_SIZE_ENT_SIZE_MASK) <<
11981  	      RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
11982  	write_kctxt_csr(dd, ctxt, RCV_HDR_ENT_SIZE, reg);
11983  	reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK) <<
11984  	      RCV_HDR_SIZE_HDR_SIZE_SHIFT;
11985  	write_kctxt_csr(dd, ctxt, RCV_HDR_SIZE, reg);
11986  
11987  	/*
11988  	 * Program dummy tail address for every receive context
11989  	 * before enabling any receive context
11990  	 */
11991  	write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11992  			dd->rcvhdrtail_dummy_dma);
11993  }
11994  
11995  void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
11996  		  struct hfi1_ctxtdata *rcd)
11997  {
11998  	u64 rcvctrl, reg;
11999  	int did_enable = 0;
12000  	u16 ctxt;
12001  
12002  	if (!rcd)
12003  		return;
12004  
12005  	ctxt = rcd->ctxt;
12006  
12007  	hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
12008  
12009  	rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
12010  	/* if the context is already enabled, don't do the extra steps */
12011  	if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
12012  	    !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
12013  		/* reset the tail and hdr addresses, and sequence count */
12014  		write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
12015  				rcd->rcvhdrq_dma);
12016  		if (hfi1_rcvhdrtail_kvaddr(rcd))
12017  			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
12018  					rcd->rcvhdrqtailaddr_dma);
12019  		hfi1_set_seq_cnt(rcd, 1);
12020  
12021  		/* reset the cached receive header queue head value */
12022  		hfi1_set_rcd_head(rcd, 0);
12023  
12024  		/*
12025  		 * Zero the receive header queue so we don't get false
12026  		 * positives when checking the sequence number.  The
12027  		 * sequence numbers could land exactly on the same spot.
12028  		 * E.g. a rcd restart before the receive header queue wrapped.
12029  		 */
12030  		memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd));
12031  
12032  		/* starting timeout */
12033  		rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
12034  
12035  		/* enable the context */
12036  		rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
12037  
12038  		/* clean the egr buffer size first */
12039  		rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
12040  		rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
12041  				& RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
12042  					<< RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
12043  
12044  		/* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
12045  		write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
12046  		did_enable = 1;
12047  
12048  		/* zero RcvEgrIndexHead */
12049  		write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
12050  
12051  		/* set eager count and base index */
12052  		reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
12053  			& RCV_EGR_CTRL_EGR_CNT_MASK)
12054  		       << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
12055  			(((rcd->eager_base >> RCV_SHIFT)
12056  			  & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
12057  			 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
12058  		write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
12059  
12060  		/*
12061  		 * Set TID (expected) count and base index.
12062  		 * rcd->expected_count is set to individual RcvArray entries,
12063  		 * not pairs, and the CSR takes a pair-count in groups of
12064  		 * four, so divide by 8.
12065  		 */
12066  		reg = (((rcd->expected_count >> RCV_SHIFT)
12067  					& RCV_TID_CTRL_TID_PAIR_CNT_MASK)
12068  				<< RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
12069  		      (((rcd->expected_base >> RCV_SHIFT)
12070  					& RCV_TID_CTRL_TID_BASE_INDEX_MASK)
12071  				<< RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
12072  		write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
12073  		if (ctxt == HFI1_CTRL_CTXT)
12074  			write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
12075  	}
12076  	if (op & HFI1_RCVCTRL_CTXT_DIS) {
12077  		write_csr(dd, RCV_VL15, 0);
12078  		/*
12079  		 * When a receive context is being disabled, turn on tail
12080  		 * update with a dummy tail address and then disable the
12081  		 * receive context.
12082  		 */
12083  		if (dd->rcvhdrtail_dummy_dma) {
12084  			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
12085  					dd->rcvhdrtail_dummy_dma);
12086  			/* Enabling RcvCtxtCtrl.TailUpd is intentional. */
12087  			rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
12088  		}
12089  
12090  		rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
12091  	}
12092  	if (op & HFI1_RCVCTRL_INTRAVAIL_ENB) {
12093  		set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
12094  			      IS_RCVAVAIL_START + rcd->ctxt, true);
12095  		rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
12096  	}
12097  	if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) {
12098  		set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
12099  			      IS_RCVAVAIL_START + rcd->ctxt, false);
12100  		rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
12101  	}
12102  	if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && hfi1_rcvhdrtail_kvaddr(rcd))
12103  		rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
12104  	if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
12105  		/* See comment on RcvCtxtCtrl.TailUpd above */
12106  		if (!(op & HFI1_RCVCTRL_CTXT_DIS))
12107  			rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
12108  	}
12109  	if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
12110  		rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
12111  	if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
12112  		rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
12113  	if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
12114  		/*
12115  		 * In one-packet-per-eager mode, the size comes from
12116  		 * the RcvArray entry.
12117  		 */
12118  		rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
12119  		rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
12120  	}
12121  	if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
12122  		rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
12123  	if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
12124  		rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
12125  	if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
12126  		rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
12127  	if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
12128  		rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
12129  	if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
12130  		rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
12131  	if (op & HFI1_RCVCTRL_URGENT_ENB)
12132  		set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
12133  			      IS_RCVURGENT_START + rcd->ctxt, true);
12134  	if (op & HFI1_RCVCTRL_URGENT_DIS)
12135  		set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
12136  			      IS_RCVURGENT_START + rcd->ctxt, false);
12137  
12138  	hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
12139  	write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl);
12140  
12141  	/* work around sticky RcvCtxtStatus.BlockedRHQFull */
12142  	if (did_enable &&
12143  	    (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
12144  		reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
12145  		if (reg != 0) {
12146  			dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
12147  				    ctxt, reg);
12148  			read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
12149  			write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
12150  			write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
12151  			read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
12152  			reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
12153  			dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
12154  				    ctxt, reg, reg == 0 ? "not" : "still");
12155  		}
12156  	}
12157  
12158  	if (did_enable) {
12159  		/*
12160  		 * The interrupt timeout and count must be set after
12161  		 * the context is enabled to take effect.
12162  		 */
12163  		/* set interrupt timeout */
12164  		write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
12165  				(u64)rcd->rcvavail_timeout <<
12166  				RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
12167  
12168  		/* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
12169  		reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
12170  		write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
12171  	}
12172  
12173  	if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
12174  		/*
12175  		 * If the context has been disabled and the Tail Update has
12176  		 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address
12177  		 * so it doesn't contain an address that is invalid.
12178  		 */
12179  		write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
12180  				dd->rcvhdrtail_dummy_dma);
12181  }
12182  
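/*
 * hfi1_read_cntrs() serves two purposes for its callers: when @namep is
 * non-NULL it returns the pre-built counter-name block, otherwise it
 * snapshots every enabled device counter into dd->cntrs and returns that
 * block through @cntrp.  The return value is the size of whichever block
 * was requested, in bytes.
 */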
12183  u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
12184  {
12185  	int ret;
12186  	u64 val = 0;
12187  
12188  	if (namep) {
12189  		ret = dd->cntrnameslen;
12190  		*namep = dd->cntrnames;
12191  	} else {
12192  		const struct cntr_entry *entry;
12193  		int i, j;
12194  
12195  		ret = (dd->ndevcntrs) * sizeof(u64);
12196  
12197  		/* Get the start of the block of counters */
12198  		*cntrp = dd->cntrs;
12199  
12200  		/*
12201  		 * Now go and fill in each counter in the block.
12202  		 */
12203  		for (i = 0; i < DEV_CNTR_LAST; i++) {
12204  			entry = &dev_cntrs[i];
12205  			hfi1_cdbg(CNTR, "reading %s", entry->name);
12206  			if (entry->flags & CNTR_DISABLED) {
12207  				/* Nothing */
12208  				hfi1_cdbg(CNTR, "\tDisabled\n");
12209  			} else {
12210  				if (entry->flags & CNTR_VL) {
12211  					hfi1_cdbg(CNTR, "\tPer VL\n");
12212  					for (j = 0; j < C_VL_COUNT; j++) {
12213  						val = entry->rw_cntr(entry,
12214  								  dd, j,
12215  								  CNTR_MODE_R,
12216  								  0);
12217  						hfi1_cdbg(
12218  						   CNTR,
12219  						   "\t\tRead 0x%llx for %d\n",
12220  						   val, j);
12221  						dd->cntrs[entry->offset + j] =
12222  									    val;
12223  					}
12224  				} else if (entry->flags & CNTR_SDMA) {
12225  					hfi1_cdbg(CNTR,
12226  						  "\t Per SDMA Engine\n");
12227  					for (j = 0; j < chip_sdma_engines(dd);
12228  					     j++) {
12229  						val =
12230  						entry->rw_cntr(entry, dd, j,
12231  							       CNTR_MODE_R, 0);
12232  						hfi1_cdbg(CNTR,
12233  							  "\t\tRead 0x%llx for %d\n",
12234  							  val, j);
12235  						dd->cntrs[entry->offset + j] =
12236  									val;
12237  					}
12238  				} else {
12239  					val = entry->rw_cntr(entry, dd,
12240  							CNTR_INVALID_VL,
12241  							CNTR_MODE_R, 0);
12242  					dd->cntrs[entry->offset] = val;
12243  					hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12244  				}
12245  			}
12246  		}
12247  	}
12248  	return ret;
12249  }
12250  
12251  /*
12252   * Used by sysfs to create files for hfi stats to read
12253   */
12254  u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
12255  {
12256  	int ret;
12257  	u64 val = 0;
12258  
12259  	if (namep) {
12260  		ret = ppd->dd->portcntrnameslen;
12261  		*namep = ppd->dd->portcntrnames;
12262  	} else {
12263  		const struct cntr_entry *entry;
12264  		int i, j;
12265  
12266  		ret = ppd->dd->nportcntrs * sizeof(u64);
12267  		*cntrp = ppd->cntrs;
12268  
12269  		for (i = 0; i < PORT_CNTR_LAST; i++) {
12270  			entry = &port_cntrs[i];
12271  			hfi1_cdbg(CNTR, "reading %s", entry->name);
12272  			if (entry->flags & CNTR_DISABLED) {
12273  				/* Nothing */
12274  				hfi1_cdbg(CNTR, "\tDisabled\n");
12275  				continue;
12276  			}
12277  
12278  			if (entry->flags & CNTR_VL) {
12279  				hfi1_cdbg(CNTR, "\tPer VL");
12280  				for (j = 0; j < C_VL_COUNT; j++) {
12281  					val = entry->rw_cntr(entry, ppd, j,
12282  							       CNTR_MODE_R,
12283  							       0);
12284  					hfi1_cdbg(
12285  					   CNTR,
12286  					   "\t\tRead 0x%llx for %d",
12287  					   val, j);
12288  					ppd->cntrs[entry->offset + j] = val;
12289  				}
12290  			} else {
12291  				val = entry->rw_cntr(entry, ppd,
12292  						       CNTR_INVALID_VL,
12293  						       CNTR_MODE_R,
12294  						       0);
12295  				ppd->cntrs[entry->offset] = val;
12296  				hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12297  			}
12298  		}
12299  	}
12300  	return ret;
12301  }
12302  
12303  static void free_cntrs(struct hfi1_devdata *dd)
12304  {
12305  	struct hfi1_pportdata *ppd;
12306  	int i;
12307  
12308  	if (dd->synth_stats_timer.function)
12309  		del_timer_sync(&dd->synth_stats_timer);
12310  	ppd = (struct hfi1_pportdata *)(dd + 1);
12311  	for (i = 0; i < dd->num_pports; i++, ppd++) {
12312  		kfree(ppd->cntrs);
12313  		kfree(ppd->scntrs);
12314  		free_percpu(ppd->ibport_data.rvp.rc_acks);
12315  		free_percpu(ppd->ibport_data.rvp.rc_qacks);
12316  		free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
12317  		ppd->cntrs = NULL;
12318  		ppd->scntrs = NULL;
12319  		ppd->ibport_data.rvp.rc_acks = NULL;
12320  		ppd->ibport_data.rvp.rc_qacks = NULL;
12321  		ppd->ibport_data.rvp.rc_delayed_comp = NULL;
12322  	}
12323  	kfree(dd->portcntrnames);
12324  	dd->portcntrnames = NULL;
12325  	kfree(dd->cntrs);
12326  	dd->cntrs = NULL;
12327  	kfree(dd->scntrs);
12328  	dd->scntrs = NULL;
12329  	kfree(dd->cntrnames);
12330  	dd->cntrnames = NULL;
12331  	if (dd->update_cntr_wq) {
12332  		destroy_workqueue(dd->update_cntr_wq);
12333  		dd->update_cntr_wq = NULL;
12334  	}
12335  }
12336  
12337  static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
12338  			      u64 *psval, void *context, int vl)
12339  {
12340  	u64 val;
12341  	u64 sval = *psval;
12342  
12343  	if (entry->flags & CNTR_DISABLED) {
12344  		dd_dev_err(dd, "Counter %s not enabled", entry->name);
12345  		return 0;
12346  	}
12347  
12348  	hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12349  
12350  	val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
12351  
12352  	/* If it's a synthetic counter there is more work we need to do */
12353  	if (entry->flags & CNTR_SYNTH) {
12354  		if (sval == CNTR_MAX) {
12355  			/* No need to read already saturated */
12356  			return CNTR_MAX;
12357  		}
12358  
12359  		if (entry->flags & CNTR_32BIT) {
12360  			/* 32bit counters can wrap multiple times */
12361  			u64 upper = sval >> 32;
12362  			u64 lower = (sval << 32) >> 32;
12363  
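			/*
			 * Illustration (made-up values): if the saved
			 * synthetic value is 0x1fffffff0 (upper = 1,
			 * lower = 0xfffffff0) and the hardware now reads
			 * 0x10, lower > val flags a wrap, upper becomes 2,
			 * and the returned 64-bit value is 0x200000010.
			 */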
12364  			if (lower > val) { /* hw wrapped */
12365  				if (upper == CNTR_32BIT_MAX)
12366  					val = CNTR_MAX;
12367  				else
12368  					upper++;
12369  			}
12370  
12371  			if (val != CNTR_MAX)
12372  				val = (upper << 32) | val;
12373  
12374  		} else {
12375  			/* If we rolled we are saturated */
12376  			if ((val < sval) || (val > CNTR_MAX))
12377  				val = CNTR_MAX;
12378  		}
12379  	}
12380  
12381  	*psval = val;
12382  
12383  	hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12384  
12385  	return val;
12386  }
12387  
12388  static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
12389  			       struct cntr_entry *entry,
12390  			       u64 *psval, void *context, int vl, u64 data)
12391  {
12392  	u64 val;
12393  
12394  	if (entry->flags & CNTR_DISABLED) {
12395  		dd_dev_err(dd, "Counter %s not enabled", entry->name);
12396  		return 0;
12397  	}
12398  
12399  	hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12400  
12401  	if (entry->flags & CNTR_SYNTH) {
12402  		*psval = data;
12403  		if (entry->flags & CNTR_32BIT) {
12404  			val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12405  					     (data << 32) >> 32);
12406  			val = data; /* return the full 64bit value */
12407  		} else {
12408  			val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12409  					     data);
12410  		}
12411  	} else {
12412  		val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
12413  	}
12414  
12415  	*psval = val;
12416  
12417  	hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12418  
12419  	return val;
12420  }
12421  
12422  u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
12423  {
12424  	struct cntr_entry *entry;
12425  	u64 *sval;
12426  
12427  	entry = &dev_cntrs[index];
12428  	sval = dd->scntrs + entry->offset;
12429  
12430  	if (vl != CNTR_INVALID_VL)
12431  		sval += vl;
12432  
12433  	return read_dev_port_cntr(dd, entry, sval, dd, vl);
12434  }
12435  
12436  u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12437  {
12438  	struct cntr_entry *entry;
12439  	u64 *sval;
12440  
12441  	entry = &dev_cntrs[index];
12442  	sval = dd->scntrs + entry->offset;
12443  
12444  	if (vl != CNTR_INVALID_VL)
12445  		sval += vl;
12446  
12447  	return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12448  }
12449  
12450  u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12451  {
12452  	struct cntr_entry *entry;
12453  	u64 *sval;
12454  
12455  	entry = &port_cntrs[index];
12456  	sval = ppd->scntrs + entry->offset;
12457  
12458  	if (vl != CNTR_INVALID_VL)
12459  		sval += vl;
12460  
12461  	if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12462  	    (index <= C_RCV_HDR_OVF_LAST)) {
12463  		/* We do not want to bother for disabled contexts */
12464  		return 0;
12465  	}
12466  
12467  	return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12468  }
12469  
12470  u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12471  {
12472  	struct cntr_entry *entry;
12473  	u64 *sval;
12474  
12475  	entry = &port_cntrs[index];
12476  	sval = ppd->scntrs + entry->offset;
12477  
12478  	if (vl != CNTR_INVALID_VL)
12479  		sval += vl;
12480  
12481  	if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12482  	    (index <= C_RCV_HDR_OVF_LAST)) {
12483  		/* We do not want to bother for disabled contexts */
12484  		return 0;
12485  	}
12486  
12487  	return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12488  }
12489  
12490  static void do_update_synth_timer(struct work_struct *work)
12491  {
12492  	u64 cur_tx;
12493  	u64 cur_rx;
12494  	u64 total_flits;
12495  	u8 update = 0;
12496  	int i, j, vl;
12497  	struct hfi1_pportdata *ppd;
12498  	struct cntr_entry *entry;
12499  	struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
12500  					       update_cntr_work);
12501  
12502  	/*
12503  	 * Rather than keep beating on the CSRs, pick a minimal set that we can
12504  	 * check to watch for potential rollover.  We can do this by looking at
12505  	 * the number of flits sent/received.  If the total flits exceeds 32 bits
12506  	 * then we have to iterate over all the counters and update.
12507  	 */
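	/*
	 * In other words: if either flit counter appears to have gone
	 * backwards, or the combined delta since the last pass reaches
	 * CNTR_32BIT_MAX, every synthetic counter is refreshed below so that
	 * no 32-bit counter can wrap unnoticed between timer ticks.
	 */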
12508  	entry = &dev_cntrs[C_DC_RCV_FLITS];
12509  	cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12510  
12511  	entry = &dev_cntrs[C_DC_XMIT_FLITS];
12512  	cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12513  
12514  	hfi1_cdbg(
12515  	    CNTR,
12516  	    "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12517  	    dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12518  
12519  	if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12520  		/*
12521  		 * May not be strictly necessary to update but it won't hurt and
12522  		 * simplifies the logic here.
12523  		 */
12524  		update = 1;
12525  		hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12526  			  dd->unit);
12527  	} else {
12528  		total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12529  		hfi1_cdbg(CNTR,
12530  			  "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12531  			  total_flits, (u64)CNTR_32BIT_MAX);
12532  		if (total_flits >= CNTR_32BIT_MAX) {
12533  			hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12534  				  dd->unit);
12535  			update = 1;
12536  		}
12537  	}
12538  
12539  	if (update) {
12540  		hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12541  		for (i = 0; i < DEV_CNTR_LAST; i++) {
12542  			entry = &dev_cntrs[i];
12543  			if (entry->flags & CNTR_VL) {
12544  				for (vl = 0; vl < C_VL_COUNT; vl++)
12545  					read_dev_cntr(dd, i, vl);
12546  			} else {
12547  				read_dev_cntr(dd, i, CNTR_INVALID_VL);
12548  			}
12549  		}
12550  		ppd = (struct hfi1_pportdata *)(dd + 1);
12551  		for (i = 0; i < dd->num_pports; i++, ppd++) {
12552  			for (j = 0; j < PORT_CNTR_LAST; j++) {
12553  				entry = &port_cntrs[j];
12554  				if (entry->flags & CNTR_VL) {
12555  					for (vl = 0; vl < C_VL_COUNT; vl++)
12556  						read_port_cntr(ppd, j, vl);
12557  				} else {
12558  					read_port_cntr(ppd, j, CNTR_INVALID_VL);
12559  				}
12560  			}
12561  		}
12562  
12563  		/*
12564  		 * We want the value in the register. The goal is to keep track
12565  		 * of the number of "ticks" not the counter value. In other
12566  		 * words if the register rolls we want to notice it and go ahead
12567  		 * and force an update.
12568  		 */
12569  		entry = &dev_cntrs[C_DC_XMIT_FLITS];
12570  		dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12571  						CNTR_MODE_R, 0);
12572  
12573  		entry = &dev_cntrs[C_DC_RCV_FLITS];
12574  		dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12575  						CNTR_MODE_R, 0);
12576  
12577  		hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12578  			  dd->unit, dd->last_tx, dd->last_rx);
12579  
12580  	} else {
12581  		hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12582  	}
12583  }
12584  
12585  static void update_synth_timer(struct timer_list *t)
12586  {
12587  	struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer);
12588  
12589  	queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
12590  	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12591  }
12592  
12593  #define C_MAX_NAME 16 /* 15 chars + one for \0 */
12594  static int init_cntrs(struct hfi1_devdata *dd)
12595  {
12596  	int i, rcv_ctxts, j;
12597  	size_t sz;
12598  	char *p;
12599  	char name[C_MAX_NAME];
12600  	struct hfi1_pportdata *ppd;
12601  	const char *bit_type_32 = ",32";
12602  	const int bit_type_32_sz = strlen(bit_type_32);
12603  	u32 sdma_engines = chip_sdma_engines(dd);
12604  
12605  	/* set up the stats timer; the add_timer is done at the end */
12606  	timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);
12607  
12608  	/***********************/
12609  	/* per device counters */
12610  	/***********************/
12611  
12612  	/* size names and determine how many we have */
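	/*
	 * Note on layout: dd->cntrnames ends up as a single block of
	 * newline-terminated entries.  Per-VL and per-SDMA-engine counters
	 * get a numeric suffix appended to the base name, and 32-bit
	 * counters are tagged with a trailing ",32".
	 */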
12613  	dd->ndevcntrs = 0;
12614  	sz = 0;
12615  
12616  	for (i = 0; i < DEV_CNTR_LAST; i++) {
12617  		if (dev_cntrs[i].flags & CNTR_DISABLED) {
12618  			hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12619  			continue;
12620  		}
12621  
12622  		if (dev_cntrs[i].flags & CNTR_VL) {
12623  			dev_cntrs[i].offset = dd->ndevcntrs;
12624  			for (j = 0; j < C_VL_COUNT; j++) {
12625  				snprintf(name, C_MAX_NAME, "%s%d",
12626  					 dev_cntrs[i].name, vl_from_idx(j));
12627  				sz += strlen(name);
12628  				/* Add ",32" for 32-bit counters */
12629  				if (dev_cntrs[i].flags & CNTR_32BIT)
12630  					sz += bit_type_32_sz;
12631  				sz++;
12632  				dd->ndevcntrs++;
12633  			}
12634  		} else if (dev_cntrs[i].flags & CNTR_SDMA) {
12635  			dev_cntrs[i].offset = dd->ndevcntrs;
12636  			for (j = 0; j < sdma_engines; j++) {
12637  				snprintf(name, C_MAX_NAME, "%s%d",
12638  					 dev_cntrs[i].name, j);
12639  				sz += strlen(name);
12640  				/* Add ",32" for 32-bit counters */
12641  				if (dev_cntrs[i].flags & CNTR_32BIT)
12642  					sz += bit_type_32_sz;
12643  				sz++;
12644  				dd->ndevcntrs++;
12645  			}
12646  		} else {
12647  			/* +1 for newline. */
12648  			sz += strlen(dev_cntrs[i].name) + 1;
12649  			/* Add ",32" for 32-bit counters */
12650  			if (dev_cntrs[i].flags & CNTR_32BIT)
12651  				sz += bit_type_32_sz;
12652  			dev_cntrs[i].offset = dd->ndevcntrs;
12653  			dd->ndevcntrs++;
12654  		}
12655  	}
12656  
12657  	/* allocate space for the counter values */
12658  	dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
12659  			    GFP_KERNEL);
12660  	if (!dd->cntrs)
12661  		goto bail;
12662  
12663  	dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12664  	if (!dd->scntrs)
12665  		goto bail;
12666  
12667  	/* allocate space for the counter names */
12668  	dd->cntrnameslen = sz;
12669  	dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12670  	if (!dd->cntrnames)
12671  		goto bail;
12672  
12673  	/* fill in the names */
12674  	for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
12675  		if (dev_cntrs[i].flags & CNTR_DISABLED) {
12676  			/* Nothing */
12677  		} else if (dev_cntrs[i].flags & CNTR_VL) {
12678  			for (j = 0; j < C_VL_COUNT; j++) {
12679  				snprintf(name, C_MAX_NAME, "%s%d",
12680  					 dev_cntrs[i].name,
12681  					 vl_from_idx(j));
12682  				memcpy(p, name, strlen(name));
12683  				p += strlen(name);
12684  
12685  				/* Counter is 32 bits */
12686  				if (dev_cntrs[i].flags & CNTR_32BIT) {
12687  					memcpy(p, bit_type_32, bit_type_32_sz);
12688  					p += bit_type_32_sz;
12689  				}
12690  
12691  				*p++ = '\n';
12692  			}
12693  		} else if (dev_cntrs[i].flags & CNTR_SDMA) {
12694  			for (j = 0; j < sdma_engines; j++) {
12695  				snprintf(name, C_MAX_NAME, "%s%d",
12696  					 dev_cntrs[i].name, j);
12697  				memcpy(p, name, strlen(name));
12698  				p += strlen(name);
12699  
12700  				/* Counter is 32 bits */
12701  				if (dev_cntrs[i].flags & CNTR_32BIT) {
12702  					memcpy(p, bit_type_32, bit_type_32_sz);
12703  					p += bit_type_32_sz;
12704  				}
12705  
12706  				*p++ = '\n';
12707  			}
12708  		} else {
12709  			memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12710  			p += strlen(dev_cntrs[i].name);
12711  
12712  			/* Counter is 32 bits */
12713  			if (dev_cntrs[i].flags & CNTR_32BIT) {
12714  				memcpy(p, bit_type_32, bit_type_32_sz);
12715  				p += bit_type_32_sz;
12716  			}
12717  
12718  			*p++ = '\n';
12719  		}
12720  	}
12721  
12722  	/*********************/
12723  	/* per port counters */
12724  	/*********************/
12725  
12726  	/*
12727  	 * Go through the counters for the overflows and disable the ones we
12728  	 * don't need. This varies based on platform so we need to do it
12729  	 * dynamically here.
12730  	 */
12731  	rcv_ctxts = dd->num_rcv_contexts;
12732  	for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12733  	     i <= C_RCV_HDR_OVF_LAST; i++) {
12734  		port_cntrs[i].flags |= CNTR_DISABLED;
12735  	}
12736  
12737  	/* size port counter names and determine how many we have */
12738  	sz = 0;
12739  	dd->nportcntrs = 0;
12740  	for (i = 0; i < PORT_CNTR_LAST; i++) {
12741  		if (port_cntrs[i].flags & CNTR_DISABLED) {
12742  			hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12743  			continue;
12744  		}
12745  
12746  		if (port_cntrs[i].flags & CNTR_VL) {
12747  			port_cntrs[i].offset = dd->nportcntrs;
12748  			for (j = 0; j < C_VL_COUNT; j++) {
12749  				snprintf(name, C_MAX_NAME, "%s%d",
12750  					 port_cntrs[i].name, vl_from_idx(j));
12751  				sz += strlen(name);
12752  				/* Add ",32" for 32-bit counters */
12753  				if (port_cntrs[i].flags & CNTR_32BIT)
12754  					sz += bit_type_32_sz;
12755  				sz++;
12756  				dd->nportcntrs++;
12757  			}
12758  		} else {
12759  			/* +1 for newline */
12760  			sz += strlen(port_cntrs[i].name) + 1;
12761  			/* Add ",32" for 32-bit counters */
12762  			if (port_cntrs[i].flags & CNTR_32BIT)
12763  				sz += bit_type_32_sz;
12764  			port_cntrs[i].offset = dd->nportcntrs;
12765  			dd->nportcntrs++;
12766  		}
12767  	}
12768  
12769  	/* allocate space for the counter names */
12770  	dd->portcntrnameslen = sz;
12771  	dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12772  	if (!dd->portcntrnames)
12773  		goto bail;
12774  
12775  	/* fill in port cntr names */
12776  	for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12777  		if (port_cntrs[i].flags & CNTR_DISABLED)
12778  			continue;
12779  
12780  		if (port_cntrs[i].flags & CNTR_VL) {
12781  			for (j = 0; j < C_VL_COUNT; j++) {
12782  				snprintf(name, C_MAX_NAME, "%s%d",
12783  					 port_cntrs[i].name, vl_from_idx(j));
12784  				memcpy(p, name, strlen(name));
12785  				p += strlen(name);
12786  
12787  				/* Counter is 32 bits */
12788  				if (port_cntrs[i].flags & CNTR_32BIT) {
12789  					memcpy(p, bit_type_32, bit_type_32_sz);
12790  					p += bit_type_32_sz;
12791  				}
12792  
12793  				*p++ = '\n';
12794  			}
12795  		} else {
12796  			memcpy(p, port_cntrs[i].name,
12797  			       strlen(port_cntrs[i].name));
12798  			p += strlen(port_cntrs[i].name);
12799  
12800  			/* Counter is 32 bits */
12801  			if (port_cntrs[i].flags & CNTR_32BIT) {
12802  				memcpy(p, bit_type_32, bit_type_32_sz);
12803  				p += bit_type_32_sz;
12804  			}
12805  
12806  			*p++ = '\n';
12807  		}
12808  	}
12809  
12810  	/* allocate per port storage for counter values */
12811  	ppd = (struct hfi1_pportdata *)(dd + 1);
12812  	for (i = 0; i < dd->num_pports; i++, ppd++) {
12813  		ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12814  		if (!ppd->cntrs)
12815  			goto bail;
12816  
12817  		ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12818  		if (!ppd->scntrs)
12819  			goto bail;
12820  	}
12821  
12822  	/* CPU counters need to be allocated and zeroed */
12823  	if (init_cpu_counters(dd))
12824  		goto bail;
12825  
12826  	dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
12827  						     WQ_MEM_RECLAIM, dd->unit);
12828  	if (!dd->update_cntr_wq)
12829  		goto bail;
12830  
12831  	INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
12832  
12833  	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12834  	return 0;
12835  bail:
12836  	free_cntrs(dd);
12837  	return -ENOMEM;
12838  }
12839  
12840  static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12841  {
12842  	switch (chip_lstate) {
12843  	case LSTATE_DOWN:
12844  		return IB_PORT_DOWN;
12845  	case LSTATE_INIT:
12846  		return IB_PORT_INIT;
12847  	case LSTATE_ARMED:
12848  		return IB_PORT_ARMED;
12849  	case LSTATE_ACTIVE:
12850  		return IB_PORT_ACTIVE;
12851  	default:
12852  		dd_dev_err(dd,
12853  			   "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12854  			   chip_lstate);
12855  		return IB_PORT_DOWN;
12856  	}
12857  }
12858  
12859  u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12860  {
12861  	/* look at the HFI meta-states only */
12862  	switch (chip_pstate & 0xf0) {
12863  	case PLS_DISABLED:
12864  		return IB_PORTPHYSSTATE_DISABLED;
12865  	case PLS_OFFLINE:
12866  		return OPA_PORTPHYSSTATE_OFFLINE;
12867  	case PLS_POLLING:
12868  		return IB_PORTPHYSSTATE_POLLING;
12869  	case PLS_CONFIGPHY:
12870  		return IB_PORTPHYSSTATE_TRAINING;
12871  	case PLS_LINKUP:
12872  		return IB_PORTPHYSSTATE_LINKUP;
12873  	case PLS_PHYTEST:
12874  		return IB_PORTPHYSSTATE_PHY_TEST;
12875  	default:
12876  		dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12877  			   chip_pstate);
12878  		return IB_PORTPHYSSTATE_DISABLED;
12879  	}
12880  }
12881  
12882  /* return the OPA port logical state name */
12883  const char *opa_lstate_name(u32 lstate)
12884  {
12885  	static const char * const port_logical_names[] = {
12886  		"PORT_NOP",
12887  		"PORT_DOWN",
12888  		"PORT_INIT",
12889  		"PORT_ARMED",
12890  		"PORT_ACTIVE",
12891  		"PORT_ACTIVE_DEFER",
12892  	};
12893  	if (lstate < ARRAY_SIZE(port_logical_names))
12894  		return port_logical_names[lstate];
12895  	return "unknown";
12896  }
12897  
12898  /* return the OPA port physical state name */
12899  const char *opa_pstate_name(u32 pstate)
12900  {
12901  	static const char * const port_physical_names[] = {
12902  		"PHYS_NOP",
12903  		"reserved1",
12904  		"PHYS_POLL",
12905  		"PHYS_DISABLED",
12906  		"PHYS_TRAINING",
12907  		"PHYS_LINKUP",
12908  		"PHYS_LINK_ERR_RECOVER",
12909  		"PHYS_PHY_TEST",
12910  		"reserved8",
12911  		"PHYS_OFFLINE",
12912  		"PHYS_GANGED",
12913  		"PHYS_TEST",
12914  	};
12915  	if (pstate < ARRAY_SIZE(port_physical_names))
12916  		return port_physical_names[pstate];
12917  	return "unknown";
12918  }
12919  
12920  /**
12921   * update_statusp - Update userspace status flag
12922   * @ppd: Port data structure
12923   * @state: port state information
12924   *
12925   * Actual port status is determined by the host_link_state value
12926   * in the ppd.
12927   *
12928   * host_link_state MUST be updated before updating the user space
12929   * statusp.
12930   */
12931  static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
12932  {
12933  	/*
12934  	 * Set port status flags in the page mapped into userspace
12935  	 * memory. Do it here to ensure a reliable state - this is
12936  	 * the only function called by all state handling code.
12937  	 * Always set the flags because the cached value might have
12938  	 * been changed explicitly outside of this function.
12940  	 */
12941  	if (ppd->statusp) {
12942  		switch (state) {
12943  		case IB_PORT_DOWN:
12944  		case IB_PORT_INIT:
12945  			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12946  					   HFI1_STATUS_IB_READY);
12947  			break;
12948  		case IB_PORT_ARMED:
12949  			*ppd->statusp |= HFI1_STATUS_IB_CONF;
12950  			break;
12951  		case IB_PORT_ACTIVE:
12952  			*ppd->statusp |= HFI1_STATUS_IB_READY;
12953  			break;
12954  		}
12955  	}
12956  	dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12957  		    opa_lstate_name(state), state);
12958  }
12959  
12960  /**
12961   * wait_logical_linkstate - wait for an IB link state change to occur
12962   * @ppd: port device
12963   * @state: the state to wait for
12964   * @msecs: the number of milliseconds to wait
12965   *
12966   * Wait up to msecs milliseconds for IB link state change to occur.
12967   * For now, take the easy polling route.
12968   * Returns 0 if state reached, otherwise -ETIMEDOUT.
12969   */
12970  static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12971  				  int msecs)
12972  {
12973  	unsigned long timeout;
12974  	u32 new_state;
12975  
12976  	timeout = jiffies + msecs_to_jiffies(msecs);
12977  	while (1) {
12978  		new_state = chip_to_opa_lstate(ppd->dd,
12979  					       read_logical_state(ppd->dd));
12980  		if (new_state == state)
12981  			break;
12982  		if (time_after(jiffies, timeout)) {
12983  			dd_dev_err(ppd->dd,
12984  				   "timeout waiting for link state 0x%x\n",
12985  				   state);
12986  			return -ETIMEDOUT;
12987  		}
12988  		msleep(20);
12989  	}
12990  
12991  	return 0;
12992  }
12993  
12994  static void log_state_transition(struct hfi1_pportdata *ppd, u32 state)
12995  {
12996  	u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);
12997  
12998  	dd_dev_info(ppd->dd,
12999  		    "physical state changed to %s (0x%x), phy 0x%x\n",
13000  		    opa_pstate_name(ib_pstate), ib_pstate, state);
13001  }
13002  
13003  /*
13004   * Read the physical hardware link state and check whether it matches
13005   * the host driver's anticipated state.
13006   */
13007  static void log_physical_state(struct hfi1_pportdata *ppd, u32 state)
13008  {
13009  	u32 read_state = read_physical_state(ppd->dd);
13010  
13011  	if (read_state == state) {
13012  		log_state_transition(ppd, state);
13013  	} else {
13014  		dd_dev_err(ppd->dd,
13015  			   "anticipated phy link state 0x%x, read 0x%x\n",
13016  			   state, read_state);
13017  	}
13018  }
13019  
13020  /*
13021   * wait_physical_linkstate - wait for a physical link state change to occur
13022   * @ppd: port device
13023   * @state: the state to wait for
13024   * @msecs: the number of milliseconds to wait
13025   *
13026   * Wait up to msecs milliseconds for physical link state change to occur.
13027   * Returns 0 if state reached, otherwise -ETIMEDOUT.
13028   */
13029  static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
13030  				   int msecs)
13031  {
13032  	u32 read_state;
13033  	unsigned long timeout;
13034  
13035  	timeout = jiffies + msecs_to_jiffies(msecs);
13036  	while (1) {
13037  		read_state = read_physical_state(ppd->dd);
13038  		if (read_state == state)
13039  			break;
13040  		if (time_after(jiffies, timeout)) {
13041  			dd_dev_err(ppd->dd,
13042  				   "timeout waiting for phy link state 0x%x\n",
13043  				   state);
13044  			return -ETIMEDOUT;
13045  		}
13046  		usleep_range(1950, 2050); /* sleep 2ms-ish */
13047  	}
13048  
13049  	log_state_transition(ppd, state);
13050  	return 0;
13051  }
13052  
13053  /*
13054   * wait_phys_link_offline_substates - wait for any offline substate
13055   * @ppd: port device
13056   * @msecs: the number of milliseconds to wait
13057   *
13058   * Wait up to msecs milliseconds for the physical link to reach any
13059   * offline substate.
13060   * Returns the physical state read when one is reached, otherwise -ETIMEDOUT.
13061   */
13062  static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
13063  					    int msecs)
13064  {
13065  	u32 read_state;
13066  	unsigned long timeout;
13067  
13068  	timeout = jiffies + msecs_to_jiffies(msecs);
13069  	while (1) {
13070  		read_state = read_physical_state(ppd->dd);
13071  		if ((read_state & 0xF0) == PLS_OFFLINE)
13072  			break;
13073  		if (time_after(jiffies, timeout)) {
13074  			dd_dev_err(ppd->dd,
13075  				   "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
13076  				   read_state, msecs);
13077  			return -ETIMEDOUT;
13078  		}
13079  		usleep_range(1950, 2050); /* sleep 2ms-ish */
13080  	}
13081  
13082  	log_state_transition(ppd, read_state);
13083  	return read_state;
13084  }
13085  
13086  /*
13087   * wait_phys_link_out_of_offline - wait for any out of offline state
13088   * @ppd: port device
13089   * @msecs: the number of milliseconds to wait
13090   *
13091   * Wait up to msecs milliseconds for the physical link to leave the
13092   * offline state.
13093   * Returns the physical state read once out of offline, otherwise -ETIMEDOUT.
13094   */
13095  static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
13096  					 int msecs)
13097  {
13098  	u32 read_state;
13099  	unsigned long timeout;
13100  
13101  	timeout = jiffies + msecs_to_jiffies(msecs);
13102  	while (1) {
13103  		read_state = read_physical_state(ppd->dd);
13104  		if ((read_state & 0xF0) != PLS_OFFLINE)
13105  			break;
13106  		if (time_after(jiffies, timeout)) {
13107  			dd_dev_err(ppd->dd,
13108  				   "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n",
13109  				   read_state, msecs);
13110  			return -ETIMEDOUT;
13111  		}
13112  		usleep_range(1950, 2050); /* sleep 2ms-ish */
13113  	}
13114  
13115  	log_state_transition(ppd, read_state);
13116  	return read_state;
13117  }
13118  
13119  #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
13120  (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
13121  
13122  #define SET_STATIC_RATE_CONTROL_SMASK(r) \
13123  (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
13124  
13125  void hfi1_init_ctxt(struct send_context *sc)
13126  {
13127  	if (sc) {
13128  		struct hfi1_devdata *dd = sc->dd;
13129  		u64 reg;
13130  		u8 set = (sc->type == SC_USER ?
13131  			  HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
13132  			  HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
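		/*
		 * Note the inversion below: when static rate control is
		 * enabled for this context type, the "disallow PBC static
		 * rate control" bit is cleared; otherwise it is set.
		 */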
13133  		reg = read_kctxt_csr(dd, sc->hw_context,
13134  				     SEND_CTXT_CHECK_ENABLE);
13135  		if (set)
13136  			CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
13137  		else
13138  			SET_STATIC_RATE_CONTROL_SMASK(reg);
13139  		write_kctxt_csr(dd, sc->hw_context,
13140  				SEND_CTXT_CHECK_ENABLE, reg);
13141  	}
13142  }
13143  
13144  int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
13145  {
13146  	int ret = 0;
13147  	u64 reg;
13148  
13149  	if (dd->icode != ICODE_RTL_SILICON) {
13150  		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
13151  			dd_dev_info(dd, "%s: tempsense not supported by HW\n",
13152  				    __func__);
13153  		return -EINVAL;
13154  	}
13155  	reg = read_csr(dd, ASIC_STS_THERM);
13156  	temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
13157  		      ASIC_STS_THERM_CURR_TEMP_MASK);
13158  	temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
13159  			ASIC_STS_THERM_LO_TEMP_MASK);
13160  	temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
13161  			ASIC_STS_THERM_HI_TEMP_MASK);
13162  	temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
13163  			  ASIC_STS_THERM_CRIT_TEMP_MASK);
13164  	/* triggers is a 3-bit value - 1 bit per trigger. */
13165  	temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
13166  
13167  	return ret;
13168  }
13169  
13170  /* ========================================================================= */
13171  
13172  /**
13173   * read_mod_write() - Calculate the IRQ register index and set/clear the bits
13174   * @dd: valid devdata
13175   * @src: IRQ source to determine register index from
13176   * @bits: the bits to set or clear
13177   * @set: true == set the bits, false == clear the bits
13178   *
13179   */
13180  static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits,
13181  			   bool set)
13182  {
13183  	u64 reg;
13184  	u16 idx = src / BITS_PER_REGISTER;
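	/*
	 * Illustrative example, assuming BITS_PER_REGISTER is 64 (the
	 * CCE_INT_MASK CSRs are 64 bits wide): IRQ source 130 selects mask
	 * register idx = 130 / 64 = 2, i.e. CCE_INT_MASK + 16, and the
	 * caller passes the bit(s) within that register in @bits.
	 */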
13185  
13186  	spin_lock(&dd->irq_src_lock);
13187  	reg = read_csr(dd, CCE_INT_MASK + (8 * idx));
13188  	if (set)
13189  		reg |= bits;
13190  	else
13191  		reg &= ~bits;
13192  	write_csr(dd, CCE_INT_MASK + (8 * idx), reg);
13193  	spin_unlock(&dd->irq_src_lock);
13194  }
13195  
13196  /**
13197   * set_intr_bits() - Enable/disable a range of (one or more) IRQ sources
13198   * @dd: valid devdata
13199   * @first: first IRQ source to set/clear
13200   * @last: last IRQ source (inclusive) to set/clear
13201   * @set: true == set the bits, false == clear the bits
13202   *
13203   * If first == last, set the exact source.
13204   */
13205  int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set)
13206  {
13207  	u64 bits = 0;
13208  	u64 bit;
13209  	u16 src;
13210  
13211  	if (first > NUM_INTERRUPT_SOURCES || last > NUM_INTERRUPT_SOURCES)
13212  		return -EINVAL;
13213  
13214  	if (last < first)
13215  		return -ERANGE;
13216  
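	/*
	 * Accumulate bits for one mask register at a time: when the source
	 * index wraps back to bit position 0, flush what was collected for
	 * the previous register, then keep going.  The final (possibly
	 * partial) register is written after the loop.
	 */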
13217  	for (src = first; src <= last; src++) {
13218  		bit = src % BITS_PER_REGISTER;
13219  		/* wrapped to next register? */
13220  		if (!bit && bits) {
13221  			read_mod_write(dd, src - 1, bits, set);
13222  			bits = 0;
13223  		}
13224  		bits |= BIT_ULL(bit);
13225  	}
13226  	read_mod_write(dd, last, bits, set);
13227  
13228  	return 0;
13229  }
13230  
13231  /*
13232   * Clear all interrupt sources on the chip.
13233   */
13234  void clear_all_interrupts(struct hfi1_devdata *dd)
13235  {
13236  	int i;
13237  
13238  	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13239  		write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
13240  
13241  	write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
13242  	write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
13243  	write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
13244  	write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
13245  	write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
13246  	write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
13247  	write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
13248  	for (i = 0; i < chip_send_contexts(dd); i++)
13249  		write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
13250  	for (i = 0; i < chip_sdma_engines(dd); i++)
13251  		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
13252  
13253  	write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
13254  	write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
13255  	write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
13256  }
13257  
13258  /*
13259   * Remap the interrupt source from the general handler to the given MSI-X
13260   * interrupt.
13261   */
13262  void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
13263  {
13264  	u64 reg;
13265  	int m, n;
13266  
13267  	/* clear from the handled mask of the general interrupt */
13268  	m = isrc / 64;
13269  	n = isrc % 64;
13270  	if (likely(m < CCE_NUM_INT_CSRS)) {
13271  		dd->gi_mask[m] &= ~((u64)1 << n);
13272  	} else {
13273  		dd_dev_err(dd, "remap interrupt err\n");
13274  		return;
13275  	}
13276  
13277  	/* direct the chip source to the given MSI-X interrupt */
13278  	m = isrc / 8;
13279  	n = isrc % 8;
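	/*
	 * Each CCE_INT_MAP CSR holds 8 one-byte MSI-X vector numbers.
	 * Illustrative example: isrc = 21 selects map register 21 / 8 = 2
	 * and byte lane 21 % 8 = 5; only that byte is rewritten below.
	 */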
13280  	reg = read_csr(dd, CCE_INT_MAP + (8 * m));
13281  	reg &= ~((u64)0xff << (8 * n));
13282  	reg |= ((u64)msix_intr & 0xff) << (8 * n);
13283  	write_csr(dd, CCE_INT_MAP + (8 * m), reg);
13284  }
13285  
13286  void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr)
13287  {
13288  	/*
13289  	 * SDMA engine interrupt sources are grouped by type, rather than
13290  	 * by engine.  Per-engine interrupts are as follows:
13291  	 *	SDMA
13292  	 *	SDMAProgress
13293  	 *	SDMAIdle
13294  	 */
13295  	remap_intr(dd, IS_SDMA_START + engine, msix_intr);
13296  	remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr);
13297  	remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr);
13298  }
13299  
13300  /*
13301   * Set the general handler to accept all interrupts, remap all
13302   * chip interrupts back to MSI-X 0.
13303   */
13304  void reset_interrupts(struct hfi1_devdata *dd)
13305  {
13306  	int i;
13307  
13308  	/* all interrupts handled by the general handler */
13309  	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13310  		dd->gi_mask[i] = ~(u64)0;
13311  
13312  	/* all chip interrupts map to MSI-X 0 */
13313  	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13314  		write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13315  }
13316  
13317  /**
13318   * set_up_interrupts() - Initialize the IRQ resources and state
13319   * @dd: valid devdata
13320   *
13321   */
13322  static int set_up_interrupts(struct hfi1_devdata *dd)
13323  {
13324  	int ret;
13325  
13326  	/* mask all interrupts */
13327  	set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
13328  
13329  	/* clear all pending interrupts */
13330  	clear_all_interrupts(dd);
13331  
13332  	/* reset general handler mask, chip MSI-X mappings */
13333  	reset_interrupts(dd);
13334  
13335  	/* ask for MSI-X interrupts */
13336  	ret = msix_initialize(dd);
13337  	if (ret)
13338  		return ret;
13339  
13340  	ret = msix_request_irqs(dd);
13341  	if (ret)
13342  		msix_clean_up_interrupts(dd);
13343  
13344  	return ret;
13345  }
13346  
13347  /*
13348   * Set up context values in dd.  Sets:
13349   *
13350   *	num_rcv_contexts - number of contexts being used
13351   *	n_krcv_queues - number of kernel contexts
13352   *	first_dyn_alloc_ctxt - first dynamically allocated context
13353   *                             in array of contexts
13354   *	freectxts  - number of free user contexts
13355   *	num_send_contexts - number of PIO send contexts being used
13356   *	num_netdev_contexts - number of contexts reserved for netdev
13357   */
13358  static int set_up_context_variables(struct hfi1_devdata *dd)
13359  {
13360  	unsigned long num_kernel_contexts;
13361  	u16 num_netdev_contexts;
13362  	int ret;
13363  	unsigned ngroups;
13364  	int rmt_count;
13365  	int user_rmt_reduced;
13366  	u32 n_usr_ctxts;
13367  	u32 send_contexts = chip_send_contexts(dd);
13368  	u32 rcv_contexts = chip_rcv_contexts(dd);
13369  
13370  	/*
13371  	 * Kernel receive contexts:
13372  	 * - Context 0 - control context (VL15/multicast/error)
13373  	 * - Context 1 - first kernel context
13374  	 * - Context 2 - second kernel context
13375  	 * ...
13376  	 */
13377  	if (n_krcvqs)
13378  		/*
13379  		 * n_krcvqs is the sum of module parameter kernel receive
13380  		 * contexts, krcvqs[].  It does not include the control
13381  		 * context, so add that.
13382  		 */
13383  		num_kernel_contexts = n_krcvqs + 1;
13384  	else
13385  		num_kernel_contexts = DEFAULT_KRCVQS + 1;
13386  	/*
13387  	 * Every kernel receive context needs an ACK send context.
13388  	 * One send context is allocated for each VL{0-7} and VL15.
13389  	 */
13390  	if (num_kernel_contexts > (send_contexts - num_vls - 1)) {
13391  		dd_dev_err(dd,
13392  			   "Reducing # kernel rcv contexts to: %d, from %lu\n",
13393  			   send_contexts - num_vls - 1,
13394  			   num_kernel_contexts);
13395  		num_kernel_contexts = send_contexts - num_vls - 1;
13396  	}
13397  
13398  	/*
13399  	 * User contexts:
13400  	 *	- default to 1 user context per real (non-HT) CPU core if
13401  	 *	  num_user_contexts is negative
13402  	 */
13403  	if (num_user_contexts < 0)
13404  		n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask);
13405  	else
13406  		n_usr_ctxts = num_user_contexts;
13407  	/*
13408  	 * Adjust the counts given a global max.
13409  	 */
13410  	if (num_kernel_contexts + n_usr_ctxts > rcv_contexts) {
13411  		dd_dev_err(dd,
13412  			   "Reducing # user receive contexts to: %u, from %u\n",
13413  			   (u32)(rcv_contexts - num_kernel_contexts),
13414  			   n_usr_ctxts);
13415  		/* recalculate */
13416  		n_usr_ctxts = rcv_contexts - num_kernel_contexts;
13417  	}
13418  
13419  	num_netdev_contexts =
13420  		hfi1_num_netdev_contexts(dd, rcv_contexts -
13421  					 (num_kernel_contexts + n_usr_ctxts),
13422  					 &node_affinity.real_cpu_mask);
13423  	/*
13424  	 * The RMT entries are currently allocated as shown below:
13425  	 * 1. QOS (0 to 128 entries);
13426  	 * 2. FECN (num_kernel_context - 1 + num_user_contexts +
13427  	 *    num_netdev_contexts);
13428  	 * 3. netdev (num_netdev_contexts).
13429  	 * Note that FECN oversubscribes the RMT by num_netdev_contexts
13430  	 * entries because both netdev and PSM could allocate any receive
13431  	 * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
13432  	 * and PSM FECN must reserve an RMT entry for each possible PSM receive
13433  	 * context.
13434  	 */
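	/*
	 * Illustrative budget, assuming NUM_MAP_ENTRIES is 256 (32 map
	 * registers x 8 entries each): 128 QOS entries, 8 netdev contexts
	 * counted twice (16), and 8 more for TID_RDMA with 9 kernel
	 * contexts gives rmt_count = 152, leaving 104 entries for user
	 * contexts before n_usr_ctxts must be reduced.
	 */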
13435  	rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_netdev_contexts * 2);
13436  	if (HFI1_CAP_IS_KSET(TID_RDMA))
13437  		rmt_count += num_kernel_contexts - 1;
13438  	if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
13439  		user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
13440  		dd_dev_err(dd,
13441  			   "RMT size is reducing the number of user receive contexts from %u to %d\n",
13442  			   n_usr_ctxts,
13443  			   user_rmt_reduced);
13444  		/* recalculate */
13445  		n_usr_ctxts = user_rmt_reduced;
13446  	}
13447  
13448  	/* the first N are kernel contexts, the rest are user/netdev contexts */
13449  	dd->num_rcv_contexts =
13450  		num_kernel_contexts + n_usr_ctxts + num_netdev_contexts;
13451  	dd->n_krcv_queues = num_kernel_contexts;
13452  	dd->first_dyn_alloc_ctxt = num_kernel_contexts;
13453  	dd->num_netdev_contexts = num_netdev_contexts;
13454  	dd->num_user_contexts = n_usr_ctxts;
13455  	dd->freectxts = n_usr_ctxts;
13456  	dd_dev_info(dd,
13457  		    "rcv contexts: chip %d, used %d (kernel %d, netdev %u, user %u)\n",
13458  		    rcv_contexts,
13459  		    (int)dd->num_rcv_contexts,
13460  		    (int)dd->n_krcv_queues,
13461  		    dd->num_netdev_contexts,
13462  		    dd->num_user_contexts);
13463  
13464  	/*
13465  	 * Receive array allocation:
13466  	 *   All RcvArray entries are divided into groups of 8. This
13467  	 *   is required by the hardware and will speed up writes to
13468  	 *   consecutive entries by using write-combining of the entire
13469  	 *   cacheline.
13470  	 *
13471  	 *   The groups are evenly divided among all contexts; any
13472  	 *   leftover groups are given to the first N user contexts.
13474  	 */
13475  	dd->rcv_entries.group_size = RCV_INCREMENT;
13476  	ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size;
13477  	dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13478  	dd->rcv_entries.nctxt_extra = ngroups -
13479  		(dd->num_rcv_contexts * dd->rcv_entries.ngroups);
13480  	dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13481  		    dd->rcv_entries.ngroups,
13482  		    dd->rcv_entries.nctxt_extra);
13483  	if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13484  	    MAX_EAGER_ENTRIES * 2) {
13485  		dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13486  			dd->rcv_entries.group_size;
13487  		dd_dev_info(dd,
13488  			    "RcvArray group count too high, change to %u\n",
13489  			    dd->rcv_entries.ngroups);
13490  		dd->rcv_entries.nctxt_extra = 0;
13491  	}
13492  	/*
13493  	 * PIO send contexts
13494  	 */
13495  	ret = init_sc_pools_and_sizes(dd);
13496  	if (ret >= 0) {	/* success */
13497  		dd->num_send_contexts = ret;
13498  		dd_dev_info(
13499  			dd,
13500  			"send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
13501  			send_contexts,
13502  			dd->num_send_contexts,
13503  			dd->sc_sizes[SC_KERNEL].count,
13504  			dd->sc_sizes[SC_ACK].count,
13505  			dd->sc_sizes[SC_USER].count,
13506  			dd->sc_sizes[SC_VL15].count);
13507  		ret = 0;	/* success */
13508  	}
13509  
13510  	return ret;
13511  }
13512  
13513  /*
13514   * Set the device/port partition key table. The MAD code
13515   * will ensure that, at least, the partial management
13516   * partition key is present in the table.
13517   */
13518  static void set_partition_keys(struct hfi1_pportdata *ppd)
13519  {
13520  	struct hfi1_devdata *dd = ppd->dd;
13521  	u64 reg = 0;
13522  	int i;
13523  
13524  	dd_dev_info(dd, "Setting partition keys\n");
13525  	for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13526  		reg |= (ppd->pkeys[i] &
13527  			RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13528  			((i % 4) *
13529  			 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
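		/*
		 * Illustrative layout: pkeys are 16 bits wide, four per
		 * 64-bit CSR, so keys 0-3 are flushed to RCV_PARTITION_KEY
		 * + 0 and keys 4-7 to RCV_PARTITION_KEY + 8 below.
		 */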
13530  		/* Each register holds 4 PKey values. */
13531  		if ((i % 4) == 3) {
13532  			write_csr(dd, RCV_PARTITION_KEY +
13533  				  ((i - 3) * 2), reg);
13534  			reg = 0;
13535  		}
13536  	}
13537  
13538  	/* Always enable the HW pkey check when the pkey table is set */
13539  	add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
13540  }
13541  
13542  /*
13543   * These CSRs and memories are uninitialized on reset and must be
13544   * written before reading to set the ECC/parity bits.
13545   *
13546   * NOTE: All user context CSRs that are not mmaped write-only
13547   * (e.g. the TID flows) must be initialized even if the driver never
13548   * reads them.
13549   */
13550  static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13551  {
13552  	int i, j;
13553  
13554  	/* CceIntMap */
13555  	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13556  		write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13557  
13558  	/* SendCtxtCreditReturnAddr */
13559  	for (i = 0; i < chip_send_contexts(dd); i++)
13560  		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13561  
13562  	/* PIO Send buffers */
13563  	/* SDMA Send buffers */
13564  	/*
13565  	 * These are not normally read, and (presently) have no method
13566  	 * to be read, so are not pre-initialized
13567  	 */
13568  
13569  	/* RcvHdrAddr */
13570  	/* RcvHdrTailAddr */
13571  	/* RcvTidFlowTable */
13572  	for (i = 0; i < chip_rcv_contexts(dd); i++) {
13573  		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13574  		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13575  		for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
13576  			write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
13577  	}
13578  
13579  	/* RcvArray */
13580  	for (i = 0; i < chip_rcv_array_count(dd); i++)
13581  		hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
13582  
13583  	/* RcvQPMapTable */
13584  	for (i = 0; i < 32; i++)
13585  		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13586  }
13587  
13588  /*
13589   * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13590   */
13591  static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13592  			     u64 ctrl_bits)
13593  {
13594  	unsigned long timeout;
13595  	u64 reg;
13596  
13597  	/* is the condition present? */
13598  	reg = read_csr(dd, CCE_STATUS);
13599  	if ((reg & status_bits) == 0)
13600  		return;
13601  
13602  	/* clear the condition */
13603  	write_csr(dd, CCE_CTRL, ctrl_bits);
13604  
13605  	/* wait for the condition to clear */
13606  	timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13607  	while (1) {
13608  		reg = read_csr(dd, CCE_STATUS);
13609  		if ((reg & status_bits) == 0)
13610  			return;
13611  		if (time_after(jiffies, timeout)) {
13612  			dd_dev_err(dd,
13613  				   "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13614  				   status_bits, reg & status_bits);
13615  			return;
13616  		}
13617  		udelay(1);
13618  	}
13619  }
13620  
13621  /* set CCE CSRs to chip reset defaults */
13622  static void reset_cce_csrs(struct hfi1_devdata *dd)
13623  {
13624  	int i;
13625  
13626  	/* CCE_REVISION read-only */
13627  	/* CCE_REVISION2 read-only */
13628  	/* CCE_CTRL - bits clear automatically */
13629  	/* CCE_STATUS read-only, use CceCtrl to clear */
13630  	clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13631  	clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13632  	clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13633  	for (i = 0; i < CCE_NUM_SCRATCH; i++)
13634  		write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13635  	/* CCE_ERR_STATUS read-only */
13636  	write_csr(dd, CCE_ERR_MASK, 0);
13637  	write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13638  	/* CCE_ERR_FORCE leave alone */
13639  	for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13640  		write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13641  	write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13642  	/* CCE_PCIE_CTRL leave alone */
13643  	for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13644  		write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13645  		write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13646  			  CCE_MSIX_TABLE_UPPER_RESETCSR);
13647  	}
13648  	for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13649  		/* CCE_MSIX_PBA read-only */
13650  		write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13651  		write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13652  	}
13653  	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13654  		write_csr(dd, CCE_INT_MAP, 0);
13655  	for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13656  		/* CCE_INT_STATUS read-only */
13657  		write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13658  		write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13659  		/* CCE_INT_FORCE leave alone */
13660  		/* CCE_INT_BLOCKED read-only */
13661  	}
13662  	for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13663  		write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13664  }
13665  
13666  /* set MISC CSRs to chip reset defaults */
13667  static void reset_misc_csrs(struct hfi1_devdata *dd)
13668  {
13669  	int i;
13670  
13671  	for (i = 0; i < 32; i++) {
13672  		write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13673  		write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13674  		write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13675  	}
13676  	/*
13677  	 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13678  	 * only be written 128-byte chunks
13679  	 */
13680  	/* init RSA engine to clear lingering errors */
13681  	write_csr(dd, MISC_CFG_RSA_CMD, 1);
13682  	write_csr(dd, MISC_CFG_RSA_MU, 0);
13683  	write_csr(dd, MISC_CFG_FW_CTRL, 0);
13684  	/* MISC_STS_8051_DIGEST read-only */
13685  	/* MISC_STS_SBM_DIGEST read-only */
13686  	/* MISC_STS_PCIE_DIGEST read-only */
13687  	/* MISC_STS_FAB_DIGEST read-only */
13688  	/* MISC_ERR_STATUS read-only */
13689  	write_csr(dd, MISC_ERR_MASK, 0);
13690  	write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13691  	/* MISC_ERR_FORCE leave alone */
13692  }
13693  
13694  /* set TXE CSRs to chip reset defaults */
13695  static void reset_txe_csrs(struct hfi1_devdata *dd)
13696  {
13697  	int i;
13698  
13699  	/*
13700  	 * TXE Kernel CSRs
13701  	 */
13702  	write_csr(dd, SEND_CTRL, 0);
13703  	__cm_reset(dd, 0);	/* reset CM internal state */
13704  	/* SEND_CONTEXTS read-only */
13705  	/* SEND_DMA_ENGINES read-only */
13706  	/* SEND_PIO_MEM_SIZE read-only */
13707  	/* SEND_DMA_MEM_SIZE read-only */
13708  	write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13709  	pio_reset_all(dd);	/* SEND_PIO_INIT_CTXT */
13710  	/* SEND_PIO_ERR_STATUS read-only */
13711  	write_csr(dd, SEND_PIO_ERR_MASK, 0);
13712  	write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13713  	/* SEND_PIO_ERR_FORCE leave alone */
13714  	/* SEND_DMA_ERR_STATUS read-only */
13715  	write_csr(dd, SEND_DMA_ERR_MASK, 0);
13716  	write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13717  	/* SEND_DMA_ERR_FORCE leave alone */
13718  	/* SEND_EGRESS_ERR_STATUS read-only */
13719  	write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13720  	write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13721  	/* SEND_EGRESS_ERR_FORCE leave alone */
13722  	write_csr(dd, SEND_BTH_QP, 0);
13723  	write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13724  	write_csr(dd, SEND_SC2VLT0, 0);
13725  	write_csr(dd, SEND_SC2VLT1, 0);
13726  	write_csr(dd, SEND_SC2VLT2, 0);
13727  	write_csr(dd, SEND_SC2VLT3, 0);
13728  	write_csr(dd, SEND_LEN_CHECK0, 0);
13729  	write_csr(dd, SEND_LEN_CHECK1, 0);
13730  	/* SEND_ERR_STATUS read-only */
13731  	write_csr(dd, SEND_ERR_MASK, 0);
13732  	write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13733  	/* SEND_ERR_FORCE read-only */
13734  	for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13735  		write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13736  	for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13737  		write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13738  	for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++)
13739  		write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13740  	for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13741  		write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13742  	for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13743  		write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13744  	write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13745  	write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13746  	/* SEND_CM_CREDIT_USED_STATUS read-only */
13747  	write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13748  	write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13749  	write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13750  	write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13751  	write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13752  	for (i = 0; i < TXE_NUM_DATA_VL; i++)
13753  		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13754  	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13755  	/* SEND_CM_CREDIT_USED_VL read-only */
13756  	/* SEND_CM_CREDIT_USED_VL15 read-only */
13757  	/* SEND_EGRESS_CTXT_STATUS read-only */
13758  	/* SEND_EGRESS_SEND_DMA_STATUS read-only */
13759  	write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13760  	/* SEND_EGRESS_ERR_INFO read-only */
13761  	/* SEND_EGRESS_ERR_SOURCE read-only */
13762  
13763  	/*
13764  	 * TXE Per-Context CSRs
13765  	 */
13766  	for (i = 0; i < chip_send_contexts(dd); i++) {
13767  		write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13768  		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13769  		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13770  		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13771  		write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13772  		write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13773  		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13774  		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13775  		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13776  		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13777  		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13778  		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13779  	}
13780  
13781  	/*
13782  	 * TXE Per-SDMA CSRs
13783  	 */
13784  	for (i = 0; i < chip_sdma_engines(dd); i++) {
13785  		write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13786  		/* SEND_DMA_STATUS read-only */
13787  		write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13788  		write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13789  		write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13790  		/* SEND_DMA_HEAD read-only */
13791  		write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13792  		write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13793  		/* SEND_DMA_IDLE_CNT read-only */
13794  		write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13795  		write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13796  		/* SEND_DMA_DESC_FETCHED_CNT read-only */
13797  		/* SEND_DMA_ENG_ERR_STATUS read-only */
13798  		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13799  		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13800  		/* SEND_DMA_ENG_ERR_FORCE leave alone */
13801  		write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13802  		write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13803  		write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13804  		write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13805  		write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13806  		write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13807  		write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13808  	}
13809  }
13810  
13811  /*
13812   * Expect on entry:
13813   * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13814   */
13815  static void init_rbufs(struct hfi1_devdata *dd)
13816  {
13817  	u64 reg;
13818  	int count;
13819  
13820  	/*
13821  	 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13822  	 * clear.
13823  	 */
13824  	count = 0;
13825  	while (1) {
13826  		reg = read_csr(dd, RCV_STATUS);
13827  		if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13828  			    | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13829  			break;
13830  		/*
13831  		 * Give up after 1ms - maximum wait time.
13832  		 *
13833  		 * RBuf size is 136KiB.  Slowest possible is PCIe Gen1 x1 at
13834  		 * 250MB/s bandwidth.  Lower rate to 66% for overhead to get:
13835  		 *	136 KB / (66% * 250MB/s) = 844us
13836  		 */
13837  		if (count++ > 500) {
13838  			dd_dev_err(dd,
13839  				   "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13840  				   __func__, reg);
13841  			break;
13842  		}
13843  		udelay(2); /* do not busy-wait the CSR */
13844  	}
13845  
13846  	/* start the init - expect RcvCtrl to be 0 */
13847  	write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13848  
13849  	/*
13850  	 * Read to force the write of RcvCtrl.RxRbufInit.  There is a brief
13851  	 * period after the write before RcvStatus.RxRbufInitDone is valid.
13852  	 * The delay in the first run through the loop below is sufficient and
13853  	 * required before the first read of RcvStatus.RxRbufInitDone.
13854  	 */
13855  	read_csr(dd, RCV_CTRL);
13856  
13857  	/* wait for the init to finish */
13858  	count = 0;
13859  	while (1) {
13860  		/* delay is required first time through - see above */
13861  		udelay(2); /* do not busy-wait the CSR */
13862  		reg = read_csr(dd, RCV_STATUS);
13863  		if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13864  			break;
13865  
13866  		/* give up after 100us - slowest possible at 33MHz is 73us */
13867  		if (count++ > 50) {
13868  			dd_dev_err(dd,
13869  				   "%s: RcvStatus.RxRbufInit not set, continuing\n",
13870  				   __func__);
13871  			break;
13872  		}
13873  	}
13874  }
13875  
13876  /* set RXE CSRs to chip reset defaults */
13877  static void reset_rxe_csrs(struct hfi1_devdata *dd)
13878  {
13879  	int i, j;
13880  
13881  	/*
13882  	 * RXE Kernel CSRs
13883  	 */
13884  	write_csr(dd, RCV_CTRL, 0);
13885  	init_rbufs(dd);
13886  	/* RCV_STATUS read-only */
13887  	/* RCV_CONTEXTS read-only */
13888  	/* RCV_ARRAY_CNT read-only */
13889  	/* RCV_BUF_SIZE read-only */
13890  	write_csr(dd, RCV_BTH_QP, 0);
13891  	write_csr(dd, RCV_MULTICAST, 0);
13892  	write_csr(dd, RCV_BYPASS, 0);
13893  	write_csr(dd, RCV_VL15, 0);
13894  	/* this is a clear-down */
13895  	write_csr(dd, RCV_ERR_INFO,
13896  		  RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13897  	/* RCV_ERR_STATUS read-only */
13898  	write_csr(dd, RCV_ERR_MASK, 0);
13899  	write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13900  	/* RCV_ERR_FORCE leave alone */
13901  	for (i = 0; i < 32; i++)
13902  		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13903  	for (i = 0; i < 4; i++)
13904  		write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13905  	for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13906  		write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13907  	for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13908  		write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13909  	for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
13910  		clear_rsm_rule(dd, i);
13911  	for (i = 0; i < 32; i++)
13912  		write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13913  
13914  	/*
13915  	 * RXE Kernel and User Per-Context CSRs
13916  	 */
13917  	for (i = 0; i < chip_rcv_contexts(dd); i++) {
13918  		/* kernel */
13919  		write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13920  		/* RCV_CTXT_STATUS read-only */
13921  		write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13922  		write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13923  		write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13924  		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13925  		write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13926  		write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13927  		write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13928  		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13929  		write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13930  		write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13931  
13932  		/* user */
13933  		/* RCV_HDR_TAIL read-only */
13934  		write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13935  		/* RCV_EGR_INDEX_TAIL read-only */
13936  		write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13937  		/* RCV_EGR_OFFSET_TAIL read-only */
13938  		for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13939  			write_uctxt_csr(dd, i,
13940  					RCV_TID_FLOW_TABLE + (8 * j), 0);
13941  		}
13942  	}
13943  }
13944  
13945  /*
13946   * Set sc2vl tables.
13947   *
13948   * They power on to zeros, so to avoid send context errors
13949   * they need to be set:
13950   *
13951   * SC 0-7 -> VL 0-7 (respectively)
13952   * SC 15  -> VL 15
13953   * otherwise
13954   *        -> VL 0
13955   */
13956  static void init_sc2vl_tables(struct hfi1_devdata *dd)
13957  {
13958  	int i;
13959  	/* init per architecture spec, constrained by hardware capability */
13960  
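	/*
	 * Each SEND_SC2VLTn write below packs eight (SC, VL) pairs into one
	 * CSR, covering SCs 0-31 across four registers; the two DC table
	 * writes repeat the same mapping for received packets, sixteen
	 * pairs per register.
	 */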
13961  	/* HFI maps sent packets */
13962  	write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13963  		0,
13964  		0, 0, 1, 1,
13965  		2, 2, 3, 3,
13966  		4, 4, 5, 5,
13967  		6, 6, 7, 7));
13968  	write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13969  		1,
13970  		8, 0, 9, 0,
13971  		10, 0, 11, 0,
13972  		12, 0, 13, 0,
13973  		14, 0, 15, 15));
13974  	write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13975  		2,
13976  		16, 0, 17, 0,
13977  		18, 0, 19, 0,
13978  		20, 0, 21, 0,
13979  		22, 0, 23, 0));
13980  	write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13981  		3,
13982  		24, 0, 25, 0,
13983  		26, 0, 27, 0,
13984  		28, 0, 29, 0,
13985  		30, 0, 31, 0));
13986  
13987  	/* DC maps received packets */
13988  	write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13989  		15_0,
13990  		0, 0, 1, 1,  2, 2,  3, 3,  4, 4,  5, 5,  6, 6,  7,  7,
13991  		8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13992  	write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13993  		31_16,
13994  		16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13995  		24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13996  
13997  	/* initialize the cached sc2vl values consistently with h/w */
13998  	for (i = 0; i < 32; i++) {
13999  		if (i < 8 || i == 15)
14000  			*((u8 *)(dd->sc2vl) + i) = (u8)i;
14001  		else
14002  			*((u8 *)(dd->sc2vl) + i) = 0;
14003  	}
14004  }
14005  
14006  /*
14007   * Read chip sizes and then reset parts to sane, disabled, values.  We cannot
14008   * depend on the chip going through a power-on reset - a driver may be loaded
14009   * and unloaded many times.
14010   *
14011   * Do not write any CSR values to the chip in this routine - there may be
14012   * a reset following the (possible) FLR in this routine.
14013   *
14014   */
14015  static int init_chip(struct hfi1_devdata *dd)
14016  {
14017  	int i;
14018  	int ret = 0;
14019  
14020  	/*
14021  	 * Put the HFI CSRs in a known state.
14022  	 * Combine this with a DC reset.
14023  	 *
14024  	 * Stop the device from doing anything while we do a
14025  	 * reset.  We know there are no other active users of
14026  	 * the device since we are now in charge.  Turn off
14027  	 * all outbound and inbound traffic and make sure
14028  	 * the device does not generate any interrupts.
14029  	 */
14030  
14031  	/* disable send contexts and SDMA engines */
14032  	write_csr(dd, SEND_CTRL, 0);
14033  	for (i = 0; i < chip_send_contexts(dd); i++)
14034  		write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
14035  	for (i = 0; i < chip_sdma_engines(dd); i++)
14036  		write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
14037  	/* disable port (turn off RXE inbound traffic) and contexts */
14038  	write_csr(dd, RCV_CTRL, 0);
14039  	for (i = 0; i < chip_rcv_contexts(dd); i++)
14040  		write_csr(dd, RCV_CTXT_CTRL, 0);
14041  	/* mask all interrupt sources */
14042  	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
14043  		write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
14044  
14045  	/*
14046  	 * DC Reset: do a full DC reset before the register clear.
14047  	 * A recommended length of time to hold is one CSR read,
14048  	 * so reread the CceDcCtrl.  Then, hold the DC in reset
14049  	 * across the clear.
14050  	 */
14051  	write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
14052  	(void)read_csr(dd, CCE_DC_CTRL);
14053  
14054  	if (use_flr) {
14055  		/*
14056  		 * A FLR will reset the SPC core and part of the PCIe.
14057  		 * The parts that need to be restored have already been
14058  		 * saved.
14059  		 */
14060  		dd_dev_info(dd, "Resetting CSRs with FLR\n");
14061  
14062  		/* do the FLR, the DC reset will remain */
14063  		pcie_flr(dd->pcidev);
14064  
14065  		/* restore command and BARs */
14066  		ret = restore_pci_variables(dd);
14067  		if (ret) {
14068  			dd_dev_err(dd, "%s: Could not restore PCI variables\n",
14069  				   __func__);
14070  			return ret;
14071  		}
14072  
14073  		if (is_ax(dd)) {
14074  			dd_dev_info(dd, "Resetting CSRs with FLR\n");
14075  			pcie_flr(dd->pcidev);
14076  			ret = restore_pci_variables(dd);
14077  			if (ret) {
14078  				dd_dev_err(dd, "%s: Could not restore PCI variables\n",
14079  					   __func__);
14080  				return ret;
14081  			}
14082  		}
14083  	} else {
14084  		dd_dev_info(dd, "Resetting CSRs with writes\n");
14085  		reset_cce_csrs(dd);
14086  		reset_txe_csrs(dd);
14087  		reset_rxe_csrs(dd);
14088  		reset_misc_csrs(dd);
14089  	}
14090  	/* clear the DC reset */
14091  	write_csr(dd, CCE_DC_CTRL, 0);
14092  
14093  	/* Set the LED off */
14094  	setextled(dd, 0);
14095  
14096  	/*
14097  	 * Clear the QSFP reset.
14098  	 * An FLR enforces a 0 on all out pins. The driver does not touch
14099  	 * ASIC_QSFPn_OUT otherwise.  This leaves RESET_N low and
14100  	 * anything plugged in held constantly in reset, if it pays attention
14101  	 * to RESET_N.
14102  	 * Prime examples of this are optical cables. Set all pins high.
14103  	 * I2CCLK and I2CDAT will change per direction, and INT_N and
14104  	 * MODPRS_N are input only and their value is ignored.
14105  	 */
14106  	write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
14107  	write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
14108  	init_chip_resources(dd);
14109  	return ret;
14110  }
14111  
14112  static void init_early_variables(struct hfi1_devdata *dd)
14113  {
14114  	int i;
14115  
14116  	/* assign link credit variables */
14117  	dd->vau = CM_VAU;
14118  	dd->link_credits = CM_GLOBAL_CREDITS;
14119  	if (is_ax(dd))
14120  		dd->link_credits--;
14121  	dd->vcu = cu_to_vcu(hfi1_cu);
14122  	/* enough room for 8 MAD packets plus header - 17K */
14123  	dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
14124  	if (dd->vl15_init > dd->link_credits)
14125  		dd->vl15_init = dd->link_credits;
14126  
14127  	write_uninitialized_csrs_and_memories(dd);
14128  
14129  	if (HFI1_CAP_IS_KSET(PKEY_CHECK))
14130  		for (i = 0; i < dd->num_pports; i++) {
14131  			struct hfi1_pportdata *ppd = &dd->pport[i];
14132  
14133  			set_partition_keys(ppd);
14134  		}
14135  	init_sc2vl_tables(dd);
14136  }
14137  
14138  static void init_kdeth_qp(struct hfi1_devdata *dd)
14139  {
14140  	write_csr(dd, SEND_BTH_QP,
14141  		  (RVT_KDETH_QP_PREFIX & SEND_BTH_QP_KDETH_QP_MASK) <<
14142  		  SEND_BTH_QP_KDETH_QP_SHIFT);
14143  
14144  	write_csr(dd, RCV_BTH_QP,
14145  		  (RVT_KDETH_QP_PREFIX & RCV_BTH_QP_KDETH_QP_MASK) <<
14146  		  RCV_BTH_QP_KDETH_QP_SHIFT);
14147  }
14148  
14149  /**
14150   * hfi1_get_qp_map - get qp map
14151   * @dd: device data
14152   * @idx: index to read
14153   */
14154  u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx)
14155  {
14156  	u64 reg = read_csr(dd, RCV_QP_MAP_TABLE + (idx / 8) * 8);
14157  
14158  	reg >>= (idx % 8) * 8;
14159  	return reg;
14160  }
14161  
14162  /**
14163   * init_qpmap_table - init qp map
14164   * @dd: device data
14165   * @first_ctxt: first context
14166   * @last_ctxt: last context
14167   *
14168   * This routine sets the qpn mapping table that
14169   * is indexed by qpn[8:1].
14170   *
14171   * The routine will round robin the 256 settings
14172   * from first_ctxt to last_ctxt.
14173   *
14174   * The first/last looks ahead to having specialized
14175   * receive contexts for mgmt and bypass.  Normal
14176   * verbs traffic is assumed to be on a range
14177   * of receive contexts.
14178   */
14179  static void init_qpmap_table(struct hfi1_devdata *dd,
14180  			     u32 first_ctxt,
14181  			     u32 last_ctxt)
14182  {
14183  	u64 reg = 0;
14184  	u64 regno = RCV_QP_MAP_TABLE;
14185  	int i;
14186  	u64 ctxt = first_ctxt;
14187  
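	/*
	 * Each RcvQPMapTable CSR holds eight one-byte context numbers, so
	 * the 256 entries span 32 consecutive registers.  Illustrative
	 * example: with first_ctxt = 1 and last_ctxt = 3, entry i gets
	 * context 1 + (i % 3), and a full register is flushed every 8
	 * entries.
	 */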
14188  	for (i = 0; i < 256; i++) {
14189  		reg |= ctxt << (8 * (i % 8));
14190  		ctxt++;
14191  		if (ctxt > last_ctxt)
14192  			ctxt = first_ctxt;
14193  		if (i % 8 == 7) {
14194  			write_csr(dd, regno, reg);
14195  			reg = 0;
14196  			regno += 8;
14197  		}
14198  	}
14199  
14200  	add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
14201  			| RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
14202  }
14203  
14204  struct rsm_map_table {
14205  	u64 map[NUM_MAP_REGS];
14206  	unsigned int used;
14207  };
14208  
14209  struct rsm_rule_data {
14210  	u8 offset;
14211  	u8 pkt_type;
14212  	u32 field1_off;
14213  	u32 field2_off;
14214  	u32 index1_off;
14215  	u32 index1_width;
14216  	u32 index2_off;
14217  	u32 index2_width;
14218  	u32 mask1;
14219  	u32 value1;
14220  	u32 mask2;
14221  	u32 value2;
14222  };
14223  
14224  /*
14225   * Return an initialized RMT map table for users to fill in.  OK if it
14226   * returns NULL, indicating no table.
14227   */
14228  static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
14229  {
14230  	struct rsm_map_table *rmt;
14231  	u8 rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is default if a0 ver. */
14232  
14233  	rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
14234  	if (rmt) {
14235  		memset(rmt->map, rxcontext, sizeof(rmt->map));
14236  		rmt->used = 0;
14237  	}
14238  
14239  	return rmt;
14240  }
14241  
14242  /*
14243   * Write the final RMT map table to the chip and free the table.  OK if
14244   * table is NULL.
14245   */
14246  static void complete_rsm_map_table(struct hfi1_devdata *dd,
14247  				   struct rsm_map_table *rmt)
14248  {
14249  	int i;
14250  
14251  	if (rmt) {
14252  		/* write table to chip */
14253  		for (i = 0; i < NUM_MAP_REGS; i++)
14254  			write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
14255  
14256  		/* enable RSM */
14257  		add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14258  	}
14259  }
14260  
14261  /* Return true if the given receive side mapping rule is in use */
14262  static bool has_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
14263  {
14264  	return read_csr(dd, RCV_RSM_CFG + (8 * rule_index)) != 0;
14265  }
14266  
14267  /*
14268   * Add a receive side mapping rule.
14269   */
14270  static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
14271  			 struct rsm_rule_data *rrd)
14272  {
14273  	write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
14274  		  (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
14275  		  1ull << rule_index | /* enable bit */
14276  		  (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
14277  	write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
14278  		  (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
14279  		  (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
14280  		  (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
14281  		  (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
14282  		  (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
14283  		  (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
14284  	write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
14285  		  (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
14286  		  (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
14287  		  (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
14288  		  (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
14289  }
14290  
14291  /*
14292   * Clear a receive side mapping rule.
14293   */
14294  static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
14295  {
14296  	write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
14297  	write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
14298  	write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
14299  }
14300  
14301  /* return the number of RSM map table entries that will be used for QOS */
14302  static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
14303  			   unsigned int *np)
14304  {
14305  	int i;
14306  	unsigned int m, n;
14307  	u8 max_by_vl = 0;
14308  
14309  	/* is QOS active at all? */
14310  	if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
14311  	    num_vls == 1 ||
14312  	    krcvqsset <= 1)
14313  		goto no_qos;
14314  
14315  	/* determine bits for qpn */
14316  	for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
14317  		if (krcvqs[i] > max_by_vl)
14318  			max_by_vl = krcvqs[i];
14319  	if (max_by_vl > 32)
14320  		goto no_qos;
14321  	m = ilog2(__roundup_pow_of_two(max_by_vl));
14322  
14323  	/* determine bits for vl */
14324  	n = ilog2(__roundup_pow_of_two(num_vls));
14325  
14326  	/* reject if too much is used */
14327  	if ((m + n) > 7)
14328  		goto no_qos;
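	/*
	 * Illustrative example: krcvqs = {4, 4} with num_vls = 2 gives
	 * max_by_vl = 4, so m = 2 qpn bits and n = 1 vl bit, consuming
	 * 1 << (2 + 1) = 8 RSM map table entries.
	 */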
14329  
14330  	if (mp)
14331  		*mp = m;
14332  	if (np)
14333  		*np = n;
14334  
14335  	return 1 << (m + n);
14336  
14337  no_qos:
14338  	if (mp)
14339  		*mp = 0;
14340  	if (np)
14341  		*np = 0;
14342  	return 0;
14343  }
14344  
14345  /**
14346   * init_qos - init RX qos
14347   * @dd: device data
14348   * @rmt: RSM map table
14349   *
14350   * This routine initializes Rule 0 and the RSM map table to implement
14351   * quality of service (qos).
14352   *
14353   * If all of the limit tests succeed, qos is applied based on the array
14354   * interpretation of krcvqs where entry 0 is VL0.
14355   *
14356   * The number of vl bits (n) and the number of qpn bits (m) are computed to
14357   * feed both the RSM map table and the single rule.
14358   */
14359  static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
14360  {
14361  	struct rsm_rule_data rrd;
14362  	unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
14363  	unsigned int rmt_entries;
14364  	u64 reg;
14365  
14366  	if (!rmt)
14367  		goto bail;
14368  	rmt_entries = qos_rmt_entries(dd, &m, &n);
14369  	if (rmt_entries == 0)
14370  		goto bail;
14371  	qpns_per_vl = 1 << m;
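	/*
	 * Illustrative layout: with m = 2 and n = 1 the rule extracts
	 * m + n = 3 index bits from the packet, and the loop below fills
	 * 8 map entries, striping each VL's krcvqs[i] kernel contexts
	 * across that VL's qpn slots.
	 */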
14372  
14373  	/* enough room in the map table? */
14374  	rmt_entries = 1 << (m + n);
14375  	if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
14376  		goto bail;
14377  
14378  	/* add qos entries to the RSM map table */
14379  	for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
14380  		unsigned tctxt;
14381  
14382  		for (qpn = 0, tctxt = ctxt;
14383  		     krcvqs[i] && qpn < qpns_per_vl; qpn++) {
14384  			unsigned idx, regoff, regidx;
14385  
14386  			/* generate the index the hardware will produce */
14387  			idx = rmt->used + ((qpn << n) ^ i);
14388  			regoff = (idx % 8) * 8;
14389  			regidx = idx / 8;
14390  			/* replace default with context number */
14391  			reg = rmt->map[regidx];
14392  			reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
14393  				<< regoff);
14394  			reg |= (u64)(tctxt++) << regoff;
14395  			rmt->map[regidx] = reg;
14396  			if (tctxt == ctxt + krcvqs[i])
14397  				tctxt = ctxt;
14398  		}
14399  		ctxt += krcvqs[i];
14400  	}
14401  
14402  	rrd.offset = rmt->used;
14403  	rrd.pkt_type = 2;
14404  	rrd.field1_off = LRH_BTH_MATCH_OFFSET;
14405  	rrd.field2_off = LRH_SC_MATCH_OFFSET;
14406  	rrd.index1_off = LRH_SC_SELECT_OFFSET;
14407  	rrd.index1_width = n;
14408  	rrd.index2_off = QPN_SELECT_OFFSET;
14409  	rrd.index2_width = m + n;
14410  	rrd.mask1 = LRH_BTH_MASK;
14411  	rrd.value1 = LRH_BTH_VALUE;
14412  	rrd.mask2 = LRH_SC_MASK;
14413  	rrd.value2 = LRH_SC_VALUE;
14414  
14415  	/* add rule 0 */
14416  	add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
14417  
14418  	/* mark RSM map entries as used */
14419  	rmt->used += rmt_entries;
14420  	/* map everything else to the mcast/err/vl15 context */
14421  	init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
14422  	dd->qos_shift = n + 1;
14423  	return;
14424  bail:
14425  	dd->qos_shift = 1;
14426  	init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
14427  }
14428  
14429  static void init_fecn_handling(struct hfi1_devdata *dd,
14430  			       struct rsm_map_table *rmt)
14431  {
14432  	struct rsm_rule_data rrd;
14433  	u64 reg;
14434  	int i, idx, regoff, regidx, start;
14435  	u8 offset;
14436  	u32 total_cnt;
14437  
14438  	if (HFI1_CAP_IS_KSET(TID_RDMA))
14439  		/* Exclude context 0 */
14440  		start = 1;
14441  	else
14442  		start = dd->first_dyn_alloc_ctxt;
14443  
14444  	total_cnt = dd->num_rcv_contexts - start;
14445  
14446  	/* there needs to be enough room in the map table */
14447  	if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
14448  		dd_dev_err(dd, "FECN handling disabled - too many contexts allocated\n");
14449  		return;
14450  	}
14451  
14452  	/*
14453  	 * RSM will extract the destination context as an index into the
14454  	 * map table.  The destination contexts are a sequential block
14455  	 * in the range start...num_rcv_contexts-1 (inclusive).
14456  	 * Map entries are accessed as offset + extracted value.  Adjust
14457  	 * the added offset so this sequence can be placed anywhere in
14458  	 * the table - as long as the entries themselves do not wrap.
14459  	 * There are only enough bits in offset for the table size, so
14460  	 * start with that to allow for a "negative" offset.
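	 *
	 * For example, with a 256-entry map table, rmt->used = 40 and
	 * start = 8: offset = (u8)(256 + 40 - 8) = 32, so context 8 is
	 * looked up at map entry 32 + 8 = 40, the first free entry.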
14461  	 */
14462  	offset = (u8)(NUM_MAP_ENTRIES + rmt->used - start);
14463  
14464  	for (i = start, idx = rmt->used; i < dd->num_rcv_contexts;
14465  	     i++, idx++) {
14466  		/* replace with identity mapping */
14467  		regoff = (idx % 8) * 8;
14468  		regidx = idx / 8;
14469  		reg = rmt->map[regidx];
14470  		reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
14471  		reg |= (u64)i << regoff;
14472  		rmt->map[regidx] = reg;
14473  	}
14474  
14475  	/*
14476  	 * For RSM intercept of Expected FECN packets:
14477  	 * o packet type 0 - expected
14478  	 * o match on F (bit 95), using select/match 1, and
14479  	 * o match on SH (bit 133), using select/match 2.
14480  	 *
14481  	 * Use index 1 to extract the 8-bit receive context from DestQP
14482  	 * (start at bit 64).  Use that as the RSM map table index.
14483  	 */
14484  	rrd.offset = offset;
14485  	rrd.pkt_type = 0;
14486  	rrd.field1_off = 95;
14487  	rrd.field2_off = 133;
14488  	rrd.index1_off = 64;
14489  	rrd.index1_width = 8;
14490  	rrd.index2_off = 0;
14491  	rrd.index2_width = 0;
14492  	rrd.mask1 = 1;
14493  	rrd.value1 = 1;
14494  	rrd.mask2 = 1;
14495  	rrd.value2 = 1;
14496  
14497  	/* add rule 1 */
14498  	add_rsm_rule(dd, RSM_INS_FECN, &rrd);
14499  
14500  	rmt->used += total_cnt;
14501  }
14502  
14503  static inline bool hfi1_is_rmt_full(int start, int spare)
14504  {
14505  	return (start + spare) > NUM_MAP_ENTRIES;
14506  }
14507  
14508  static bool hfi1_netdev_update_rmt(struct hfi1_devdata *dd)
14509  {
14510  	u8 i, j;
14511  	u8 ctx_id = 0;
14512  	u64 reg;
14513  	u32 regoff;
14514  	int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
14515  	int ctxt_count = hfi1_netdev_ctxt_count(dd);
14516  
14517  	/* We already have contexts mapped in RMT */
14518  	if (has_rsm_rule(dd, RSM_INS_VNIC) || has_rsm_rule(dd, RSM_INS_AIP)) {
14519  		dd_dev_info(dd, "Contexts are already mapped in RMT\n");
14520  		return true;
14521  	}
14522  
14523  	if (hfi1_is_rmt_full(rmt_start, NUM_NETDEV_MAP_ENTRIES)) {
14524  		dd_dev_err(dd, "Not enough RMT entries used = %d\n",
14525  			   rmt_start);
14526  		return false;
14527  	}
14528  
14529  	dev_dbg(&(dd)->pcidev->dev, "RMT start = %d, end %d\n",
14530  		rmt_start,
14531  		rmt_start + NUM_NETDEV_MAP_ENTRIES);
14532  
14533  	/* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
14534  	regoff = RCV_RSM_MAP_TABLE + (rmt_start / 8) * 8;
14535  	reg = read_csr(dd, regoff);
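	/*
	 * Each 64-bit map register holds 8 one-byte context entries;
	 * j below is the byte lane of entry (rmt_start + i) within
	 * its register.
	 */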
14536  	for (i = 0; i < NUM_NETDEV_MAP_ENTRIES; i++) {
14537  		/* Update map register with netdev context */
14538  		j = (rmt_start + i) % 8;
14539  		reg &= ~(0xffllu << (j * 8));
14540  		reg |= (u64)hfi1_netdev_get_ctxt(dd, ctx_id++)->ctxt << (j * 8);
14541  		/* Wrap up netdev ctx index */
14542  		ctx_id %= ctxt_count;
14543  		/* Write back map register */
14544  		if (j == 7 || ((i + 1) == NUM_NETDEV_MAP_ENTRIES)) {
14545  			dev_dbg(&(dd)->pcidev->dev,
14546  				"RMT[%d] =0x%llx\n",
14547  				regoff - RCV_RSM_MAP_TABLE, reg);
14548  
14549  			write_csr(dd, regoff, reg);
14550  			regoff += 8;
14551  			if (i < (NUM_NETDEV_MAP_ENTRIES - 1))
14552  				reg = read_csr(dd, regoff);
14553  		}
14554  	}
14555  
14556  	return true;
14557  }
14558  
14559  static void hfi1_enable_rsm_rule(struct hfi1_devdata *dd,
14560  				 int rule, struct rsm_rule_data *rrd)
14561  {
14562  	if (!hfi1_netdev_update_rmt(dd)) {
14563  		dd_dev_err(dd, "Failed to update RMT for RSM%d rule\n", rule);
14564  		return;
14565  	}
14566  
14567  	add_rsm_rule(dd, rule, rrd);
14568  	add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14569  }
14570  
14571  void hfi1_init_aip_rsm(struct hfi1_devdata *dd)
14572  {
14573  	/*
14574  	 * go through with the initialization only if this rule does not
14575  	 * already exist
14576  	 */
14577  	if (atomic_fetch_inc(&dd->ipoib_rsm_usr_num) == 0) {
14578  		int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
14579  		struct rsm_rule_data rrd = {
14580  			.offset = rmt_start,
14581  			.pkt_type = IB_PACKET_TYPE,
14582  			.field1_off = LRH_BTH_MATCH_OFFSET,
14583  			.mask1 = LRH_BTH_MASK,
14584  			.value1 = LRH_BTH_VALUE,
14585  			.field2_off = BTH_DESTQP_MATCH_OFFSET,
14586  			.mask2 = BTH_DESTQP_MASK,
14587  			.value2 = BTH_DESTQP_VALUE,
14588  			.index1_off = DETH_AIP_SQPN_SELECT_OFFSET +
14589  					ilog2(NUM_NETDEV_MAP_ENTRIES),
14590  			.index1_width = ilog2(NUM_NETDEV_MAP_ENTRIES),
14591  			.index2_off = DETH_AIP_SQPN_SELECT_OFFSET,
14592  			.index2_width = ilog2(NUM_NETDEV_MAP_ENTRIES)
14593  		};
14594  
14595  		hfi1_enable_rsm_rule(dd, RSM_INS_AIP, &rrd);
14596  	}
14597  }
14598  
14599  /* Initialize RSM for VNIC */
14600  void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
14601  {
14602  	int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
14603  	struct rsm_rule_data rrd = {
14604  		/* Add rule for vnic */
14605  		.offset = rmt_start,
14606  		.pkt_type = 4,
14607  		/* Match 16B packets */
14608  		.field1_off = L2_TYPE_MATCH_OFFSET,
14609  		.mask1 = L2_TYPE_MASK,
14610  		.value1 = L2_16B_VALUE,
14611  		/* Match ETH L4 packets */
14612  		.field2_off = L4_TYPE_MATCH_OFFSET,
14613  		.mask2 = L4_16B_TYPE_MASK,
14614  		.value2 = L4_16B_ETH_VALUE,
14615  		/* Calc context from veswid and entropy */
14616  		.index1_off = L4_16B_HDR_VESWID_OFFSET,
14617  		.index1_width = ilog2(NUM_NETDEV_MAP_ENTRIES),
14618  		.index2_off = L2_16B_ENTROPY_OFFSET,
14619  		.index2_width = ilog2(NUM_NETDEV_MAP_ENTRIES)
14620  	};
14621  
14622  	hfi1_enable_rsm_rule(dd, RSM_INS_VNIC, &rrd);
14623  }
14624  
14625  void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
14626  {
14627  	clear_rsm_rule(dd, RSM_INS_VNIC);
14628  }
14629  
14630  void hfi1_deinit_aip_rsm(struct hfi1_devdata *dd)
14631  {
14632  	/* only actually clear the rule if it's the last user asking to do so */
14633  	if (atomic_fetch_add_unless(&dd->ipoib_rsm_usr_num, -1, 0) == 1)
14634  		clear_rsm_rule(dd, RSM_INS_AIP);
14635  }
14636  
14637  static int init_rxe(struct hfi1_devdata *dd)
14638  {
14639  	struct rsm_map_table *rmt;
14640  	u64 val;
14641  
14642  	/* enable all receive errors */
14643  	write_csr(dd, RCV_ERR_MASK, ~0ull);
14644  
14645  	rmt = alloc_rsm_map_table(dd);
14646  	if (!rmt)
14647  		return -ENOMEM;
14648  
14649  	/* set up QOS, including the QPN map table */
14650  	init_qos(dd, rmt);
14651  	init_fecn_handling(dd, rmt);
14652  	complete_rsm_map_table(dd, rmt);
14653  	/* record number of used rsm map entries for netdev */
14654  	hfi1_netdev_set_free_rmt_idx(dd, rmt->used);
14655  	kfree(rmt);
14656  
14657  	/*
14658  	 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14659  	 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14660  	 * space, PciCfgCap2.MaxPayloadSize in HFI).  There is only one
14661  	 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14662  	 * Max_PayLoad_Size set to its minimum of 128.
14663  	 *
14664  	 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14665  	 * (64 bytes).  Max_Payload_Size is possibly modified upward in
14666  	 * tune_pcie_caps() which is called after this routine.
14667  	 */
14668  
14669  	/* Have 16 bytes (4DW) of bypass header available in header queue */
14670  	val = read_csr(dd, RCV_BYPASS);
14671  	val &= ~RCV_BYPASS_HDR_SIZE_SMASK;
14672  	val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
14673  		RCV_BYPASS_HDR_SIZE_SHIFT);
14674  	write_csr(dd, RCV_BYPASS, val);
14675  	return 0;
14676  }
14677  
14678  static void init_other(struct hfi1_devdata *dd)
14679  {
14680  	/* enable all CCE errors */
14681  	write_csr(dd, CCE_ERR_MASK, ~0ull);
14682  	/* enable *some* Misc errors */
14683  	write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14684  	/* enable all DC errors, except LCB */
14685  	write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14686  	write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14687  }
14688  
14689  /*
14690   * Fill out the given AU table using the given CU.  A CU is defined in terms
14691   * of AUs.  The table is an encoding: given the index, how many AUs does that
14692   * represent?
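 *
 * The encoding programmed below is: entry 0 = 0 AUs, entry 1 = 1 AU,
 * and entry k (for k >= 2) = 2^(k - 1) * CU AUs, i.e. 2*cu up through
 * 64*cu for entry 7.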
14693   *
14694   * NOTE: Assumes that the register layout is the same for the
14695   * local and remote tables.
14696   */
14697  static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14698  			       u32 csr0to3, u32 csr4to7)
14699  {
14700  	write_csr(dd, csr0to3,
14701  		  0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14702  		  1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14703  		  2ull * cu <<
14704  		  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14705  		  4ull * cu <<
14706  		  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
14707  	write_csr(dd, csr4to7,
14708  		  8ull * cu <<
14709  		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14710  		  16ull * cu <<
14711  		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14712  		  32ull * cu <<
14713  		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14714  		  64ull * cu <<
14715  		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
14716  }
14717  
14718  static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14719  {
14720  	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
14721  			   SEND_CM_LOCAL_AU_TABLE4_TO7);
14722  }
14723  
14724  void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14725  {
14726  	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
14727  			   SEND_CM_REMOTE_AU_TABLE4_TO7);
14728  }
14729  
14730  static void init_txe(struct hfi1_devdata *dd)
14731  {
14732  	int i;
14733  
14734  	/* enable all PIO, SDMA, general, and Egress errors */
14735  	write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14736  	write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14737  	write_csr(dd, SEND_ERR_MASK, ~0ull);
14738  	write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14739  
14740  	/* enable all per-context and per-SDMA engine errors */
14741  	for (i = 0; i < chip_send_contexts(dd); i++)
14742  		write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14743  	for (i = 0; i < chip_sdma_engines(dd); i++)
14744  		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14745  
14746  	/* set the local CU to AU mapping */
14747  	assign_local_cm_au_table(dd, dd->vcu);
14748  
14749  	/*
14750  	 * Set reasonable default for Credit Return Timer
14751  	 * Don't set on Simulator - causes it to choke.
14752  	 */
14753  	if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14754  		write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14755  }
14756  
14757  int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14758  		       u16 jkey)
14759  {
14760  	u8 hw_ctxt;
14761  	u64 reg;
14762  
14763  	if (!rcd || !rcd->sc)
14764  		return -EINVAL;
14765  
14766  	hw_ctxt = rcd->sc->hw_context;
14767  	reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14768  		((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14769  		 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14770  	/* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14771  	if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14772  		reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14773  	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14774  	/*
14775  	 * Enable send-side J_KEY integrity check, unless this is A0 h/w
14776  	 */
14777  	if (!is_ax(dd)) {
14778  		reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14779  		reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14780  		write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14781  	}
14782  
14783  	/* Enable J_KEY check on receive context. */
14784  	reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14785  		((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14786  		 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14787  	write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);
14788  
14789  	return 0;
14790  }
14791  
14792  int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
14793  {
14794  	u8 hw_ctxt;
14795  	u64 reg;
14796  
14797  	if (!rcd || !rcd->sc)
14798  		return -EINVAL;
14799  
14800  	hw_ctxt = rcd->sc->hw_context;
14801  	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14802  	/*
14803  	 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14804  	 * This check would not have been enabled for A0 h/w, see
14805  	 * set_ctxt_jkey().
14806  	 */
14807  	if (!is_ax(dd)) {
14808  		reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14809  		reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14810  		write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14811  	}
14812  	/* Turn off the J_KEY on the receive side */
14813  	write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);
14814  
14815  	return 0;
14816  }
14817  
14818  int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14819  		       u16 pkey)
14820  {
14821  	u8 hw_ctxt;
14822  	u64 reg;
14823  
14824  	if (!rcd || !rcd->sc)
14825  		return -EINVAL;
14826  
14827  	hw_ctxt = rcd->sc->hw_context;
14828  	reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14829  		SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14830  	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14831  	reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14832  	reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14833  	reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
14834  	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14835  
14836  	return 0;
14837  }
14838  
14839  int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
14840  {
14841  	u8 hw_ctxt;
14842  	u64 reg;
14843  
14844  	if (!ctxt || !ctxt->sc)
14845  		return -EINVAL;
14846  
14847  	hw_ctxt = ctxt->sc->hw_context;
14848  	reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14849  	reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14850  	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14851  	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14852  
14853  	return 0;
14854  }
14855  
14856  /*
14857   * Start doing the clean up of the chip. Our clean up happens in multiple
14858   * stages and this is just the first.
14859   */
14860  void hfi1_start_cleanup(struct hfi1_devdata *dd)
14861  {
14862  	aspm_exit(dd);
14863  	free_cntrs(dd);
14864  	free_rcverr(dd);
14865  	finish_chip_resources(dd);
14866  }
14867  
14868  #define HFI_BASE_GUID(dev) \
14869  	((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14870  
14871  /*
14872   * Information can be shared between the two HFIs on the same ASIC
14873   * in the same OS.  This function finds the peer device and sets
14874   * up a shared structure.
14875   */
14876  static int init_asic_data(struct hfi1_devdata *dd)
14877  {
14878  	unsigned long index;
14879  	struct hfi1_devdata *peer;
14880  	struct hfi1_asic_data *asic_data;
14881  	int ret = 0;
14882  
14883  	/* pre-allocate the asic structure in case we are the first device */
14884  	asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14885  	if (!asic_data)
14886  		return -ENOMEM;
14887  
14888  	xa_lock_irq(&hfi1_dev_table);
14889  	/* Find our peer device */
14890  	xa_for_each(&hfi1_dev_table, index, peer) {
14891  		if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(peer)) &&
14892  		    dd->unit != peer->unit)
14893  			break;
14894  	}
14895  
14896  	if (peer) {
14897  		/* use already allocated structure */
14898  		dd->asic_data = peer->asic_data;
14899  		kfree(asic_data);
14900  	} else {
14901  		dd->asic_data = asic_data;
14902  		mutex_init(&dd->asic_data->asic_resource_mutex);
14903  	}
14904  	dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14905  	xa_unlock_irq(&hfi1_dev_table);
14906  
14907  	/* first one through - set up i2c devices */
14908  	if (!peer)
14909  		ret = set_up_i2c(dd, dd->asic_data);
14910  
14911  	return ret;
14912  }
14913  
14914  /*
14915   * Set dd->boardname.  Use a generic name if a name is not returned from
14916   * EFI variable space.
14917   *
14918   * Return 0 on success, -ENOMEM if space could not be allocated.
14919   */
14920  static int obtain_boardname(struct hfi1_devdata *dd)
14921  {
14922  	/* generic board description */
14923  	const char generic[] =
14924  		"Cornelis Omni-Path Host Fabric Interface Adapter 100 Series";
14925  	unsigned long size;
14926  	int ret;
14927  
14928  	ret = read_hfi1_efi_var(dd, "description", &size,
14929  				(void **)&dd->boardname);
14930  	if (ret) {
14931  		dd_dev_info(dd, "Board description not found\n");
14932  		/* use generic description */
14933  		dd->boardname = kstrdup(generic, GFP_KERNEL);
14934  		if (!dd->boardname)
14935  			return -ENOMEM;
14936  	}
14937  	return 0;
14938  }
14939  
14940  /*
14941   * Check the interrupt registers to make sure that they are mapped correctly.
14942   * It is intended to help the user identify any mismapping by the VMM when
14943   * the driver is running in a VM. This function should only be called before
14944   * interrupts are set up.
14945   *
14946   * Return 0 on success, -EINVAL on failure.
14947   */
14948  static int check_int_registers(struct hfi1_devdata *dd)
14949  {
14950  	u64 reg;
14951  	u64 all_bits = ~(u64)0;
14952  	u64 mask;
14953  
14954  	/* Clear CceIntMask[0] to avoid raising any interrupts */
14955  	mask = read_csr(dd, CCE_INT_MASK);
14956  	write_csr(dd, CCE_INT_MASK, 0ull);
14957  	reg = read_csr(dd, CCE_INT_MASK);
14958  	if (reg)
14959  		goto err_exit;
14960  
14961  	/* Clear all interrupt status bits */
14962  	write_csr(dd, CCE_INT_CLEAR, all_bits);
14963  	reg = read_csr(dd, CCE_INT_STATUS);
14964  	if (reg)
14965  		goto err_exit;
14966  
14967  	/* Set all interrupt status bits */
14968  	write_csr(dd, CCE_INT_FORCE, all_bits);
14969  	reg = read_csr(dd, CCE_INT_STATUS);
14970  	if (reg != all_bits)
14971  		goto err_exit;
14972  
14973  	/* Restore the interrupt mask */
14974  	write_csr(dd, CCE_INT_CLEAR, all_bits);
14975  	write_csr(dd, CCE_INT_MASK, mask);
14976  
14977  	return 0;
14978  err_exit:
14979  	write_csr(dd, CCE_INT_MASK, mask);
14980  	dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14981  	return -EINVAL;
14982  }
14983  
14984  /**
14985   * hfi1_init_dd() - Initialize most of the dd structure.
14986   * @dd: the dd device
14987   *
14988   * This is global, and is called directly at init to set up the
14989   * chip-specific function pointers for later use.
14990   */
14991  int hfi1_init_dd(struct hfi1_devdata *dd)
14992  {
14993  	struct pci_dev *pdev = dd->pcidev;
14994  	struct hfi1_pportdata *ppd;
14995  	u64 reg;
14996  	int i, ret;
14997  	static const char * const inames[] = { /* implementation names */
14998  		"RTL silicon",
14999  		"RTL VCS simulation",
15000  		"RTL FPGA emulation",
15001  		"Functional simulator"
15002  	};
15003  	struct pci_dev *parent = pdev->bus->self;
15004  	u32 sdma_engines = chip_sdma_engines(dd);
15005  
15006  	ppd = dd->pport;
15007  	for (i = 0; i < dd->num_pports; i++, ppd++) {
15008  		int vl;
15009  		/* init common fields */
15010  		hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
15011  		/* DC supports 4 link widths */
15012  		ppd->link_width_supported =
15013  			OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
15014  			OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
15015  		ppd->link_width_downgrade_supported =
15016  			ppd->link_width_supported;
15017  		/* start out enabling only 4X */
15018  		ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
15019  		ppd->link_width_downgrade_enabled =
15020  					ppd->link_width_downgrade_supported;
15021  		/* link width active is 0 when link is down */
15022  		/* link width downgrade active is 0 when link is down */
15023  
15024  		if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
15025  		    num_vls > HFI1_MAX_VLS_SUPPORTED) {
15026  			dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n",
15027  				   num_vls, HFI1_MAX_VLS_SUPPORTED);
15028  			num_vls = HFI1_MAX_VLS_SUPPORTED;
15029  		}
15030  		ppd->vls_supported = num_vls;
15031  		ppd->vls_operational = ppd->vls_supported;
15032  		/* Set the default MTU. */
15033  		for (vl = 0; vl < num_vls; vl++)
15034  			dd->vld[vl].mtu = hfi1_max_mtu;
15035  		dd->vld[15].mtu = MAX_MAD_PACKET;
15036  		/*
15037  		 * Set the initial values to reasonable defaults; they will be
15038  		 * set for real when the link is up.
15039  		 */
15040  		ppd->overrun_threshold = 0x4;
15041  		ppd->phy_error_threshold = 0xf;
15042  		ppd->port_crc_mode_enabled = link_crc_mask;
15043  		/* initialize supported LTP CRC mode */
15044  		ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
15045  		/* initialize enabled LTP CRC mode */
15046  		ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
15047  		/* start in offline */
15048  		ppd->host_link_state = HLS_DN_OFFLINE;
15049  		init_vl_arb_caches(ppd);
15050  	}
15051  
15052  	/*
15053  	 * Do remaining PCIe setup and save PCIe values in dd.
15054  	 * Any error printing is already done by the init code.
15055  	 * On return, we have the chip mapped.
15056  	 */
15057  	ret = hfi1_pcie_ddinit(dd, pdev);
15058  	if (ret < 0)
15059  		goto bail_free;
15060  
15061  	/* Save PCI space registers to rewrite after device reset */
15062  	ret = save_pci_variables(dd);
15063  	if (ret < 0)
15064  		goto bail_cleanup;
15065  
15066  	dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
15067  			& CCE_REVISION_CHIP_REV_MAJOR_MASK;
15068  	dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
15069  			& CCE_REVISION_CHIP_REV_MINOR_MASK;
15070  
15071  	/*
15072  	 * Check interrupt registers mapping if the driver has no access to
15073  	 * the upstream component. In this case, it is likely that the driver
15074  	 * is running in a VM.
15075  	 */
15076  	if (!parent) {
15077  		ret = check_int_registers(dd);
15078  		if (ret)
15079  			goto bail_cleanup;
15080  	}
15081  
15082  	/*
15083  	 * obtain the hardware ID - NOT related to unit, which is a
15084  	 * software enumeration
15085  	 */
15086  	reg = read_csr(dd, CCE_REVISION2);
15087  	dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
15088  					& CCE_REVISION2_HFI_ID_MASK;
15089  	/* the variable size will remove unwanted bits */
15090  	dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
15091  	dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
15092  	dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
15093  		    dd->icode < ARRAY_SIZE(inames) ?
15094  		    inames[dd->icode] : "unknown", (int)dd->irev);
15095  
15096  	/* speeds the hardware can support */
15097  	dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
15098  	/* speeds allowed to run at */
15099  	dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
15100  	/* give a reasonable active value, will be set on link up */
15101  	dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
15102  
15103  	/* fix up link widths for emulation _p */
15104  	ppd = dd->pport;
15105  	if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
15106  		ppd->link_width_supported =
15107  			ppd->link_width_enabled =
15108  			ppd->link_width_downgrade_supported =
15109  			ppd->link_width_downgrade_enabled =
15110  				OPA_LINK_WIDTH_1X;
15111  	}
15112  	/* ensure num_vls isn't larger than the number of sdma engines */
15113  	if (HFI1_CAP_IS_KSET(SDMA) && num_vls > sdma_engines) {
15114  		dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
15115  			   num_vls, sdma_engines);
15116  		num_vls = sdma_engines;
15117  		ppd->vls_supported = sdma_engines;
15118  		ppd->vls_operational = ppd->vls_supported;
15119  	}
15120  
15121  	/*
15122  	 * Convert the ns parameter to the 64 * cclocks used in the CSR.
15123  	 * Limit the max if larger than the field holds.  If timeout is
15124  	 * non-zero, then the calculated field will be at least 1.
15125  	 *
15126  	 * Must be after icode is set up - the cclock rate depends
15127  	 * on knowing the hardware being used.
15128  	 */
15129  	dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
15130  	if (dd->rcv_intr_timeout_csr >
15131  			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
15132  		dd->rcv_intr_timeout_csr =
15133  			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
15134  	else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
15135  		dd->rcv_intr_timeout_csr = 1;
15136  
15137  	/* needs to be done before we look for the peer device */
15138  	read_guid(dd);
15139  
15140  	/* set up shared ASIC data with peer device */
15141  	ret = init_asic_data(dd);
15142  	if (ret)
15143  		goto bail_cleanup;
15144  
15145  	/* obtain chip sizes, reset chip CSRs */
15146  	ret = init_chip(dd);
15147  	if (ret)
15148  		goto bail_cleanup;
15149  
15150  	/* read in the PCIe link speed information */
15151  	ret = pcie_speeds(dd);
15152  	if (ret)
15153  		goto bail_cleanup;
15154  
15155  	/* call before get_platform_config(), after init_chip_resources() */
15156  	ret = eprom_init(dd);
15157  	if (ret)
15158  		goto bail_free_rcverr;
15159  
15160  	/* Needs to be called before hfi1_firmware_init */
15161  	get_platform_config(dd);
15162  
15163  	/* read in firmware */
15164  	ret = hfi1_firmware_init(dd);
15165  	if (ret)
15166  		goto bail_cleanup;
15167  
15168  	/*
15169  	 * In general, the PCIe Gen3 transition must occur after the
15170  	 * chip has been idled (so it won't initiate any PCIe transactions
15171  	 * e.g. an interrupt) and before the driver changes any registers
15172  	 * (the transition will reset the registers).
15173  	 *
15174  	 * In particular, place this call after:
15175  	 * - init_chip()     - the chip will not initiate any PCIe transactions
15176  	 * - pcie_speeds()   - reads the current link speed
15177  	 * - hfi1_firmware_init() - the needed firmware is ready to be
15178  	 *			    downloaded
15179  	 */
15180  	ret = do_pcie_gen3_transition(dd);
15181  	if (ret)
15182  		goto bail_cleanup;
15183  
15184  	/*
15185  	 * This should probably occur in hfi1_pcie_init(), but historically
15186  	 * occurs after the do_pcie_gen3_transition() code.
15187  	 */
15188  	tune_pcie_caps(dd);
15189  
15190  	/* start setting dd values and adjusting CSRs */
15191  	init_early_variables(dd);
15192  
15193  	parse_platform_config(dd);
15194  
15195  	ret = obtain_boardname(dd);
15196  	if (ret)
15197  		goto bail_cleanup;
15198  
15199  	snprintf(dd->boardversion, BOARD_VERS_MAX,
15200  		 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
15201  		 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
15202  		 (u32)dd->majrev,
15203  		 (u32)dd->minrev,
15204  		 (dd->revision >> CCE_REVISION_SW_SHIFT)
15205  		    & CCE_REVISION_SW_MASK);
15206  
15207  	/* alloc VNIC/AIP rx data */
15208  	ret = hfi1_alloc_rx(dd);
15209  	if (ret)
15210  		goto bail_cleanup;
15211  
15212  	ret = set_up_context_variables(dd);
15213  	if (ret)
15214  		goto bail_cleanup;
15215  
15216  	/* set initial RXE CSRs */
15217  	ret = init_rxe(dd);
15218  	if (ret)
15219  		goto bail_cleanup;
15220  
15221  	/* set initial TXE CSRs */
15222  	init_txe(dd);
15223  	/* set initial non-RXE, non-TXE CSRs */
15224  	init_other(dd);
15225  	/* set up KDETH QP prefix in both RX and TX CSRs */
15226  	init_kdeth_qp(dd);
15227  
15228  	ret = hfi1_dev_affinity_init(dd);
15229  	if (ret)
15230  		goto bail_cleanup;
15231  
15232  	/* send contexts must be set up before receive contexts */
15233  	ret = init_send_contexts(dd);
15234  	if (ret)
15235  		goto bail_cleanup;
15236  
15237  	ret = hfi1_create_kctxts(dd);
15238  	if (ret)
15239  		goto bail_cleanup;
15240  
15241  	/*
15242  	 * Initialize aspm, to be done after gen3 transition and setting up
15243  	 * contexts and before enabling interrupts
15244  	 */
15245  	aspm_init(dd);
15246  
15247  	ret = init_pervl_scs(dd);
15248  	if (ret)
15249  		goto bail_cleanup;
15250  
15251  	/* sdma init */
15252  	for (i = 0; i < dd->num_pports; ++i) {
15253  		ret = sdma_init(dd, i);
15254  		if (ret)
15255  			goto bail_cleanup;
15256  	}
15257  
15258  	/* use contexts created by hfi1_create_kctxts */
15259  	ret = set_up_interrupts(dd);
15260  	if (ret)
15261  		goto bail_cleanup;
15262  
15263  	ret = hfi1_comp_vectors_set_up(dd);
15264  	if (ret)
15265  		goto bail_clear_intr;
15266  
15267  	/* set up LCB access - must be after set_up_interrupts() */
15268  	init_lcb_access(dd);
15269  
15270  	/*
15271  	 * Serial number is created from the base guid:
15272  	 * [27:24] = base guid [38:35]
15273  	 * [23: 0] = base guid [23: 0]
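	 * (the ">> 11" below moves base guid bit 35 down to bit 24)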
15274  	 */
15275  	snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
15276  		 (dd->base_guid & 0xFFFFFF) |
15277  		     ((dd->base_guid >> 11) & 0xF000000));
15278  
15279  	dd->oui1 = dd->base_guid >> 56 & 0xFF;
15280  	dd->oui2 = dd->base_guid >> 48 & 0xFF;
15281  	dd->oui3 = dd->base_guid >> 40 & 0xFF;
15282  
15283  	ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
15284  	if (ret)
15285  		goto bail_clear_intr;
15286  
15287  	thermal_init(dd);
15288  
15289  	ret = init_cntrs(dd);
15290  	if (ret)
15291  		goto bail_clear_intr;
15292  
15293  	ret = init_rcverr(dd);
15294  	if (ret)
15295  		goto bail_free_cntrs;
15296  
15297  	init_completion(&dd->user_comp);
15298  
15299  	/* The user refcount starts with one to indicate an active device */
15300  	refcount_set(&dd->user_refcount, 1);
15301  
15302  	goto bail;
15303  
15304  bail_free_rcverr:
15305  	free_rcverr(dd);
15306  bail_free_cntrs:
15307  	free_cntrs(dd);
15308  bail_clear_intr:
15309  	hfi1_comp_vectors_clean_up(dd);
15310  	msix_clean_up_interrupts(dd);
15311  bail_cleanup:
15312  	hfi1_free_rx(dd);
15313  	hfi1_pcie_ddcleanup(dd);
15314  bail_free:
15315  	hfi1_free_devdata(dd);
15316  bail:
15317  	return ret;
15318  }
15319  
15320  static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
15321  			u32 dw_len)
15322  {
15323  	u32 delta_cycles;
15324  	u32 current_egress_rate = ppd->current_egress_rate;
15325  	/* rates here are in units of 10^6 bits/sec */
15326  
15327  	if (desired_egress_rate == -1)
15328  		return 0; /* shouldn't happen */
15329  
15330  	if (desired_egress_rate >= current_egress_rate)
15331  		return 0; /* we can't help go faster, only slower */
15332  
15333  	delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
15334  			egress_cycles(dw_len * 4, current_egress_rate);
15335  
15336  	return (u16)delta_cycles;
15337  }
15338  
15339  /**
15340   * create_pbc - build a pbc for transmission
15341   * @ppd: info of physical Hfi port
15342   * @flags: special case flags or-ed in built pbc
15343   * @srate_mbs: static rate
15344   * @vl: vl
15345   * @dw_len: dword length (header words + data words + pbc words)
15346   *
15347   * Create a PBC with the given flags, rate, VL, and length.
15348   *
15349   * NOTE: The PBC created will not insert any HCRC - all callers but one are
15350   * for verbs, which does not use this PSM feature.  The lone other caller
15351   * is for the diagnostic interface which calls this if the user does not
15352   * supply their own PBC.
15353   */
15354  u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
15355  	       u32 dw_len)
15356  {
15357  	u64 pbc, delay = 0;
15358  
15359  	if (unlikely(srate_mbs))
15360  		delay = delay_cycles(ppd, srate_mbs, dw_len);
15361  
15362  	pbc = flags
15363  		| (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
15364  		| ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
15365  		| (vl & PBC_VL_MASK) << PBC_VL_SHIFT
15366  		| (dw_len & PBC_LENGTH_DWS_MASK)
15367  			<< PBC_LENGTH_DWS_SHIFT;
15368  
15369  	return pbc;
15370  }
15371  
15372  #define SBUS_THERMAL    0x4f
15373  #define SBUS_THERM_MONITOR_MODE 0x1
15374  
15375  #define THERM_FAILURE(dev, ret, reason) \
15376  	dd_dev_err((dd),						\
15377  		   "Thermal sensor initialization failed: %s (%d)\n",	\
15378  		   (reason), (ret))
15379  
15380  /*
15381   * Initialize the thermal sensor.
15382   *
15383   * After initialization, enable polling of thermal sensor through
15384   * SBus interface. In order for this to work, the SBus Master
15385   * firmware has to be loaded because the HW polling
15386   * logic uses SBus interrupts, which are not supported with
15387   * default firmware. Otherwise, no data will be returned through
15388   * the ASIC_STS_THERM CSR.
15389   */
15390  static int thermal_init(struct hfi1_devdata *dd)
15391  {
15392  	int ret = 0;
15393  
15394  	if (dd->icode != ICODE_RTL_SILICON ||
15395  	    check_chip_resource(dd, CR_THERM_INIT, NULL))
15396  		return ret;
15397  
15398  	ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
15399  	if (ret) {
15400  		THERM_FAILURE(dd, ret, "Acquire SBus");
15401  		return ret;
15402  	}
15403  
15404  	dd_dev_info(dd, "Initializing thermal sensor\n");
15405  	/* Disable polling of thermal readings */
15406  	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
15407  	msleep(100);
15408  	/* Thermal Sensor Initialization */
15409  	/*    Step 1: Reset the Thermal SBus Receiver */
15410  	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15411  				RESET_SBUS_RECEIVER, 0);
15412  	if (ret) {
15413  		THERM_FAILURE(dd, ret, "Bus Reset");
15414  		goto done;
15415  	}
15416  	/*    Step 2: Set Reset bit in Thermal block */
15417  	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15418  				WRITE_SBUS_RECEIVER, 0x1);
15419  	if (ret) {
15420  		THERM_FAILURE(dd, ret, "Therm Block Reset");
15421  		goto done;
15422  	}
15423  	/*    Step 3: Write clock divider value (100MHz -> 2MHz) */
15424  	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
15425  				WRITE_SBUS_RECEIVER, 0x32);
15426  	if (ret) {
15427  		THERM_FAILURE(dd, ret, "Write Clock Div");
15428  		goto done;
15429  	}
15430  	/*    Step 4: Select temperature mode */
15431  	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
15432  				WRITE_SBUS_RECEIVER,
15433  				SBUS_THERM_MONITOR_MODE);
15434  	if (ret) {
15435  		THERM_FAILURE(dd, ret, "Write Mode Sel");
15436  		goto done;
15437  	}
15438  	/*    Step 5: De-assert block reset and start conversion */
15439  	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15440  				WRITE_SBUS_RECEIVER, 0x2);
15441  	if (ret) {
15442  		THERM_FAILURE(dd, ret, "Write Reset Deassert");
15443  		goto done;
15444  	}
15445  	/*    Step 5.1: Wait for first conversion (21.5ms per spec) */
15446  	msleep(22);
15447  
15448  	/* Enable polling of thermal readings */
15449  	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
15450  
15451  	/* Set initialized flag */
15452  	ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
15453  	if (ret)
15454  		THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
15455  
15456  done:
15457  	release_chip_resource(dd, CR_SBUS);
15458  	return ret;
15459  }
15460  
15461  static void handle_temp_err(struct hfi1_devdata *dd)
15462  {
15463  	struct hfi1_pportdata *ppd = &dd->pport[0];
15464  	/*
15465  	 * Thermal Critical Interrupt
15466  	 * Put the device into forced freeze mode, take link down to
15467  	 * offline, and put DC into reset.
15468  	 */
15469  	dd_dev_emerg(dd,
15470  		     "Critical temperature reached! Forcing device into freeze mode!\n");
15471  	dd->flags |= HFI1_FORCED_FREEZE;
15472  	start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
15473  	/*
15474  	 * Shut DC down as much and as quickly as possible.
15475  	 *
15476  	 * Step 1: Take the link down to OFFLINE. This will cause the
15477  	 *         8051 to put the Serdes in reset. However, we don't want to
15478  	 *         go through the entire link state machine since we want to
15479  	 *         shutdown ASAP. Furthermore, this is not a graceful shutdown
15480  	 *         but rather an attempt to save the chip.
15481  	 *         Code below is almost the same as quiet_serdes() but avoids
15482  	 *         all the extra work and the sleeps.
15483  	 */
15484  	ppd->driver_link_ready = 0;
15485  	ppd->link_enabled = 0;
15486  	set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
15487  				PLS_OFFLINE);
15488  	/*
15489  	 * Step 2: Shutdown LCB and 8051
15490  	 *         After shutdown, do not restore DC_CFG_RESET value.
15491  	 */
15492  	dc_shutdown(dd);
15493  }
15494