1  /*
2   * Copyright(c) 2015 - 2018 Intel Corporation.
3   *
4   * This file is provided under a dual BSD/GPLv2 license.  When using or
5   * redistributing this file, you may do so under either license.
6   *
7   * GPL LICENSE SUMMARY
8   *
9   * This program is free software; you can redistribute it and/or modify
10   * it under the terms of version 2 of the GNU General Public License as
11   * published by the Free Software Foundation.
12   *
13   * This program is distributed in the hope that it will be useful, but
14   * WITHOUT ANY WARRANTY; without even the implied warranty of
15   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16   * General Public License for more details.
17   *
18   * BSD LICENSE
19   *
20   * Redistribution and use in source and binary forms, with or without
21   * modification, are permitted provided that the following conditions
22   * are met:
23   *
24   *  - Redistributions of source code must retain the above copyright
25   *    notice, this list of conditions and the following disclaimer.
26   *  - Redistributions in binary form must reproduce the above copyright
27   *    notice, this list of conditions and the following disclaimer in
28   *    the documentation and/or other materials provided with the
29   *    distribution.
30   *  - Neither the name of Intel Corporation nor the names of its
31   *    contributors may be used to endorse or promote products derived
32   *    from this software without specific prior written permission.
33   *
34   * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35   * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36   * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37   * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38   * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39   * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40   * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41   * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42   * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43   * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44   * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45   *
46   */
47  
48  /*
49   * This file contains all of the code that is specific to the HFI chip
50   */
51  
52  #include <linux/pci.h>
53  #include <linux/delay.h>
54  #include <linux/interrupt.h>
55  #include <linux/module.h>
56  
57  #include "hfi.h"
58  #include "trace.h"
59  #include "mad.h"
60  #include "pio.h"
61  #include "sdma.h"
62  #include "eprom.h"
63  #include "efivar.h"
64  #include "platform.h"
65  #include "aspm.h"
66  #include "affinity.h"
67  #include "debugfs.h"
68  #include "fault.h"
69  
70  uint kdeth_qp;
71  module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
72  MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
73  
74  uint num_vls = HFI1_MAX_VLS_SUPPORTED;
75  module_param(num_vls, uint, S_IRUGO);
76  MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
77  
78  /*
79   * Default time to aggregate two 10K packets from the idle state
80   * (timer not running). The timer starts at the end of the first packet,
81   * so only the time for one 10K packet and header plus a bit extra is needed.
82   * 10 * 1024 + 64 header byte = 10304 byte
83   * 10304 byte / 12.5 GB/s = 824.32ns
84   */
85  uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
86  module_param(rcv_intr_timeout, uint, S_IRUGO);
87  MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
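/*
 * For illustration only (hypothetical packet size, same formula as above):
 * an 8K packet plus header is 8 * 1024 + 64 = 8256 bytes, and
 * 8256 bytes / 12.5 GB/s = 660.48 ns, so a comparable timeout for that
 * packet size would be roughly (660 + 16) ns.
 */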
88  
89  uint rcv_intr_count = 16; /* same as qib */
90  module_param(rcv_intr_count, uint, S_IRUGO);
91  MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
92  
93  ushort link_crc_mask = SUPPORTED_CRCS;
94  module_param(link_crc_mask, ushort, S_IRUGO);
95  MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
96  
97  uint loopback;
98  module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
100  
101  /* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
103  static ushort crc_14b_sideband = 1;
104  static uint use_flr = 1;
105  uint quick_linkup; /* skip LNI */
106  
107  struct flag_table {
108  	u64 flag;	/* the flag */
109  	char *str;	/* description string */
110  	u16 extra;	/* extra information */
111  	u16 unused0;
112  	u32 unused1;
113  };
114  
115  /* str must be a string constant */
116  #define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
117  #define FLAG_ENTRY0(str, flag) {flag, str, 0}
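/*
 * For illustration, FLAG_ENTRY0("CceCsrParityErr",
 * CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK) expands to the positional
 * struct flag_table initializer
 *	{ CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK, "CceCsrParityErr", 0 }
 * FLAG_ENTRY() additionally fills the extra field with consequence bits
 * (the SEC_* values defined below; see pio_err_status_flags for usage).
 */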
118  
119  /* Send Error Consequences */
120  #define SEC_WRITE_DROPPED	0x1
121  #define SEC_PACKET_DROPPED	0x2
122  #define SEC_SC_HALTED		0x4	/* per-context only */
123  #define SEC_SPC_FREEZE		0x8	/* per-HFI only */
124  
125  #define DEFAULT_KRCVQS		  2
126  #define MIN_KERNEL_KCTXTS         2
127  #define FIRST_KERNEL_KCTXT        1
128  
129  /*
130   * RSM instance allocation
131   *   0 - Verbs
132   *   1 - User Fecn Handling
133   *   2 - Vnic
134   */
135  #define RSM_INS_VERBS             0
136  #define RSM_INS_FECN              1
137  #define RSM_INS_VNIC              2
138  
139  /* Bit offset into the GUID which carries HFI id information */
140  #define GUID_HFI_INDEX_SHIFT     39
141  
142  /* extract the emulation revision */
143  #define emulator_rev(dd) ((dd)->irev >> 8)
144  /* parallel and serial emulation versions are 3 and 4 respectively */
145  #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
146  #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
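/*
 * Example (hypothetical irev value): with dd->irev == 0x0504,
 * emulator_rev(dd) is 0x05 and is_emulator_s(dd) is true, since the low
 * nibble distinguishes parallel (3) from serial (4) emulation.
 */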
147  
148  /* RSM fields for Verbs */
149  /* packet type */
150  #define IB_PACKET_TYPE         2ull
151  #define QW_SHIFT               6ull
152  /* QPN[7..1] */
153  #define QPN_WIDTH              7ull
154  
155  /* LRH.BTH: QW 0, OFFSET 48 - for match */
156  #define LRH_BTH_QW             0ull
157  #define LRH_BTH_BIT_OFFSET     48ull
158  #define LRH_BTH_OFFSET(off)    ((LRH_BTH_QW << QW_SHIFT) | (off))
159  #define LRH_BTH_MATCH_OFFSET   LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
160  #define LRH_BTH_SELECT
161  #define LRH_BTH_MASK           3ull
162  #define LRH_BTH_VALUE          2ull
163  
164  /* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
165  #define LRH_SC_QW              0ull
166  #define LRH_SC_BIT_OFFSET      56ull
167  #define LRH_SC_OFFSET(off)     ((LRH_SC_QW << QW_SHIFT) | (off))
168  #define LRH_SC_MATCH_OFFSET    LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
169  #define LRH_SC_MASK            128ull
170  #define LRH_SC_VALUE           0ull
171  
172  /* SC[n..0] QW 0, OFFSET 60 - for select */
173  #define LRH_SC_SELECT_OFFSET  ((LRH_SC_QW << QW_SHIFT) | (60ull))
174  
175  /* QPN[m+n:1] QW 1, OFFSET 1 */
176  #define QPN_SELECT_OFFSET      ((1ull << QW_SHIFT) | (1ull))
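/*
 * Worked example of the offset encoding used above: each match/select
 * offset is (quad-word index << QW_SHIFT) | bit offset within that QW, so
 *	LRH_BTH_MATCH_OFFSET = (0 << 6) | 48 = 48
 *	QPN_SELECT_OFFSET    = (1 << 6) |  1 = 65
 */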
177  
178  /* RSM fields for Vnic */
179  /* L2_TYPE: QW 0, OFFSET 61 - for match */
180  #define L2_TYPE_QW             0ull
181  #define L2_TYPE_BIT_OFFSET     61ull
182  #define L2_TYPE_OFFSET(off)    ((L2_TYPE_QW << QW_SHIFT) | (off))
183  #define L2_TYPE_MATCH_OFFSET   L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
184  #define L2_TYPE_MASK           3ull
185  #define L2_16B_VALUE           2ull
186  
187  /* L4_TYPE QW 1, OFFSET 0 - for match */
188  #define L4_TYPE_QW              1ull
189  #define L4_TYPE_BIT_OFFSET      0ull
190  #define L4_TYPE_OFFSET(off)     ((L4_TYPE_QW << QW_SHIFT) | (off))
191  #define L4_TYPE_MATCH_OFFSET    L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
192  #define L4_16B_TYPE_MASK        0xFFull
193  #define L4_16B_ETH_VALUE        0x78ull
194  
195  /* 16B VESWID - for select */
196  #define L4_16B_HDR_VESWID_OFFSET  ((2 << QW_SHIFT) | (16ull))
197  /* 16B ENTROPY - for select */
198  #define L2_16B_ENTROPY_OFFSET     ((1 << QW_SHIFT) | (32ull))
199  
/* defines used to build the power-on SC2VL table */
201  #define SC2VL_VAL( \
202  	num, \
203  	sc0, sc0val, \
204  	sc1, sc1val, \
205  	sc2, sc2val, \
206  	sc3, sc3val, \
207  	sc4, sc4val, \
208  	sc5, sc5val, \
209  	sc6, sc6val, \
210  	sc7, sc7val) \
211  ( \
212  	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
213  	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
214  	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
215  	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
216  	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
217  	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
218  	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
219  	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
220  )
221  
222  #define DC_SC_VL_VAL( \
223  	range, \
224  	e0, e0val, \
225  	e1, e1val, \
226  	e2, e2val, \
227  	e3, e3val, \
228  	e4, e4val, \
229  	e5, e5val, \
230  	e6, e6val, \
231  	e7, e7val, \
232  	e8, e8val, \
233  	e9, e9val, \
234  	e10, e10val, \
235  	e11, e11val, \
236  	e12, e12val, \
237  	e13, e13val, \
238  	e14, e14val, \
239  	e15, e15val) \
240  ( \
241  	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
242  	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
243  	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
244  	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
245  	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
246  	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
247  	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
248  	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
249  	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
250  	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
251  	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
252  	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
253  	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
254  	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
255  	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
256  	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
257  )
258  
259  /* all CceStatus sub-block freeze bits */
260  #define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
261  			| CCE_STATUS_RXE_FROZE_SMASK \
262  			| CCE_STATUS_TXE_FROZE_SMASK \
263  			| CCE_STATUS_TXE_PIO_FROZE_SMASK)
264  /* all CceStatus sub-block TXE pause bits */
265  #define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
266  			| CCE_STATUS_TXE_PAUSED_SMASK \
267  			| CCE_STATUS_SDMA_PAUSED_SMASK)
268  /* all CceStatus sub-block RXE pause bits */
269  #define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
270  
271  #define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
272  #define CNTR_32BIT_MAX 0x00000000FFFFFFFF
273  
274  /*
275   * CCE Error flags.
276   */
277  static struct flag_table cce_err_status_flags[] = {
278  /* 0*/	FLAG_ENTRY0("CceCsrParityErr",
279  		CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
280  /* 1*/	FLAG_ENTRY0("CceCsrReadBadAddrErr",
281  		CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
282  /* 2*/	FLAG_ENTRY0("CceCsrWriteBadAddrErr",
283  		CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
284  /* 3*/	FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
285  		CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
286  /* 4*/	FLAG_ENTRY0("CceTrgtAccessErr",
287  		CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
288  /* 5*/	FLAG_ENTRY0("CceRspdDataParityErr",
289  		CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
290  /* 6*/	FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
291  		CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
292  /* 7*/	FLAG_ENTRY0("CceCsrCfgBusParityErr",
293  		CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
294  /* 8*/	FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
295  		CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
296  /* 9*/	FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
297  	    CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
298  /*10*/	FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
299  	    CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
300  /*11*/	FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
301  	    CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
302  /*12*/	FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
303  		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
304  /*13*/	FLAG_ENTRY0("PcicRetryMemCorErr",
305  		CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
306  /*14*/	FLAG_ENTRY0("PcicRetryMemCorErr",
307  		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
308  /*15*/	FLAG_ENTRY0("PcicPostHdQCorErr",
309  		CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
310  /*16*/	FLAG_ENTRY0("PcicPostHdQCorErr",
311  		CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
312  /*17*/	FLAG_ENTRY0("PcicPostHdQCorErr",
313  		CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
314  /*18*/	FLAG_ENTRY0("PcicCplDatQCorErr",
315  		CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
316  /*19*/	FLAG_ENTRY0("PcicNPostHQParityErr",
317  		CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
318  /*20*/	FLAG_ENTRY0("PcicNPostDatQParityErr",
319  		CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
320  /*21*/	FLAG_ENTRY0("PcicRetryMemUncErr",
321  		CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
322  /*22*/	FLAG_ENTRY0("PcicRetrySotMemUncErr",
323  		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
324  /*23*/	FLAG_ENTRY0("PcicPostHdQUncErr",
325  		CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
326  /*24*/	FLAG_ENTRY0("PcicPostDatQUncErr",
327  		CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
328  /*25*/	FLAG_ENTRY0("PcicCplHdQUncErr",
329  		CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
330  /*26*/	FLAG_ENTRY0("PcicCplDatQUncErr",
331  		CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
332  /*27*/	FLAG_ENTRY0("PcicTransmitFrontParityErr",
333  		CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
334  /*28*/	FLAG_ENTRY0("PcicTransmitBackParityErr",
335  		CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
336  /*29*/	FLAG_ENTRY0("PcicReceiveParityErr",
337  		CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
338  /*30*/	FLAG_ENTRY0("CceTrgtCplTimeoutErr",
339  		CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
340  /*31*/	FLAG_ENTRY0("LATriggered",
341  		CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
342  /*32*/	FLAG_ENTRY0("CceSegReadBadAddrErr",
343  		CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
344  /*33*/	FLAG_ENTRY0("CceSegWriteBadAddrErr",
345  		CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
346  /*34*/	FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
347  		CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
348  /*35*/	FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
349  		CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
350  /*36*/	FLAG_ENTRY0("CceMsixTableCorErr",
351  		CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
352  /*37*/	FLAG_ENTRY0("CceMsixTableUncErr",
353  		CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
354  /*38*/	FLAG_ENTRY0("CceIntMapCorErr",
355  		CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
356  /*39*/	FLAG_ENTRY0("CceIntMapUncErr",
357  		CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
358  /*40*/	FLAG_ENTRY0("CceMsixCsrParityErr",
359  		CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
360  /*41-63 reserved*/
361  };
362  
363  /*
364   * Misc Error flags
365   */
366  #define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
367  static struct flag_table misc_err_status_flags[] = {
368  /* 0*/	FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
369  /* 1*/	FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
370  /* 2*/	FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
371  /* 3*/	FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
372  /* 4*/	FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
373  /* 5*/	FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
374  /* 6*/	FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
375  /* 7*/	FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
376  /* 8*/	FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
377  /* 9*/	FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
378  /*10*/	FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
379  /*11*/	FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
380  /*12*/	FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
381  };
382  
383  /*
384   * TXE PIO Error flags and consequences
385   */
386  static struct flag_table pio_err_status_flags[] = {
387  /* 0*/	FLAG_ENTRY("PioWriteBadCtxt",
388  	SEC_WRITE_DROPPED,
389  	SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
390  /* 1*/	FLAG_ENTRY("PioWriteAddrParity",
391  	SEC_SPC_FREEZE,
392  	SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
393  /* 2*/	FLAG_ENTRY("PioCsrParity",
394  	SEC_SPC_FREEZE,
395  	SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
396  /* 3*/	FLAG_ENTRY("PioSbMemFifo0",
397  	SEC_SPC_FREEZE,
398  	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
399  /* 4*/	FLAG_ENTRY("PioSbMemFifo1",
400  	SEC_SPC_FREEZE,
401  	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
402  /* 5*/	FLAG_ENTRY("PioPccFifoParity",
403  	SEC_SPC_FREEZE,
404  	SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
405  /* 6*/	FLAG_ENTRY("PioPecFifoParity",
406  	SEC_SPC_FREEZE,
407  	SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
408  /* 7*/	FLAG_ENTRY("PioSbrdctlCrrelParity",
409  	SEC_SPC_FREEZE,
410  	SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
411  /* 8*/	FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
412  	SEC_SPC_FREEZE,
413  	SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
414  /* 9*/	FLAG_ENTRY("PioPktEvictFifoParityErr",
415  	SEC_SPC_FREEZE,
416  	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
417  /*10*/	FLAG_ENTRY("PioSmPktResetParity",
418  	SEC_SPC_FREEZE,
419  	SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
420  /*11*/	FLAG_ENTRY("PioVlLenMemBank0Unc",
421  	SEC_SPC_FREEZE,
422  	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
423  /*12*/	FLAG_ENTRY("PioVlLenMemBank1Unc",
424  	SEC_SPC_FREEZE,
425  	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
426  /*13*/	FLAG_ENTRY("PioVlLenMemBank0Cor",
427  	0,
428  	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
429  /*14*/	FLAG_ENTRY("PioVlLenMemBank1Cor",
430  	0,
431  	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
432  /*15*/	FLAG_ENTRY("PioCreditRetFifoParity",
433  	SEC_SPC_FREEZE,
434  	SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
435  /*16*/	FLAG_ENTRY("PioPpmcPblFifo",
436  	SEC_SPC_FREEZE,
437  	SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
438  /*17*/	FLAG_ENTRY("PioInitSmIn",
439  	0,
440  	SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
441  /*18*/	FLAG_ENTRY("PioPktEvictSmOrArbSm",
442  	SEC_SPC_FREEZE,
443  	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
444  /*19*/	FLAG_ENTRY("PioHostAddrMemUnc",
445  	SEC_SPC_FREEZE,
446  	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
447  /*20*/	FLAG_ENTRY("PioHostAddrMemCor",
448  	0,
449  	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
450  /*21*/	FLAG_ENTRY("PioWriteDataParity",
451  	SEC_SPC_FREEZE,
452  	SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
453  /*22*/	FLAG_ENTRY("PioStateMachine",
454  	SEC_SPC_FREEZE,
455  	SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
456  /*23*/	FLAG_ENTRY("PioWriteQwValidParity",
457  	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
458  	SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
459  /*24*/	FLAG_ENTRY("PioBlockQwCountParity",
460  	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
461  	SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
462  /*25*/	FLAG_ENTRY("PioVlfVlLenParity",
463  	SEC_SPC_FREEZE,
464  	SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
465  /*26*/	FLAG_ENTRY("PioVlfSopParity",
466  	SEC_SPC_FREEZE,
467  	SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
468  /*27*/	FLAG_ENTRY("PioVlFifoParity",
469  	SEC_SPC_FREEZE,
470  	SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
471  /*28*/	FLAG_ENTRY("PioPpmcBqcMemParity",
472  	SEC_SPC_FREEZE,
473  	SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
474  /*29*/	FLAG_ENTRY("PioPpmcSopLen",
475  	SEC_SPC_FREEZE,
476  	SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
477  /*30-31 reserved*/
478  /*32*/	FLAG_ENTRY("PioCurrentFreeCntParity",
479  	SEC_SPC_FREEZE,
480  	SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
481  /*33*/	FLAG_ENTRY("PioLastReturnedCntParity",
482  	SEC_SPC_FREEZE,
483  	SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
484  /*34*/	FLAG_ENTRY("PioPccSopHeadParity",
485  	SEC_SPC_FREEZE,
486  	SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
487  /*35*/	FLAG_ENTRY("PioPecSopHeadParityErr",
488  	SEC_SPC_FREEZE,
489  	SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
490  /*36-63 reserved*/
491  };
492  
493  /* TXE PIO errors that cause an SPC freeze */
494  #define ALL_PIO_FREEZE_ERR \
495  	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
496  	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
497  	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
498  	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
499  	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
500  	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
501  	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
502  	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
503  	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
504  	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
505  	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
506  	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
507  	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
508  	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
509  	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
510  	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
511  	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
512  	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
513  	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
514  	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
515  	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
516  	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
517  	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
518  	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
519  	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
520  	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
521  	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
522  	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
523  	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
524  
525  /*
526   * TXE SDMA Error flags
527   */
528  static struct flag_table sdma_err_status_flags[] = {
529  /* 0*/	FLAG_ENTRY0("SDmaRpyTagErr",
530  		SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
531  /* 1*/	FLAG_ENTRY0("SDmaCsrParityErr",
532  		SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
533  /* 2*/	FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
534  		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
535  /* 3*/	FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
536  		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
537  /*04-63 reserved*/
538  };
539  
540  /* TXE SDMA errors that cause an SPC freeze */
541  #define ALL_SDMA_FREEZE_ERR  \
542  		(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
543  		| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
544  		| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
545  
546  /* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
547  #define PORT_DISCARD_EGRESS_ERRS \
548  	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
549  	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
550  	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
551  
552  /*
553   * TXE Egress Error flags
554   */
555  #define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
556  static struct flag_table egress_err_status_flags[] = {
557  /* 0*/	FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
558  /* 1*/	FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
559  /* 2 reserved */
560  /* 3*/	FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
561  		SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
562  /* 4*/	FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
563  /* 5*/	FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
564  /* 6 reserved */
565  /* 7*/	FLAG_ENTRY0("TxPioLaunchIntfParityErr",
566  		SEES(TX_PIO_LAUNCH_INTF_PARITY)),
567  /* 8*/	FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
568  		SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
569  /* 9-10 reserved */
570  /*11*/	FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
571  		SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
572  /*12*/	FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
573  /*13*/	FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
574  /*14*/	FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
575  /*15*/	FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
576  /*16*/	FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
577  		SEES(TX_SDMA0_DISALLOWED_PACKET)),
578  /*17*/	FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
579  		SEES(TX_SDMA1_DISALLOWED_PACKET)),
580  /*18*/	FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
581  		SEES(TX_SDMA2_DISALLOWED_PACKET)),
582  /*19*/	FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
583  		SEES(TX_SDMA3_DISALLOWED_PACKET)),
584  /*20*/	FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
585  		SEES(TX_SDMA4_DISALLOWED_PACKET)),
586  /*21*/	FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
587  		SEES(TX_SDMA5_DISALLOWED_PACKET)),
588  /*22*/	FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
589  		SEES(TX_SDMA6_DISALLOWED_PACKET)),
590  /*23*/	FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
591  		SEES(TX_SDMA7_DISALLOWED_PACKET)),
592  /*24*/	FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
593  		SEES(TX_SDMA8_DISALLOWED_PACKET)),
594  /*25*/	FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
595  		SEES(TX_SDMA9_DISALLOWED_PACKET)),
596  /*26*/	FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
597  		SEES(TX_SDMA10_DISALLOWED_PACKET)),
598  /*27*/	FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
599  		SEES(TX_SDMA11_DISALLOWED_PACKET)),
600  /*28*/	FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
601  		SEES(TX_SDMA12_DISALLOWED_PACKET)),
602  /*29*/	FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
603  		SEES(TX_SDMA13_DISALLOWED_PACKET)),
604  /*30*/	FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
605  		SEES(TX_SDMA14_DISALLOWED_PACKET)),
606  /*31*/	FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
607  		SEES(TX_SDMA15_DISALLOWED_PACKET)),
608  /*32*/	FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
609  		SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
610  /*33*/	FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
611  		SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
612  /*34*/	FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
613  		SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
614  /*35*/	FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
615  		SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
616  /*36*/	FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
617  		SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
618  /*37*/	FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
619  		SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
620  /*38*/	FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
621  		SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
622  /*39*/	FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
623  		SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
624  /*40*/	FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
625  		SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
626  /*41*/	FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
627  /*42*/	FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
628  /*43*/	FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
629  /*44*/	FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
630  /*45*/	FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
631  /*46*/	FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
632  /*47*/	FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
633  /*48*/	FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
634  /*49*/	FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
635  /*50*/	FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
636  /*51*/	FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
637  /*52*/	FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
638  /*53*/	FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
639  /*54*/	FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
640  /*55*/	FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
641  /*56*/	FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
642  /*57*/	FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
643  /*58*/	FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
644  /*59*/	FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
645  /*60*/	FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
646  /*61*/	FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
647  /*62*/	FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
648  		SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
649  /*63*/	FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
650  		SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
651  };
652  
653  /*
654   * TXE Egress Error Info flags
655   */
656  #define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
657  static struct flag_table egress_err_info_flags[] = {
658  /* 0*/	FLAG_ENTRY0("Reserved", 0ull),
659  /* 1*/	FLAG_ENTRY0("VLErr", SEEI(VL)),
660  /* 2*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
661  /* 3*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
662  /* 4*/	FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
663  /* 5*/	FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
664  /* 6*/	FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
665  /* 7*/	FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
666  /* 8*/	FLAG_ENTRY0("RawErr", SEEI(RAW)),
667  /* 9*/	FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
668  /*10*/	FLAG_ENTRY0("GRHErr", SEEI(GRH)),
669  /*11*/	FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
670  /*12*/	FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
671  /*13*/	FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
672  /*14*/	FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
673  /*15*/	FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
674  /*16*/	FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
675  /*17*/	FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
676  /*18*/	FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
677  /*19*/	FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
678  /*20*/	FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
679  /*21*/	FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
680  };
681  
682  /* TXE Egress errors that cause an SPC freeze */
683  #define ALL_TXE_EGRESS_FREEZE_ERR \
684  	(SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
685  	| SEES(TX_PIO_LAUNCH_INTF_PARITY) \
686  	| SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
687  	| SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
688  	| SEES(TX_LAUNCH_CSR_PARITY) \
689  	| SEES(TX_SBRD_CTL_CSR_PARITY) \
690  	| SEES(TX_CONFIG_PARITY) \
691  	| SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
692  	| SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
693  	| SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
694  	| SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
695  	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
696  	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
697  	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
698  	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
699  	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
700  	| SEES(TX_CREDIT_RETURN_PARITY))
701  
702  /*
703   * TXE Send error flags
704   */
705  #define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
706  static struct flag_table send_err_status_flags[] = {
707  /* 0*/	FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
708  /* 1*/	FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
709  /* 2*/	FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
710  };
711  
712  /*
713   * TXE Send Context Error flags and consequences
714   */
715  static struct flag_table sc_err_status_flags[] = {
716  /* 0*/	FLAG_ENTRY("InconsistentSop",
717  		SEC_PACKET_DROPPED | SEC_SC_HALTED,
718  		SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
719  /* 1*/	FLAG_ENTRY("DisallowedPacket",
720  		SEC_PACKET_DROPPED | SEC_SC_HALTED,
721  		SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
722  /* 2*/	FLAG_ENTRY("WriteCrossesBoundary",
723  		SEC_WRITE_DROPPED | SEC_SC_HALTED,
724  		SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
725  /* 3*/	FLAG_ENTRY("WriteOverflow",
726  		SEC_WRITE_DROPPED | SEC_SC_HALTED,
727  		SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
728  /* 4*/	FLAG_ENTRY("WriteOutOfBounds",
729  		SEC_WRITE_DROPPED | SEC_SC_HALTED,
730  		SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
731  /* 5-63 reserved*/
732  };
733  
734  /*
735   * RXE Receive Error flags
736   */
737  #define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
738  static struct flag_table rxe_err_status_flags[] = {
739  /* 0*/	FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
740  /* 1*/	FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
741  /* 2*/	FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
742  /* 3*/	FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
743  /* 4*/	FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
744  /* 5*/	FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
745  /* 6*/	FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
746  /* 7*/	FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
747  /* 8*/	FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
748  /* 9*/	FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
749  /*10*/	FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
750  /*11*/	FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
751  /*12*/	FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
752  /*13*/	FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
753  /*14*/	FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
754  /*15*/	FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
755  /*16*/	FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
756  		RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
757  /*17*/	FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
758  /*18*/	FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
759  /*19*/	FLAG_ENTRY0("RxRbufBlockListReadUncErr",
760  		RXES(RBUF_BLOCK_LIST_READ_UNC)),
761  /*20*/	FLAG_ENTRY0("RxRbufBlockListReadCorErr",
762  		RXES(RBUF_BLOCK_LIST_READ_COR)),
763  /*21*/	FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
764  		RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
765  /*22*/	FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
766  		RXES(RBUF_CSR_QENT_CNT_PARITY)),
767  /*23*/	FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
768  		RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
769  /*24*/	FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
770  		RXES(RBUF_CSR_QVLD_BIT_PARITY)),
771  /*25*/	FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
772  /*26*/	FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
773  /*27*/	FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
774  		RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
775  /*28*/	FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
776  /*29*/	FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
777  /*30*/	FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
778  /*31*/	FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
779  /*32*/	FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
780  /*33*/	FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
781  /*34*/	FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
782  /*35*/	FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
783  		RXES(RBUF_FL_INITDONE_PARITY)),
784  /*36*/	FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
785  		RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
786  /*37*/	FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
787  /*38*/	FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
788  /*39*/	FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
789  /*40*/	FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
790  		RXES(LOOKUP_DES_PART1_UNC_COR)),
791  /*41*/	FLAG_ENTRY0("RxLookupDesPart2ParityErr",
792  		RXES(LOOKUP_DES_PART2_PARITY)),
793  /*42*/	FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
794  /*43*/	FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
795  /*44*/	FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
796  /*45*/	FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
797  /*46*/	FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
798  /*47*/	FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
799  /*48*/	FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
800  /*49*/	FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
801  /*50*/	FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
802  /*51*/	FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
803  /*52*/	FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
804  /*53*/	FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
805  /*54*/	FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
806  /*55*/	FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
807  /*56*/	FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
808  /*57*/	FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
809  /*58*/	FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
810  /*59*/	FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
811  /*60*/	FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
812  /*61*/	FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
813  /*62*/	FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
814  /*63*/	FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
815  };
816  
817  /* RXE errors that will trigger an SPC freeze */
818  #define ALL_RXE_FREEZE_ERR  \
819  	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
820  	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
821  	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
822  	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
823  	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
824  	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
825  	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
826  	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
827  	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
828  	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
829  	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
830  	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
831  	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
832  	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
833  	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
834  	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
835  	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
836  	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
837  	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
838  	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
839  	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
840  	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
841  	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
842  	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
843  	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
844  	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
845  	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
846  	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
847  	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
848  	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
849  	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
850  	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
851  	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
852  	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
853  	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
854  	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
855  	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
856  	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
857  	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
858  	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
859  	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
860  	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
861  	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
862  	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
863  
864  #define RXE_FREEZE_ABORT_MASK \
865  	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
866  	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
867  	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
868  
869  /*
870   * DCC Error Flags
871   */
872  #define DCCE(name) DCC_ERR_FLG_##name##_SMASK
873  static struct flag_table dcc_err_flags[] = {
874  	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
875  	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
876  	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
877  	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
878  	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
879  	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
880  	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
881  	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
882  	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
883  	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
884  	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
885  	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
886  	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
887  	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
888  	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
889  	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
890  	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
891  	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
892  	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
893  	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
894  	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
895  	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
896  	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
897  	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
898  	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
899  	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
900  	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
901  	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
902  	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
903  	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
904  	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
905  	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
906  	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
907  	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
908  	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
909  	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
910  	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
911  	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
912  	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
913  	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
914  	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
915  	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
916  	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
917  	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
918  	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
919  	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
920  };
921  
922  /*
923   * LCB error flags
924   */
925  #define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
926  static struct flag_table lcb_err_flags[] = {
927  /* 0*/	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
928  /* 1*/	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
929  /* 2*/	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
930  /* 3*/	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
931  		LCBE(ALL_LNS_FAILED_REINIT_TEST)),
932  /* 4*/	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
933  /* 5*/	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
934  /* 6*/	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
935  /* 7*/	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
936  /* 8*/	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
937  /* 9*/	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
938  /*10*/	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
939  /*11*/	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
940  /*12*/	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
941  /*13*/	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
942  		LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
943  /*14*/	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
944  /*15*/	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
945  /*16*/	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
946  /*17*/	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
947  /*18*/	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
948  /*19*/	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
949  		LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
950  /*20*/	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
951  /*21*/	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
952  /*22*/	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
953  /*23*/	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
954  /*24*/	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
955  /*25*/	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
956  /*26*/	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
957  		LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
958  /*27*/	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
959  /*28*/	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
960  		LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
961  /*29*/	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
962  		LCBE(REDUNDANT_FLIT_PARITY_ERR))
963  };
964  
965  /*
966   * DC8051 Error Flags
967   */
968  #define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
969  static struct flag_table dc8051_err_flags[] = {
970  	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
971  	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
972  	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
973  	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
974  	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
975  	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
976  	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
977  	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
978  	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
979  		    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
980  	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
981  };
982  
983  /*
984   * DC8051 Information Error flags
985   *
986   * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
987   */
988  static struct flag_table dc8051_info_err_flags[] = {
989  	FLAG_ENTRY0("Spico ROM check failed",  SPICO_ROM_FAILED),
990  	FLAG_ENTRY0("Unknown frame received",  UNKNOWN_FRAME),
991  	FLAG_ENTRY0("Target BER not met",      TARGET_BER_NOT_MET),
992  	FLAG_ENTRY0("Serdes internal loopback failure",
993  		    FAILED_SERDES_INTERNAL_LOOPBACK),
994  	FLAG_ENTRY0("Failed SerDes init",      FAILED_SERDES_INIT),
995  	FLAG_ENTRY0("Failed LNI(Polling)",     FAILED_LNI_POLLING),
996  	FLAG_ENTRY0("Failed LNI(Debounce)",    FAILED_LNI_DEBOUNCE),
997  	FLAG_ENTRY0("Failed LNI(EstbComm)",    FAILED_LNI_ESTBCOMM),
998  	FLAG_ENTRY0("Failed LNI(OptEq)",       FAILED_LNI_OPTEQ),
999  	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
1000  	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
1001  	FLAG_ENTRY0("Failed LNI(ConfigLT)",    FAILED_LNI_CONFIGLT),
1002  	FLAG_ENTRY0("Host Handshake Timeout",  HOST_HANDSHAKE_TIMEOUT),
1003  	FLAG_ENTRY0("External Device Request Timeout",
1004  		    EXTERNAL_DEVICE_REQ_TIMEOUT),
1005  };
1006  
1007  /*
1008   * DC8051 Information Host Information flags
1009   *
1010   * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
1011   */
1012  static struct flag_table dc8051_info_host_msg_flags[] = {
1013  	FLAG_ENTRY0("Host request done", 0x0001),
1014  	FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
1015  	FLAG_ENTRY0("BC SMA message", 0x0004),
1016  	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
1017  	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
1018  	FLAG_ENTRY0("External device config request", 0x0020),
1019  	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
1020  	FLAG_ENTRY0("LinkUp achieved", 0x0080),
1021  	FLAG_ENTRY0("Link going down", 0x0100),
1022  	FLAG_ENTRY0("Link width downgraded", 0x0200),
1023  };
1024  
1025  static u32 encoded_size(u32 size);
1026  static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
1027  static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
1028  static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
1029  			       u8 *continuous);
1030  static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
1031  				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
1032  static void read_vc_remote_link_width(struct hfi1_devdata *dd,
1033  				      u8 *remote_tx_rate, u16 *link_widths);
1034  static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
1035  				    u8 *flag_bits, u16 *link_widths);
1036  static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1037  				  u8 *device_rev);
1038  static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1039  static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1040  			    u8 *tx_polarity_inversion,
1041  			    u8 *rx_polarity_inversion, u8 *max_rate);
1042  static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1043  				unsigned int context, u64 err_status);
1044  static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1045  static void handle_dcc_err(struct hfi1_devdata *dd,
1046  			   unsigned int context, u64 err_status);
1047  static void handle_lcb_err(struct hfi1_devdata *dd,
1048  			   unsigned int context, u64 err_status);
1049  static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1050  static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1051  static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1052  static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1053  static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1054  static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1055  static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1056  static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1057  static void set_partition_keys(struct hfi1_pportdata *ppd);
1058  static const char *link_state_name(u32 state);
1059  static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1060  					  u32 state);
1061  static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1062  			   u64 *out_data);
1063  static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1064  static int thermal_init(struct hfi1_devdata *dd);
1065  
1066  static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
1067  static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
1068  					    int msecs);
1069  static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1070  				  int msecs);
1071  static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
1072  static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
1073  static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1074  				   int msecs);
1075  static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
1076  					 int msecs);
1077  static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1078  static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
1079  static void handle_temp_err(struct hfi1_devdata *dd);
1080  static void dc_shutdown(struct hfi1_devdata *dd);
1081  static void dc_start(struct hfi1_devdata *dd);
1082  static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
1083  			   unsigned int *np);
1084  static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
1085  static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
1086  static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
1087  static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width);
1088  
1089  /*
1090   * Error interrupt table entry.  This is used as input to the interrupt
1091   * "clear down" routine used for all second tier error interrupt register.
1092   * Second tier interrupt registers have a single bit representing them
1093   * in the top-level CceIntStatus.
1094   */
1095  struct err_reg_info {
1096  	u32 status;		/* status CSR offset */
1097  	u32 clear;		/* clear CSR offset */
1098  	u32 mask;		/* mask CSR offset */
1099  	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1100  	const char *desc;
1101  };
1102  
1103  #define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START)
1104  #define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START)
1105  #define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START)
1106  
1107  /*
1108   * Helpers for building HFI and DC error interrupt table entries.  Different
1109   * helpers are needed because of inconsistent register names.
1110   */
1111  #define EE(reg, handler, desc) \
1112  	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1113  		handler, desc }
1114  #define DC_EE1(reg, handler, desc) \
1115  	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1116  #define DC_EE2(reg, handler, desc) \
1117  	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
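/*
 * For illustration, EE(CCE_ERR, handle_cce_err, "CceErr") below expands to
 *	{ CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK, handle_cce_err, "CceErr" }
 * while DC_EE1() uses the <reg>_FLG/_FLG_CLR/_FLG_EN names and DC_EE2()
 * uses <reg>_FLG/_CLR/_EN, matching the inconsistent DC register naming.
 */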
1118  
1119  /*
1120   * Table of the "misc" grouping of error interrupts.  Each entry refers to
1121   * another register containing more information.
1122   */
1123  static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1124  /* 0*/	EE(CCE_ERR,		handle_cce_err,    "CceErr"),
1125  /* 1*/	EE(RCV_ERR,		handle_rxe_err,    "RxeErr"),
1126  /* 2*/	EE(MISC_ERR,	handle_misc_err,   "MiscErr"),
1127  /* 3*/	{ 0, 0, 0, NULL }, /* reserved */
1128  /* 4*/	EE(SEND_PIO_ERR,    handle_pio_err,    "PioErr"),
1129  /* 5*/	EE(SEND_DMA_ERR,    handle_sdma_err,   "SDmaErr"),
1130  /* 6*/	EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1131  /* 7*/	EE(SEND_ERR,	handle_txe_err,    "TxeErr")
1132  	/* the rest are reserved */
1133  };
1134  
1135  /*
1136   * Index into the Various section of the interrupt sources
1137   * corresponding to the Critical Temperature interrupt.
1138   */
1139  #define TCRIT_INT_SOURCE 4
1140  
1141  /*
1142   * SDMA error interrupt entry - refers to another register containing more
1143   * information.
1144   */
1145  static const struct err_reg_info sdma_eng_err =
1146  	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1147  
1148  static const struct err_reg_info various_err[NUM_VARIOUS] = {
1149  /* 0*/	{ 0, 0, 0, NULL }, /* PbcInt */
1150  /* 1*/	{ 0, 0, 0, NULL }, /* GpioAssertInt */
1151  /* 2*/	EE(ASIC_QSFP1,	handle_qsfp_int,	"QSFP1"),
1152  /* 3*/	EE(ASIC_QSFP2,	handle_qsfp_int,	"QSFP2"),
1153  /* 4*/	{ 0, 0, 0, NULL }, /* TCritInt */
1154  	/* rest are reserved */
1155  };
1156  
1157  /*
1158   * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register cannot be derived from the MTU value because 10K is not
1160   * a power of 2. Therefore, we need a constant. Everything else can
1161   * be calculated.
1162   */
1163  #define DCC_CFG_PORT_MTU_CAP_10240 7
1164  
1165  /*
1166   * Table of the DC grouping of error interrupts.  Each entry refers to
1167   * another register containing more information.
1168   */
1169  static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1170  /* 0*/	DC_EE1(DCC_ERR,		handle_dcc_err,	       "DCC Err"),
1171  /* 1*/	DC_EE2(DC_LCB_ERR,	handle_lcb_err,	       "LCB Err"),
1172  /* 2*/	DC_EE2(DC_DC8051_ERR,	handle_8051_interrupt, "DC8051 Interrupt"),
1173  /* 3*/	/* dc_lbm_int - special, see is_dc_int() */
1174  	/* the rest are reserved */
1175  };
1176  
1177  struct cntr_entry {
1178  	/*
1179  	 * counter name
1180  	 */
1181  	char *name;
1182  
1183  	/*
1184  	 * csr to read for name (if applicable)
1185  	 */
1186  	u64 csr;
1187  
1188  	/*
1189  	 * offset into dd or ppd to store the counter's value
1190  	 */
1191  	int offset;
1192  
1193  	/*
1194  	 * flags
1195  	 */
1196  	u8 flags;
1197  
1198  	/*
1199  	 * accessor for stat element, context either dd or ppd
1200  	 */
1201  	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
1202  		       int mode, u64 data);
1203  };
1204  
1205  #define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1206  #define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1207  
1208  #define CNTR_ELEM(name, csr, offset, flags, accessor) \
1209  { \
1210  	name, \
1211  	csr, \
1212  	offset, \
1213  	flags, \
1214  	accessor \
1215  }
1216  
1217  /* 32bit RXE */
1218  #define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1219  CNTR_ELEM(#name, \
1220  	  (counter * 8 + RCV_COUNTER_ARRAY32), \
1221  	  0, flags | CNTR_32BIT, \
1222  	  port_access_u32_csr)
1223  
1224  #define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1225  CNTR_ELEM(#name, \
1226  	  (counter * 8 + RCV_COUNTER_ARRAY32), \
1227  	  0, flags | CNTR_32BIT, \
1228  	  dev_access_u32_csr)
1229  
1230  /* 64bit RXE */
1231  #define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1232  CNTR_ELEM(#name, \
1233  	  (counter * 8 + RCV_COUNTER_ARRAY64), \
1234  	  0, flags, \
1235  	  port_access_u64_csr)
1236  
1237  #define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1238  CNTR_ELEM(#name, \
1239  	  (counter * 8 + RCV_COUNTER_ARRAY64), \
1240  	  0, flags, \
1241  	  dev_access_u64_csr)
1242  
1243  #define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1244  #define OVR_ELM(ctx) \
1245  CNTR_ELEM("RcvHdrOvr" #ctx, \
1246  	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
1247  	  0, CNTR_NORMAL, port_access_u64_csr)
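/*
 * Illustrative expansion only: OVR_ELM(3) becomes
 *	CNTR_ELEM("RcvHdrOvr3", RCV_HDR_OVFL_CNT + 3 * 0x100, 0,
 *		  CNTR_NORMAL, port_access_u64_csr)
 * i.e. each receive context's overflow counter sits 0x100 bytes past
 * the previous one.
 */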
1248  
1249  /* 32bit TXE */
1250  #define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1251  CNTR_ELEM(#name, \
1252  	  (counter * 8 + SEND_COUNTER_ARRAY32), \
1253  	  0, flags | CNTR_32BIT, \
1254  	  port_access_u32_csr)
1255  
1256  /* 64bit TXE */
1257  #define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1258  CNTR_ELEM(#name, \
1259  	  (counter * 8 + SEND_COUNTER_ARRAY64), \
1260  	  0, flags, \
1261  	  port_access_u64_csr)
1262  
1263  #define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1264  CNTR_ELEM(#name,\
1265  	  counter * 8 + SEND_COUNTER_ARRAY64, \
1266  	  0, \
1267  	  flags, \
1268  	  dev_access_u64_csr)
1269  
1270  /* CCE */
1271  #define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1272  CNTR_ELEM(#name, \
1273  	  (counter * 8 + CCE_COUNTER_ARRAY32), \
1274  	  0, flags | CNTR_32BIT, \
1275  	  dev_access_u32_csr)
1276  
1277  #define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1278  CNTR_ELEM(#name, \
1279  	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1280  	  0, flags | CNTR_32BIT, \
1281  	  dev_access_u32_csr)
1282  
1283  /* DC */
1284  #define DC_PERF_CNTR(name, counter, flags) \
1285  CNTR_ELEM(#name, \
1286  	  counter, \
1287  	  0, \
1288  	  flags, \
1289  	  dev_access_u64_csr)
1290  
1291  #define DC_PERF_CNTR_LCB(name, counter, flags) \
1292  CNTR_ELEM(#name, \
1293  	  counter, \
1294  	  0, \
1295  	  flags, \
1296  	  dc_access_lcb_cntr)
1297  
1298  /* ibp counters */
1299  #define SW_IBP_CNTR(name, cntr) \
1300  CNTR_ELEM(#name, \
1301  	  0, \
1302  	  0, \
1303  	  CNTR_SYNTH, \
1304  	  access_ibp_##cntr)
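/*
 * Illustrative expansion only ("foo" is a placeholder counter name):
 *	SW_IBP_CNTR(Foo, foo)
 * becomes
 *	CNTR_ELEM("Foo", 0, 0, CNTR_SYNTH, access_ibp_foo)
 * so each entry pairs the printable name with an access_ibp_* accessor.
 */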
1305  
1306  /**
1307   * hfi1_addr_from_offset - return addr for readq/writeq
1308   * @dd - the dd device
1309   * @offset - the offset of the CSR within bar0
1310   *
1311   * This routine selects the appropriate base address
1312   * based on the indicated offset.
1313   */
1314  static inline void __iomem *hfi1_addr_from_offset(
1315  	const struct hfi1_devdata *dd,
1316  	u32 offset)
1317  {
1318  	if (offset >= dd->base2_start)
1319  		return dd->kregbase2 + (offset - dd->base2_start);
1320  	return dd->kregbase1 + offset;
1321  }
1322  
1323  /**
1324   * read_csr - read CSR at the indicated offset
1325   * @dd - the dd device
1326   * @offset - the offset of the CSR within bar0
1327   *
1328   * Return: the value read or all FF's if there
1329   * is no mapping
1330   */
1331  u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1332  {
1333  	if (dd->flags & HFI1_PRESENT)
1334  		return readq(hfi1_addr_from_offset(dd, offset));
1335  	return -1;
1336  }
1337  
1338  /**
1339   * write_csr - write CSR at the indicated offset
1340   * @dd - the dd device
1341   * @offset - the offset of the CSR within bar0
1342   * @value - value to write
1343   */
1344  void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1345  {
1346  	if (dd->flags & HFI1_PRESENT) {
1347  		void __iomem *base = hfi1_addr_from_offset(dd, offset);
1348  
1349  		/* avoid write to RcvArray */
1350  		if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
1351  			return;
1352  		writeq(value, base);
1353  	}
1354  }
1355  
1356  /**
1357   * get_csr_addr - return the iomem address for offset
1358   * @dd - the dd device
1359   * @offset - the offset of the CSR within bar0
1360   *
1361   * Return: The iomem address to use in subsequent
1362   * writeq/readq operations.
1363   */
1364  void __iomem *get_csr_addr(
1365  	const struct hfi1_devdata *dd,
1366  	u32 offset)
1367  {
1368  	if (dd->flags & HFI1_PRESENT)
1369  		return hfi1_addr_from_offset(dd, offset);
1370  	return NULL;
1371  }
1372  
1373  static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1374  				 int mode, u64 value)
1375  {
1376  	u64 ret;
1377  
1378  	if (mode == CNTR_MODE_R) {
1379  		ret = read_csr(dd, csr);
1380  	} else if (mode == CNTR_MODE_W) {
1381  		write_csr(dd, csr, value);
1382  		ret = value;
1383  	} else {
1384  		dd_dev_err(dd, "Invalid cntr register access mode");
1385  		return 0;
1386  	}
1387  
1388  	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1389  	return ret;
1390  }
1391  
1392  /* Dev Access */
1393  static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1394  			      void *context, int vl, int mode, u64 data)
1395  {
1396  	struct hfi1_devdata *dd = context;
1397  	u64 csr = entry->csr;
1398  
1399  	if (entry->flags & CNTR_SDMA) {
1400  		if (vl == CNTR_INVALID_VL)
1401  			return 0;
1402  		csr += 0x100 * vl;
1403  	} else {
1404  		if (vl != CNTR_INVALID_VL)
1405  			return 0;
1406  	}
1407  	return read_write_csr(dd, csr, mode, data);
1408  }
1409  
1410  static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1411  			      void *context, int idx, int mode, u64 data)
1412  {
1413  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1414  
1415  	if (dd->per_sdma && idx < dd->num_sdma)
1416  		return dd->per_sdma[idx].err_cnt;
1417  	return 0;
1418  }
1419  
1420  static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1421  			      void *context, int idx, int mode, u64 data)
1422  {
1423  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1424  
1425  	if (dd->per_sdma && idx < dd->num_sdma)
1426  		return dd->per_sdma[idx].sdma_int_cnt;
1427  	return 0;
1428  }
1429  
1430  static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1431  				   void *context, int idx, int mode, u64 data)
1432  {
1433  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1434  
1435  	if (dd->per_sdma && idx < dd->num_sdma)
1436  		return dd->per_sdma[idx].idle_int_cnt;
1437  	return 0;
1438  }
1439  
1440  static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1441  				       void *context, int idx, int mode,
1442  				       u64 data)
1443  {
1444  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1445  
1446  	if (dd->per_sdma && idx < dd->num_sdma)
1447  		return dd->per_sdma[idx].progress_int_cnt;
1448  	return 0;
1449  }
1450  
1451  static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1452  			      int vl, int mode, u64 data)
1453  {
1454  	struct hfi1_devdata *dd = context;
1455  
1456  	u64 val = 0;
1457  	u64 csr = entry->csr;
1458  
1459  	if (entry->flags & CNTR_VL) {
1460  		if (vl == CNTR_INVALID_VL)
1461  			return 0;
1462  		csr += 8 * vl;
1463  	} else {
1464  		if (vl != CNTR_INVALID_VL)
1465  			return 0;
1466  	}
1467  
1468  	val = read_write_csr(dd, csr, mode, data);
1469  	return val;
1470  }
1471  
1472  static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1473  			      int vl, int mode, u64 data)
1474  {
1475  	struct hfi1_devdata *dd = context;
1476  	u32 csr = entry->csr;
1477  	int ret = 0;
1478  
1479  	if (vl != CNTR_INVALID_VL)
1480  		return 0;
1481  	if (mode == CNTR_MODE_R)
1482  		ret = read_lcb_csr(dd, csr, &data);
1483  	else if (mode == CNTR_MODE_W)
1484  		ret = write_lcb_csr(dd, csr, data);
1485  
1486  	if (ret) {
1487  		dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1488  		return 0;
1489  	}
1490  
1491  	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1492  	return data;
1493  }
1494  
1495  /* Port Access */
1496  static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1497  			       int vl, int mode, u64 data)
1498  {
1499  	struct hfi1_pportdata *ppd = context;
1500  
1501  	if (vl != CNTR_INVALID_VL)
1502  		return 0;
1503  	return read_write_csr(ppd->dd, entry->csr, mode, data);
1504  }
1505  
1506  static u64 port_access_u64_csr(const struct cntr_entry *entry,
1507  			       void *context, int vl, int mode, u64 data)
1508  {
1509  	struct hfi1_pportdata *ppd = context;
1510  	u64 val;
1511  	u64 csr = entry->csr;
1512  
1513  	if (entry->flags & CNTR_VL) {
1514  		if (vl == CNTR_INVALID_VL)
1515  			return 0;
1516  		csr += 8 * vl;
1517  	} else {
1518  		if (vl != CNTR_INVALID_VL)
1519  			return 0;
1520  	}
1521  	val = read_write_csr(ppd->dd, csr, mode, data);
1522  	return val;
1523  }
1524  
1525  /* Software defined */
1526  static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1527  				u64 data)
1528  {
1529  	u64 ret;
1530  
1531  	if (mode == CNTR_MODE_R) {
1532  		ret = *cntr;
1533  	} else if (mode == CNTR_MODE_W) {
1534  		*cntr = data;
1535  		ret = data;
1536  	} else {
1537  		dd_dev_err(dd, "Invalid cntr sw access mode");
1538  		return 0;
1539  	}
1540  
1541  	hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1542  
1543  	return ret;
1544  }
1545  
1546  static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1547  				 int vl, int mode, u64 data)
1548  {
1549  	struct hfi1_pportdata *ppd = context;
1550  
1551  	if (vl != CNTR_INVALID_VL)
1552  		return 0;
1553  	return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1554  }
1555  
1556  static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1557  				 int vl, int mode, u64 data)
1558  {
1559  	struct hfi1_pportdata *ppd = context;
1560  
1561  	if (vl != CNTR_INVALID_VL)
1562  		return 0;
1563  	return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1564  }
1565  
1566  static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1567  				       void *context, int vl, int mode,
1568  				       u64 data)
1569  {
1570  	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1571  
1572  	if (vl != CNTR_INVALID_VL)
1573  		return 0;
1574  	return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1575  }
1576  
1577  static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1578  				   void *context, int vl, int mode, u64 data)
1579  {
1580  	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1581  	u64 zero = 0;
1582  	u64 *counter;
1583  
1584  	if (vl == CNTR_INVALID_VL)
1585  		counter = &ppd->port_xmit_discards;
1586  	else if (vl >= 0 && vl < C_VL_COUNT)
1587  		counter = &ppd->port_xmit_discards_vl[vl];
1588  	else
1589  		counter = &zero;
1590  
1591  	return read_write_sw(ppd->dd, counter, mode, data);
1592  }
1593  
1594  static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1595  				       void *context, int vl, int mode,
1596  				       u64 data)
1597  {
1598  	struct hfi1_pportdata *ppd = context;
1599  
1600  	if (vl != CNTR_INVALID_VL)
1601  		return 0;
1602  
1603  	return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1604  			     mode, data);
1605  }
1606  
1607  static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1608  				      void *context, int vl, int mode, u64 data)
1609  {
1610  	struct hfi1_pportdata *ppd = context;
1611  
1612  	if (vl != CNTR_INVALID_VL)
1613  		return 0;
1614  
1615  	return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1616  			     mode, data);
1617  }
1618  
1619  u64 get_all_cpu_total(u64 __percpu *cntr)
1620  {
1621  	int cpu;
1622  	u64 counter = 0;
1623  
1624  	for_each_possible_cpu(cpu)
1625  		counter += *per_cpu_ptr(cntr, cpu);
1626  	return counter;
1627  }
1628  
1629  static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1630  			  u64 __percpu *cntr,
1631  			  int vl, int mode, u64 data)
1632  {
1633  	u64 ret = 0;
1634  
1635  	if (vl != CNTR_INVALID_VL)
1636  		return 0;
1637  
1638  	if (mode == CNTR_MODE_R) {
1639  		ret = get_all_cpu_total(cntr) - *z_val;
1640  	} else if (mode == CNTR_MODE_W) {
1641  		/* A write can only zero the counter */
1642  		if (data == 0)
1643  			*z_val = get_all_cpu_total(cntr);
1644  		else
1645  			dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1646  	} else {
1647  		dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1648  		return 0;
1649  	}
1650  
1651  	return ret;
1652  }
1653  
1654  static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1655  			      void *context, int vl, int mode, u64 data)
1656  {
1657  	struct hfi1_devdata *dd = context;
1658  
1659  	return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1660  			      mode, data);
1661  }
1662  
1663  static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1664  				   void *context, int vl, int mode, u64 data)
1665  {
1666  	struct hfi1_devdata *dd = context;
1667  
1668  	return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1669  			      mode, data);
1670  }
1671  
1672  static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1673  			      void *context, int vl, int mode, u64 data)
1674  {
1675  	struct hfi1_devdata *dd = context;
1676  
1677  	return dd->verbs_dev.n_piowait;
1678  }
1679  
1680  static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1681  			       void *context, int vl, int mode, u64 data)
1682  {
1683  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1684  
1685  	return dd->verbs_dev.n_piodrain;
1686  }
1687  
1688  static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1689  			      void *context, int vl, int mode, u64 data)
1690  {
1691  	struct hfi1_devdata *dd = context;
1692  
1693  	return dd->verbs_dev.n_txwait;
1694  }
1695  
1696  static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1697  			       void *context, int vl, int mode, u64 data)
1698  {
1699  	struct hfi1_devdata *dd = context;
1700  
1701  	return dd->verbs_dev.n_kmem_wait;
1702  }
1703  
1704  static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1705  				   void *context, int vl, int mode, u64 data)
1706  {
1707  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1708  
1709  	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1710  			      mode, data);
1711  }
1712  
1713  /* Software counters for the error status bits within MISC_ERR_STATUS */
1714  static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1715  					     void *context, int vl, int mode,
1716  					     u64 data)
1717  {
1718  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1719  
1720  	return dd->misc_err_status_cnt[12];
1721  }
1722  
1723  static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1724  					  void *context, int vl, int mode,
1725  					  u64 data)
1726  {
1727  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1728  
1729  	return dd->misc_err_status_cnt[11];
1730  }
1731  
1732  static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1733  					       void *context, int vl, int mode,
1734  					       u64 data)
1735  {
1736  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1737  
1738  	return dd->misc_err_status_cnt[10];
1739  }
1740  
1741  static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1742  						 void *context, int vl,
1743  						 int mode, u64 data)
1744  {
1745  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1746  
1747  	return dd->misc_err_status_cnt[9];
1748  }
1749  
1750  static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1751  					   void *context, int vl, int mode,
1752  					   u64 data)
1753  {
1754  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1755  
1756  	return dd->misc_err_status_cnt[8];
1757  }
1758  
1759  static u64 access_misc_efuse_read_bad_addr_err_cnt(
1760  				const struct cntr_entry *entry,
1761  				void *context, int vl, int mode, u64 data)
1762  {
1763  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1764  
1765  	return dd->misc_err_status_cnt[7];
1766  }
1767  
1768  static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1769  						void *context, int vl,
1770  						int mode, u64 data)
1771  {
1772  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1773  
1774  	return dd->misc_err_status_cnt[6];
1775  }
1776  
1777  static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1778  					      void *context, int vl, int mode,
1779  					      u64 data)
1780  {
1781  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1782  
1783  	return dd->misc_err_status_cnt[5];
1784  }
1785  
1786  static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1787  					    void *context, int vl, int mode,
1788  					    u64 data)
1789  {
1790  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1791  
1792  	return dd->misc_err_status_cnt[4];
1793  }
1794  
1795  static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1796  						 void *context, int vl,
1797  						 int mode, u64 data)
1798  {
1799  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1800  
1801  	return dd->misc_err_status_cnt[3];
1802  }
1803  
1804  static u64 access_misc_csr_write_bad_addr_err_cnt(
1805  				const struct cntr_entry *entry,
1806  				void *context, int vl, int mode, u64 data)
1807  {
1808  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1809  
1810  	return dd->misc_err_status_cnt[2];
1811  }
1812  
1813  static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1814  						 void *context, int vl,
1815  						 int mode, u64 data)
1816  {
1817  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1818  
1819  	return dd->misc_err_status_cnt[1];
1820  }
1821  
1822  static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1823  					  void *context, int vl, int mode,
1824  					  u64 data)
1825  {
1826  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1827  
1828  	return dd->misc_err_status_cnt[0];
1829  }
1830  
1831  /*
1832   * Software counter for the aggregate of
1833   * individual CceErrStatus counters
1834   */
1835  static u64 access_sw_cce_err_status_aggregated_cnt(
1836  				const struct cntr_entry *entry,
1837  				void *context, int vl, int mode, u64 data)
1838  {
1839  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1840  
1841  	return dd->sw_cce_err_status_aggregate;
1842  }
1843  
1844  /*
1845   * Software counters corresponding to each of the
1846   * error status bits within CceErrStatus
1847   */
1848  static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1849  					      void *context, int vl, int mode,
1850  					      u64 data)
1851  {
1852  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1853  
1854  	return dd->cce_err_status_cnt[40];
1855  }
1856  
1857  static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1858  					  void *context, int vl, int mode,
1859  					  u64 data)
1860  {
1861  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1862  
1863  	return dd->cce_err_status_cnt[39];
1864  }
1865  
1866  static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1867  					  void *context, int vl, int mode,
1868  					  u64 data)
1869  {
1870  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1871  
1872  	return dd->cce_err_status_cnt[38];
1873  }
1874  
1875  static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1876  					     void *context, int vl, int mode,
1877  					     u64 data)
1878  {
1879  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1880  
1881  	return dd->cce_err_status_cnt[37];
1882  }
1883  
1884  static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1885  					     void *context, int vl, int mode,
1886  					     u64 data)
1887  {
1888  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1889  
1890  	return dd->cce_err_status_cnt[36];
1891  }
1892  
1893  static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1894  				const struct cntr_entry *entry,
1895  				void *context, int vl, int mode, u64 data)
1896  {
1897  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1898  
1899  	return dd->cce_err_status_cnt[35];
1900  }
1901  
1902  static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1903  				const struct cntr_entry *entry,
1904  				void *context, int vl, int mode, u64 data)
1905  {
1906  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1907  
1908  	return dd->cce_err_status_cnt[34];
1909  }
1910  
1911  static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1912  						 void *context, int vl,
1913  						 int mode, u64 data)
1914  {
1915  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1916  
1917  	return dd->cce_err_status_cnt[33];
1918  }
1919  
1920  static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1921  						void *context, int vl, int mode,
1922  						u64 data)
1923  {
1924  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1925  
1926  	return dd->cce_err_status_cnt[32];
1927  }
1928  
1929  static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1930  				   void *context, int vl, int mode, u64 data)
1931  {
1932  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1933  
1934  	return dd->cce_err_status_cnt[31];
1935  }
1936  
1937  static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1938  					       void *context, int vl, int mode,
1939  					       u64 data)
1940  {
1941  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1942  
1943  	return dd->cce_err_status_cnt[30];
1944  }
1945  
1946  static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1947  					      void *context, int vl, int mode,
1948  					      u64 data)
1949  {
1950  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1951  
1952  	return dd->cce_err_status_cnt[29];
1953  }
1954  
1955  static u64 access_pcic_transmit_back_parity_err_cnt(
1956  				const struct cntr_entry *entry,
1957  				void *context, int vl, int mode, u64 data)
1958  {
1959  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1960  
1961  	return dd->cce_err_status_cnt[28];
1962  }
1963  
1964  static u64 access_pcic_transmit_front_parity_err_cnt(
1965  				const struct cntr_entry *entry,
1966  				void *context, int vl, int mode, u64 data)
1967  {
1968  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1969  
1970  	return dd->cce_err_status_cnt[27];
1971  }
1972  
1973  static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1974  					     void *context, int vl, int mode,
1975  					     u64 data)
1976  {
1977  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1978  
1979  	return dd->cce_err_status_cnt[26];
1980  }
1981  
1982  static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1983  					    void *context, int vl, int mode,
1984  					    u64 data)
1985  {
1986  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1987  
1988  	return dd->cce_err_status_cnt[25];
1989  }
1990  
1991  static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1992  					      void *context, int vl, int mode,
1993  					      u64 data)
1994  {
1995  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1996  
1997  	return dd->cce_err_status_cnt[24];
1998  }
1999  
2000  static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
2001  					     void *context, int vl, int mode,
2002  					     u64 data)
2003  {
2004  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2005  
2006  	return dd->cce_err_status_cnt[23];
2007  }
2008  
2009  static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
2010  						 void *context, int vl,
2011  						 int mode, u64 data)
2012  {
2013  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2014  
2015  	return dd->cce_err_status_cnt[22];
2016  }
2017  
2018  static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
2019  					 void *context, int vl, int mode,
2020  					 u64 data)
2021  {
2022  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2023  
2024  	return dd->cce_err_status_cnt[21];
2025  }
2026  
2027  static u64 access_pcic_n_post_dat_q_parity_err_cnt(
2028  				const struct cntr_entry *entry,
2029  				void *context, int vl, int mode, u64 data)
2030  {
2031  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2032  
2033  	return dd->cce_err_status_cnt[20];
2034  }
2035  
2036  static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
2037  						 void *context, int vl,
2038  						 int mode, u64 data)
2039  {
2040  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2041  
2042  	return dd->cce_err_status_cnt[19];
2043  }
2044  
2045  static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2046  					     void *context, int vl, int mode,
2047  					     u64 data)
2048  {
2049  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2050  
2051  	return dd->cce_err_status_cnt[18];
2052  }
2053  
2054  static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2055  					    void *context, int vl, int mode,
2056  					    u64 data)
2057  {
2058  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2059  
2060  	return dd->cce_err_status_cnt[17];
2061  }
2062  
2063  static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2064  					      void *context, int vl, int mode,
2065  					      u64 data)
2066  {
2067  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2068  
2069  	return dd->cce_err_status_cnt[16];
2070  }
2071  
2072  static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2073  					     void *context, int vl, int mode,
2074  					     u64 data)
2075  {
2076  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2077  
2078  	return dd->cce_err_status_cnt[15];
2079  }
2080  
2081  static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
2082  						 void *context, int vl,
2083  						 int mode, u64 data)
2084  {
2085  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2086  
2087  	return dd->cce_err_status_cnt[14];
2088  }
2089  
2090  static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2091  					     void *context, int vl, int mode,
2092  					     u64 data)
2093  {
2094  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2095  
2096  	return dd->cce_err_status_cnt[13];
2097  }
2098  
2099  static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2100  				const struct cntr_entry *entry,
2101  				void *context, int vl, int mode, u64 data)
2102  {
2103  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2104  
2105  	return dd->cce_err_status_cnt[12];
2106  }
2107  
2108  static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2109  				const struct cntr_entry *entry,
2110  				void *context, int vl, int mode, u64 data)
2111  {
2112  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2113  
2114  	return dd->cce_err_status_cnt[11];
2115  }
2116  
2117  static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2118  				const struct cntr_entry *entry,
2119  				void *context, int vl, int mode, u64 data)
2120  {
2121  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2122  
2123  	return dd->cce_err_status_cnt[10];
2124  }
2125  
2126  static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2127  				const struct cntr_entry *entry,
2128  				void *context, int vl, int mode, u64 data)
2129  {
2130  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2131  
2132  	return dd->cce_err_status_cnt[9];
2133  }
2134  
2135  static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2136  				const struct cntr_entry *entry,
2137  				void *context, int vl, int mode, u64 data)
2138  {
2139  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2140  
2141  	return dd->cce_err_status_cnt[8];
2142  }
2143  
2144  static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2145  						 void *context, int vl,
2146  						 int mode, u64 data)
2147  {
2148  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2149  
2150  	return dd->cce_err_status_cnt[7];
2151  }
2152  
2153  static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2154  				const struct cntr_entry *entry,
2155  				void *context, int vl, int mode, u64 data)
2156  {
2157  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2158  
2159  	return dd->cce_err_status_cnt[6];
2160  }
2161  
2162  static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2163  					       void *context, int vl, int mode,
2164  					       u64 data)
2165  {
2166  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2167  
2168  	return dd->cce_err_status_cnt[5];
2169  }
2170  
2171  static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2172  					  void *context, int vl, int mode,
2173  					  u64 data)
2174  {
2175  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2176  
2177  	return dd->cce_err_status_cnt[4];
2178  }
2179  
2180  static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2181  				const struct cntr_entry *entry,
2182  				void *context, int vl, int mode, u64 data)
2183  {
2184  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2185  
2186  	return dd->cce_err_status_cnt[3];
2187  }
2188  
2189  static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2190  						 void *context, int vl,
2191  						 int mode, u64 data)
2192  {
2193  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2194  
2195  	return dd->cce_err_status_cnt[2];
2196  }
2197  
2198  static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2199  						void *context, int vl,
2200  						int mode, u64 data)
2201  {
2202  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2203  
2204  	return dd->cce_err_status_cnt[1];
2205  }
2206  
2207  static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2208  					 void *context, int vl, int mode,
2209  					 u64 data)
2210  {
2211  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2212  
2213  	return dd->cce_err_status_cnt[0];
2214  }
2215  
2216  /*
2217   * Software counters corresponding to each of the
2218   * error status bits within RcvErrStatus
2219   */
2220  static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2221  					void *context, int vl, int mode,
2222  					u64 data)
2223  {
2224  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2225  
2226  	return dd->rcv_err_status_cnt[63];
2227  }
2228  
2229  static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2230  						void *context, int vl,
2231  						int mode, u64 data)
2232  {
2233  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2234  
2235  	return dd->rcv_err_status_cnt[62];
2236  }
2237  
2238  static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2239  					       void *context, int vl, int mode,
2240  					       u64 data)
2241  {
2242  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2243  
2244  	return dd->rcv_err_status_cnt[61];
2245  }
2246  
2247  static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2248  					 void *context, int vl, int mode,
2249  					 u64 data)
2250  {
2251  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2252  
2253  	return dd->rcv_err_status_cnt[60];
2254  }
2255  
2256  static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2257  						 void *context, int vl,
2258  						 int mode, u64 data)
2259  {
2260  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2261  
2262  	return dd->rcv_err_status_cnt[59];
2263  }
2264  
2265  static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2266  						 void *context, int vl,
2267  						 int mode, u64 data)
2268  {
2269  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2270  
2271  	return dd->rcv_err_status_cnt[58];
2272  }
2273  
2274  static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2275  					    void *context, int vl, int mode,
2276  					    u64 data)
2277  {
2278  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2279  
2280  	return dd->rcv_err_status_cnt[57];
2281  }
2282  
2283  static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2284  					   void *context, int vl, int mode,
2285  					   u64 data)
2286  {
2287  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2288  
2289  	return dd->rcv_err_status_cnt[56];
2290  }
2291  
2292  static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2293  					   void *context, int vl, int mode,
2294  					   u64 data)
2295  {
2296  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2297  
2298  	return dd->rcv_err_status_cnt[55];
2299  }
2300  
2301  static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2302  				const struct cntr_entry *entry,
2303  				void *context, int vl, int mode, u64 data)
2304  {
2305  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2306  
2307  	return dd->rcv_err_status_cnt[54];
2308  }
2309  
2310  static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2311  				const struct cntr_entry *entry,
2312  				void *context, int vl, int mode, u64 data)
2313  {
2314  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2315  
2316  	return dd->rcv_err_status_cnt[53];
2317  }
2318  
2319  static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2320  						 void *context, int vl,
2321  						 int mode, u64 data)
2322  {
2323  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2324  
2325  	return dd->rcv_err_status_cnt[52];
2326  }
2327  
2328  static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2329  						 void *context, int vl,
2330  						 int mode, u64 data)
2331  {
2332  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2333  
2334  	return dd->rcv_err_status_cnt[51];
2335  }
2336  
2337  static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2338  						 void *context, int vl,
2339  						 int mode, u64 data)
2340  {
2341  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2342  
2343  	return dd->rcv_err_status_cnt[50];
2344  }
2345  
2346  static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2347  						 void *context, int vl,
2348  						 int mode, u64 data)
2349  {
2350  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2351  
2352  	return dd->rcv_err_status_cnt[49];
2353  }
2354  
2355  static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2356  						 void *context, int vl,
2357  						 int mode, u64 data)
2358  {
2359  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2360  
2361  	return dd->rcv_err_status_cnt[48];
2362  }
2363  
2364  static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2365  						 void *context, int vl,
2366  						 int mode, u64 data)
2367  {
2368  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2369  
2370  	return dd->rcv_err_status_cnt[47];
2371  }
2372  
2373  static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2374  					 void *context, int vl, int mode,
2375  					 u64 data)
2376  {
2377  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2378  
2379  	return dd->rcv_err_status_cnt[46];
2380  }
2381  
2382  static u64 access_rx_hq_intr_csr_parity_err_cnt(
2383  				const struct cntr_entry *entry,
2384  				void *context, int vl, int mode, u64 data)
2385  {
2386  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2387  
2388  	return dd->rcv_err_status_cnt[45];
2389  }
2390  
2391  static u64 access_rx_lookup_csr_parity_err_cnt(
2392  				const struct cntr_entry *entry,
2393  				void *context, int vl, int mode, u64 data)
2394  {
2395  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2396  
2397  	return dd->rcv_err_status_cnt[44];
2398  }
2399  
2400  static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2401  				const struct cntr_entry *entry,
2402  				void *context, int vl, int mode, u64 data)
2403  {
2404  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2405  
2406  	return dd->rcv_err_status_cnt[43];
2407  }
2408  
2409  static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2410  				const struct cntr_entry *entry,
2411  				void *context, int vl, int mode, u64 data)
2412  {
2413  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2414  
2415  	return dd->rcv_err_status_cnt[42];
2416  }
2417  
2418  static u64 access_rx_lookup_des_part2_parity_err_cnt(
2419  				const struct cntr_entry *entry,
2420  				void *context, int vl, int mode, u64 data)
2421  {
2422  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2423  
2424  	return dd->rcv_err_status_cnt[41];
2425  }
2426  
2427  static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2428  				const struct cntr_entry *entry,
2429  				void *context, int vl, int mode, u64 data)
2430  {
2431  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2432  
2433  	return dd->rcv_err_status_cnt[40];
2434  }
2435  
2436  static u64 access_rx_lookup_des_part1_unc_err_cnt(
2437  				const struct cntr_entry *entry,
2438  				void *context, int vl, int mode, u64 data)
2439  {
2440  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2441  
2442  	return dd->rcv_err_status_cnt[39];
2443  }
2444  
2445  static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2446  				const struct cntr_entry *entry,
2447  				void *context, int vl, int mode, u64 data)
2448  {
2449  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2450  
2451  	return dd->rcv_err_status_cnt[38];
2452  }
2453  
2454  static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2455  				const struct cntr_entry *entry,
2456  				void *context, int vl, int mode, u64 data)
2457  {
2458  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2459  
2460  	return dd->rcv_err_status_cnt[37];
2461  }
2462  
2463  static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2464  				const struct cntr_entry *entry,
2465  				void *context, int vl, int mode, u64 data)
2466  {
2467  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2468  
2469  	return dd->rcv_err_status_cnt[36];
2470  }
2471  
2472  static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2473  				const struct cntr_entry *entry,
2474  				void *context, int vl, int mode, u64 data)
2475  {
2476  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2477  
2478  	return dd->rcv_err_status_cnt[35];
2479  }
2480  
2481  static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2482  				const struct cntr_entry *entry,
2483  				void *context, int vl, int mode, u64 data)
2484  {
2485  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2486  
2487  	return dd->rcv_err_status_cnt[34];
2488  }
2489  
2490  static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2491  				const struct cntr_entry *entry,
2492  				void *context, int vl, int mode, u64 data)
2493  {
2494  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2495  
2496  	return dd->rcv_err_status_cnt[33];
2497  }
2498  
2499  static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2500  					void *context, int vl, int mode,
2501  					u64 data)
2502  {
2503  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2504  
2505  	return dd->rcv_err_status_cnt[32];
2506  }
2507  
2508  static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2509  				       void *context, int vl, int mode,
2510  				       u64 data)
2511  {
2512  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2513  
2514  	return dd->rcv_err_status_cnt[31];
2515  }
2516  
2517  static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2518  					  void *context, int vl, int mode,
2519  					  u64 data)
2520  {
2521  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2522  
2523  	return dd->rcv_err_status_cnt[30];
2524  }
2525  
2526  static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2527  					     void *context, int vl, int mode,
2528  					     u64 data)
2529  {
2530  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2531  
2532  	return dd->rcv_err_status_cnt[29];
2533  }
2534  
2535  static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2536  						 void *context, int vl,
2537  						 int mode, u64 data)
2538  {
2539  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2540  
2541  	return dd->rcv_err_status_cnt[28];
2542  }
2543  
2544  static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2545  				const struct cntr_entry *entry,
2546  				void *context, int vl, int mode, u64 data)
2547  {
2548  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2549  
2550  	return dd->rcv_err_status_cnt[27];
2551  }
2552  
2553  static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2554  				const struct cntr_entry *entry,
2555  				void *context, int vl, int mode, u64 data)
2556  {
2557  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2558  
2559  	return dd->rcv_err_status_cnt[26];
2560  }
2561  
2562  static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2563  				const struct cntr_entry *entry,
2564  				void *context, int vl, int mode, u64 data)
2565  {
2566  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2567  
2568  	return dd->rcv_err_status_cnt[25];
2569  }
2570  
2571  static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2572  				const struct cntr_entry *entry,
2573  				void *context, int vl, int mode, u64 data)
2574  {
2575  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2576  
2577  	return dd->rcv_err_status_cnt[24];
2578  }
2579  
2580  static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2581  				const struct cntr_entry *entry,
2582  				void *context, int vl, int mode, u64 data)
2583  {
2584  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2585  
2586  	return dd->rcv_err_status_cnt[23];
2587  }
2588  
2589  static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2590  				const struct cntr_entry *entry,
2591  				void *context, int vl, int mode, u64 data)
2592  {
2593  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2594  
2595  	return dd->rcv_err_status_cnt[22];
2596  }
2597  
2598  static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2599  				const struct cntr_entry *entry,
2600  				void *context, int vl, int mode, u64 data)
2601  {
2602  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2603  
2604  	return dd->rcv_err_status_cnt[21];
2605  }
2606  
2607  static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2608  				const struct cntr_entry *entry,
2609  				void *context, int vl, int mode, u64 data)
2610  {
2611  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2612  
2613  	return dd->rcv_err_status_cnt[20];
2614  }
2615  
2616  static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2617  				const struct cntr_entry *entry,
2618  				void *context, int vl, int mode, u64 data)
2619  {
2620  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2621  
2622  	return dd->rcv_err_status_cnt[19];
2623  }
2624  
2625  static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2626  						 void *context, int vl,
2627  						 int mode, u64 data)
2628  {
2629  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2630  
2631  	return dd->rcv_err_status_cnt[18];
2632  }
2633  
2634  static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2635  						 void *context, int vl,
2636  						 int mode, u64 data)
2637  {
2638  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2639  
2640  	return dd->rcv_err_status_cnt[17];
2641  }
2642  
2643  static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2644  				const struct cntr_entry *entry,
2645  				void *context, int vl, int mode, u64 data)
2646  {
2647  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2648  
2649  	return dd->rcv_err_status_cnt[16];
2650  }
2651  
2652  static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2653  				const struct cntr_entry *entry,
2654  				void *context, int vl, int mode, u64 data)
2655  {
2656  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2657  
2658  	return dd->rcv_err_status_cnt[15];
2659  }
2660  
2661  static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2662  						void *context, int vl,
2663  						int mode, u64 data)
2664  {
2665  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2666  
2667  	return dd->rcv_err_status_cnt[14];
2668  }
2669  
2670  static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2671  						void *context, int vl,
2672  						int mode, u64 data)
2673  {
2674  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2675  
2676  	return dd->rcv_err_status_cnt[13];
2677  }
2678  
2679  static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2680  					      void *context, int vl, int mode,
2681  					      u64 data)
2682  {
2683  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2684  
2685  	return dd->rcv_err_status_cnt[12];
2686  }
2687  
2688  static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2689  					  void *context, int vl, int mode,
2690  					  u64 data)
2691  {
2692  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2693  
2694  	return dd->rcv_err_status_cnt[11];
2695  }
2696  
2697  static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2698  					  void *context, int vl, int mode,
2699  					  u64 data)
2700  {
2701  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2702  
2703  	return dd->rcv_err_status_cnt[10];
2704  }
2705  
2706  static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2707  					       void *context, int vl, int mode,
2708  					       u64 data)
2709  {
2710  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2711  
2712  	return dd->rcv_err_status_cnt[9];
2713  }
2714  
2715  static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2716  					    void *context, int vl, int mode,
2717  					    u64 data)
2718  {
2719  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2720  
2721  	return dd->rcv_err_status_cnt[8];
2722  }
2723  
2724  static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2725  				const struct cntr_entry *entry,
2726  				void *context, int vl, int mode, u64 data)
2727  {
2728  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2729  
2730  	return dd->rcv_err_status_cnt[7];
2731  }
2732  
2733  static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2734  				const struct cntr_entry *entry,
2735  				void *context, int vl, int mode, u64 data)
2736  {
2737  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2738  
2739  	return dd->rcv_err_status_cnt[6];
2740  }
2741  
2742  static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2743  					  void *context, int vl, int mode,
2744  					  u64 data)
2745  {
2746  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2747  
2748  	return dd->rcv_err_status_cnt[5];
2749  }
2750  
2751  static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2752  					  void *context, int vl, int mode,
2753  					  u64 data)
2754  {
2755  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2756  
2757  	return dd->rcv_err_status_cnt[4];
2758  }
2759  
2760  static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2761  					 void *context, int vl, int mode,
2762  					 u64 data)
2763  {
2764  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2765  
2766  	return dd->rcv_err_status_cnt[3];
2767  }
2768  
2769  static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2770  					 void *context, int vl, int mode,
2771  					 u64 data)
2772  {
2773  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2774  
2775  	return dd->rcv_err_status_cnt[2];
2776  }
2777  
2778  static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2779  					    void *context, int vl, int mode,
2780  					    u64 data)
2781  {
2782  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2783  
2784  	return dd->rcv_err_status_cnt[1];
2785  }
2786  
2787  static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2788  					 void *context, int vl, int mode,
2789  					 u64 data)
2790  {
2791  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2792  
2793  	return dd->rcv_err_status_cnt[0];
2794  }
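/*
 * Illustrative sketch only (assumptions noted): each accessor above shares
 * the same callback signature so it can be plugged into a software counter
 * table and invoked through a generic read path.  The CNTR_ELEM() macro,
 * example_cntrs[] array, rw_cntr member and CNTR_MODE_R value shown below
 * are assumed names for the sake of the example and may not match the table
 * definitions used elsewhere in this file; only the pattern is intended:
 *
 *	static struct cntr_entry example_cntrs[] = {
 *		CNTR_ELEM("RxDmaCsrCorErr", 0, 0, CNTR_NORMAL,
 *			  access_rx_dma_csr_cor_err_cnt),
 *	};
 *
 *	val = example_cntrs[0].rw_cntr(&example_cntrs[0], dd, vl,
 *				       CNTR_MODE_R, 0);
 *
 * A table walker can then report every per-bit software count without
 * knowing which error-status register, or which bit, backs a given entry.
 */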
2795  
2796  /*
2797   * Software counters corresponding to each of the
2798   * error status bits within SendPioErrStatus
2799   */
2800  static u64 access_pio_pec_sop_head_parity_err_cnt(
2801  				const struct cntr_entry *entry,
2802  				void *context, int vl, int mode, u64 data)
2803  {
2804  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2805  
2806  	return dd->send_pio_err_status_cnt[35];
2807  }
2808  
2809  static u64 access_pio_pcc_sop_head_parity_err_cnt(
2810  				const struct cntr_entry *entry,
2811  				void *context, int vl, int mode, u64 data)
2812  {
2813  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2814  
2815  	return dd->send_pio_err_status_cnt[34];
2816  }
2817  
2818  static u64 access_pio_last_returned_cnt_parity_err_cnt(
2819  				const struct cntr_entry *entry,
2820  				void *context, int vl, int mode, u64 data)
2821  {
2822  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2823  
2824  	return dd->send_pio_err_status_cnt[33];
2825  }
2826  
2827  static u64 access_pio_current_free_cnt_parity_err_cnt(
2828  				const struct cntr_entry *entry,
2829  				void *context, int vl, int mode, u64 data)
2830  {
2831  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2832  
2833  	return dd->send_pio_err_status_cnt[32];
2834  }
2835  
2836  static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2837  					  void *context, int vl, int mode,
2838  					  u64 data)
2839  {
2840  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2841  
2842  	return dd->send_pio_err_status_cnt[31];
2843  }
2844  
2845  static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2846  					  void *context, int vl, int mode,
2847  					  u64 data)
2848  {
2849  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2850  
2851  	return dd->send_pio_err_status_cnt[30];
2852  }
2853  
2854  static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2855  					   void *context, int vl, int mode,
2856  					   u64 data)
2857  {
2858  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2859  
2860  	return dd->send_pio_err_status_cnt[29];
2861  }
2862  
2863  static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2864  				const struct cntr_entry *entry,
2865  				void *context, int vl, int mode, u64 data)
2866  {
2867  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2868  
2869  	return dd->send_pio_err_status_cnt[28];
2870  }
2871  
2872  static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2873  					     void *context, int vl, int mode,
2874  					     u64 data)
2875  {
2876  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2877  
2878  	return dd->send_pio_err_status_cnt[27];
2879  }
2880  
2881  static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2882  					     void *context, int vl, int mode,
2883  					     u64 data)
2884  {
2885  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2886  
2887  	return dd->send_pio_err_status_cnt[26];
2888  }
2889  
2890  static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2891  						void *context, int vl,
2892  						int mode, u64 data)
2893  {
2894  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2895  
2896  	return dd->send_pio_err_status_cnt[25];
2897  }
2898  
2899  static u64 access_pio_block_qw_count_parity_err_cnt(
2900  				const struct cntr_entry *entry,
2901  				void *context, int vl, int mode, u64 data)
2902  {
2903  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2904  
2905  	return dd->send_pio_err_status_cnt[24];
2906  }
2907  
2908  static u64 access_pio_write_qw_valid_parity_err_cnt(
2909  				const struct cntr_entry *entry,
2910  				void *context, int vl, int mode, u64 data)
2911  {
2912  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2913  
2914  	return dd->send_pio_err_status_cnt[23];
2915  }
2916  
2917  static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2918  					    void *context, int vl, int mode,
2919  					    u64 data)
2920  {
2921  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2922  
2923  	return dd->send_pio_err_status_cnt[22];
2924  }
2925  
2926  static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2927  						void *context, int vl,
2928  						int mode, u64 data)
2929  {
2930  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2931  
2932  	return dd->send_pio_err_status_cnt[21];
2933  }
2934  
2935  static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2936  						void *context, int vl,
2937  						int mode, u64 data)
2938  {
2939  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2940  
2941  	return dd->send_pio_err_status_cnt[20];
2942  }
2943  
2944  static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2945  						void *context, int vl,
2946  						int mode, u64 data)
2947  {
2948  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2949  
2950  	return dd->send_pio_err_status_cnt[19];
2951  }
2952  
2953  static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2954  				const struct cntr_entry *entry,
2955  				void *context, int vl, int mode, u64 data)
2956  {
2957  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2958  
2959  	return dd->send_pio_err_status_cnt[18];
2960  }
2961  
2962  static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2963  					 void *context, int vl, int mode,
2964  					 u64 data)
2965  {
2966  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2967  
2968  	return dd->send_pio_err_status_cnt[17];
2969  }
2970  
2971  static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2972  					    void *context, int vl, int mode,
2973  					    u64 data)
2974  {
2975  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2976  
2977  	return dd->send_pio_err_status_cnt[16];
2978  }
2979  
2980  static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2981  				const struct cntr_entry *entry,
2982  				void *context, int vl, int mode, u64 data)
2983  {
2984  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2985  
2986  	return dd->send_pio_err_status_cnt[15];
2987  }
2988  
2989  static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2990  				const struct cntr_entry *entry,
2991  				void *context, int vl, int mode, u64 data)
2992  {
2993  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2994  
2995  	return dd->send_pio_err_status_cnt[14];
2996  }
2997  
2998  static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2999  				const struct cntr_entry *entry,
3000  				void *context, int vl, int mode, u64 data)
3001  {
3002  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3003  
3004  	return dd->send_pio_err_status_cnt[13];
3005  }
3006  
3007  static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
3008  				const struct cntr_entry *entry,
3009  				void *context, int vl, int mode, u64 data)
3010  {
3011  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3012  
3013  	return dd->send_pio_err_status_cnt[12];
3014  }
3015  
3016  static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
3017  				const struct cntr_entry *entry,
3018  				void *context, int vl, int mode, u64 data)
3019  {
3020  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3021  
3022  	return dd->send_pio_err_status_cnt[11];
3023  }
3024  
3025  static u64 access_pio_sm_pkt_reset_parity_err_cnt(
3026  				const struct cntr_entry *entry,
3027  				void *context, int vl, int mode, u64 data)
3028  {
3029  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3030  
3031  	return dd->send_pio_err_status_cnt[10];
3032  }
3033  
3034  static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
3035  				const struct cntr_entry *entry,
3036  				void *context, int vl, int mode, u64 data)
3037  {
3038  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3039  
3040  	return dd->send_pio_err_status_cnt[9];
3041  }
3042  
3043  static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
3044  				const struct cntr_entry *entry,
3045  				void *context, int vl, int mode, u64 data)
3046  {
3047  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3048  
3049  	return dd->send_pio_err_status_cnt[8];
3050  }
3051  
3052  static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
3053  				const struct cntr_entry *entry,
3054  				void *context, int vl, int mode, u64 data)
3055  {
3056  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3057  
3058  	return dd->send_pio_err_status_cnt[7];
3059  }
3060  
3061  static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
3062  					      void *context, int vl, int mode,
3063  					      u64 data)
3064  {
3065  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3066  
3067  	return dd->send_pio_err_status_cnt[6];
3068  }
3069  
3070  static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
3071  					      void *context, int vl, int mode,
3072  					      u64 data)
3073  {
3074  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3075  
3076  	return dd->send_pio_err_status_cnt[5];
3077  }
3078  
3079  static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
3080  					   void *context, int vl, int mode,
3081  					   u64 data)
3082  {
3083  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3084  
3085  	return dd->send_pio_err_status_cnt[4];
3086  }
3087  
3088  static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
3089  					   void *context, int vl, int mode,
3090  					   u64 data)
3091  {
3092  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3093  
3094  	return dd->send_pio_err_status_cnt[3];
3095  }
3096  
3097  static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3098  					 void *context, int vl, int mode,
3099  					 u64 data)
3100  {
3101  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3102  
3103  	return dd->send_pio_err_status_cnt[2];
3104  }
3105  
3106  static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3107  						void *context, int vl,
3108  						int mode, u64 data)
3109  {
3110  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3111  
3112  	return dd->send_pio_err_status_cnt[1];
3113  }
3114  
3115  static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3116  					     void *context, int vl, int mode,
3117  					     u64 data)
3118  {
3119  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3120  
3121  	return dd->send_pio_err_status_cnt[0];
3122  }
3123  
3124  /*
3125   * Software counters corresponding to each of the
3126   * error status bits within SendDmaErrStatus
3127   */
3128  static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3129  				const struct cntr_entry *entry,
3130  				void *context, int vl, int mode, u64 data)
3131  {
3132  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3133  
3134  	return dd->send_dma_err_status_cnt[3];
3135  }
3136  
3137  static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3138  				const struct cntr_entry *entry,
3139  				void *context, int vl, int mode, u64 data)
3140  {
3141  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3142  
3143  	return dd->send_dma_err_status_cnt[2];
3144  }
3145  
3146  static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3147  					  void *context, int vl, int mode,
3148  					  u64 data)
3149  {
3150  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3151  
3152  	return dd->send_dma_err_status_cnt[1];
3153  }
3154  
3155  static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3156  				       void *context, int vl, int mode,
3157  				       u64 data)
3158  {
3159  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3160  
3161  	return dd->send_dma_err_status_cnt[0];
3162  }
3163  
3164  /*
3165   * Software counters corresponding to each of the
3166   * error status bits within SendEgressErrStatus
3167   */
3168  static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3169  				const struct cntr_entry *entry,
3170  				void *context, int vl, int mode, u64 data)
3171  {
3172  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3173  
3174  	return dd->send_egress_err_status_cnt[63];
3175  }
3176  
3177  static u64 access_tx_read_sdma_memory_csr_err_cnt(
3178  				const struct cntr_entry *entry,
3179  				void *context, int vl, int mode, u64 data)
3180  {
3181  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3182  
3183  	return dd->send_egress_err_status_cnt[62];
3184  }
3185  
3186  static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3187  					     void *context, int vl, int mode,
3188  					     u64 data)
3189  {
3190  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3191  
3192  	return dd->send_egress_err_status_cnt[61];
3193  }
3194  
3195  static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3196  						 void *context, int vl,
3197  						 int mode, u64 data)
3198  {
3199  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3200  
3201  	return dd->send_egress_err_status_cnt[60];
3202  }
3203  
3204  static u64 access_tx_read_sdma_memory_cor_err_cnt(
3205  				const struct cntr_entry *entry,
3206  				void *context, int vl, int mode, u64 data)
3207  {
3208  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3209  
3210  	return dd->send_egress_err_status_cnt[59];
3211  }
3212  
3213  static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3214  					void *context, int vl, int mode,
3215  					u64 data)
3216  {
3217  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3218  
3219  	return dd->send_egress_err_status_cnt[58];
3220  }
3221  
3222  static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3223  					    void *context, int vl, int mode,
3224  					    u64 data)
3225  {
3226  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3227  
3228  	return dd->send_egress_err_status_cnt[57];
3229  }
3230  
3231  static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3232  					      void *context, int vl, int mode,
3233  					      u64 data)
3234  {
3235  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3236  
3237  	return dd->send_egress_err_status_cnt[56];
3238  }
3239  
3240  static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3241  					      void *context, int vl, int mode,
3242  					      u64 data)
3243  {
3244  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3245  
3246  	return dd->send_egress_err_status_cnt[55];
3247  }
3248  
3249  static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3250  					      void *context, int vl, int mode,
3251  					      u64 data)
3252  {
3253  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3254  
3255  	return dd->send_egress_err_status_cnt[54];
3256  }
3257  
3258  static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3259  					      void *context, int vl, int mode,
3260  					      u64 data)
3261  {
3262  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3263  
3264  	return dd->send_egress_err_status_cnt[53];
3265  }
3266  
3267  static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3268  					      void *context, int vl, int mode,
3269  					      u64 data)
3270  {
3271  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3272  
3273  	return dd->send_egress_err_status_cnt[52];
3274  }
3275  
3276  static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3277  					      void *context, int vl, int mode,
3278  					      u64 data)
3279  {
3280  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3281  
3282  	return dd->send_egress_err_status_cnt[51];
3283  }
3284  
3285  static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3286  					      void *context, int vl, int mode,
3287  					      u64 data)
3288  {
3289  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3290  
3291  	return dd->send_egress_err_status_cnt[50];
3292  }
3293  
3294  static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3295  					      void *context, int vl, int mode,
3296  					      u64 data)
3297  {
3298  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3299  
3300  	return dd->send_egress_err_status_cnt[49];
3301  }
3302  
3303  static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3304  					      void *context, int vl, int mode,
3305  					      u64 data)
3306  {
3307  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3308  
3309  	return dd->send_egress_err_status_cnt[48];
3310  }
3311  
3312  static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3313  					      void *context, int vl, int mode,
3314  					      u64 data)
3315  {
3316  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3317  
3318  	return dd->send_egress_err_status_cnt[47];
3319  }
3320  
3321  static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3322  					    void *context, int vl, int mode,
3323  					    u64 data)
3324  {
3325  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3326  
3327  	return dd->send_egress_err_status_cnt[46];
3328  }
3329  
3330  static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3331  					     void *context, int vl, int mode,
3332  					     u64 data)
3333  {
3334  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3335  
3336  	return dd->send_egress_err_status_cnt[45];
3337  }
3338  
3339  static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3340  						 void *context, int vl,
3341  						 int mode, u64 data)
3342  {
3343  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3344  
3345  	return dd->send_egress_err_status_cnt[44];
3346  }
3347  
3348  static u64 access_tx_read_sdma_memory_unc_err_cnt(
3349  				const struct cntr_entry *entry,
3350  				void *context, int vl, int mode, u64 data)
3351  {
3352  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3353  
3354  	return dd->send_egress_err_status_cnt[43];
3355  }
3356  
3357  static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3358  					void *context, int vl, int mode,
3359  					u64 data)
3360  {
3361  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3362  
3363  	return dd->send_egress_err_status_cnt[42];
3364  }
3365  
3366  static u64 access_tx_credit_return_partiy_err_cnt(
3367  				const struct cntr_entry *entry,
3368  				void *context, int vl, int mode, u64 data)
3369  {
3370  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3371  
3372  	return dd->send_egress_err_status_cnt[41];
3373  }
3374  
3375  static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3376  				const struct cntr_entry *entry,
3377  				void *context, int vl, int mode, u64 data)
3378  {
3379  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3380  
3381  	return dd->send_egress_err_status_cnt[40];
3382  }
3383  
3384  static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3385  				const struct cntr_entry *entry,
3386  				void *context, int vl, int mode, u64 data)
3387  {
3388  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3389  
3390  	return dd->send_egress_err_status_cnt[39];
3391  }
3392  
3393  static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3394  				const struct cntr_entry *entry,
3395  				void *context, int vl, int mode, u64 data)
3396  {
3397  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3398  
3399  	return dd->send_egress_err_status_cnt[38];
3400  }
3401  
3402  static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3403  				const struct cntr_entry *entry,
3404  				void *context, int vl, int mode, u64 data)
3405  {
3406  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3407  
3408  	return dd->send_egress_err_status_cnt[37];
3409  }
3410  
3411  static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3412  				const struct cntr_entry *entry,
3413  				void *context, int vl, int mode, u64 data)
3414  {
3415  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3416  
3417  	return dd->send_egress_err_status_cnt[36];
3418  }
3419  
3420  static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3421  				const struct cntr_entry *entry,
3422  				void *context, int vl, int mode, u64 data)
3423  {
3424  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3425  
3426  	return dd->send_egress_err_status_cnt[35];
3427  }
3428  
3429  static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3430  				const struct cntr_entry *entry,
3431  				void *context, int vl, int mode, u64 data)
3432  {
3433  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3434  
3435  	return dd->send_egress_err_status_cnt[34];
3436  }
3437  
3438  static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3439  				const struct cntr_entry *entry,
3440  				void *context, int vl, int mode, u64 data)
3441  {
3442  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3443  
3444  	return dd->send_egress_err_status_cnt[33];
3445  }
3446  
3447  static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3448  				const struct cntr_entry *entry,
3449  				void *context, int vl, int mode, u64 data)
3450  {
3451  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3452  
3453  	return dd->send_egress_err_status_cnt[32];
3454  }
3455  
3456  static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3457  				const struct cntr_entry *entry,
3458  				void *context, int vl, int mode, u64 data)
3459  {
3460  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3461  
3462  	return dd->send_egress_err_status_cnt[31];
3463  }
3464  
3465  static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3466  				const struct cntr_entry *entry,
3467  				void *context, int vl, int mode, u64 data)
3468  {
3469  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3470  
3471  	return dd->send_egress_err_status_cnt[30];
3472  }
3473  
3474  static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3475  				const struct cntr_entry *entry,
3476  				void *context, int vl, int mode, u64 data)
3477  {
3478  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3479  
3480  	return dd->send_egress_err_status_cnt[29];
3481  }
3482  
3483  static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3484  				const struct cntr_entry *entry,
3485  				void *context, int vl, int mode, u64 data)
3486  {
3487  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3488  
3489  	return dd->send_egress_err_status_cnt[28];
3490  }
3491  
3492  static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3493  				const struct cntr_entry *entry,
3494  				void *context, int vl, int mode, u64 data)
3495  {
3496  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3497  
3498  	return dd->send_egress_err_status_cnt[27];
3499  }
3500  
3501  static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3502  				const struct cntr_entry *entry,
3503  				void *context, int vl, int mode, u64 data)
3504  {
3505  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3506  
3507  	return dd->send_egress_err_status_cnt[26];
3508  }
3509  
3510  static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3511  				const struct cntr_entry *entry,
3512  				void *context, int vl, int mode, u64 data)
3513  {
3514  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3515  
3516  	return dd->send_egress_err_status_cnt[25];
3517  }
3518  
3519  static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3520  				const struct cntr_entry *entry,
3521  				void *context, int vl, int mode, u64 data)
3522  {
3523  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3524  
3525  	return dd->send_egress_err_status_cnt[24];
3526  }
3527  
3528  static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3529  				const struct cntr_entry *entry,
3530  				void *context, int vl, int mode, u64 data)
3531  {
3532  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3533  
3534  	return dd->send_egress_err_status_cnt[23];
3535  }
3536  
3537  static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3538  				const struct cntr_entry *entry,
3539  				void *context, int vl, int mode, u64 data)
3540  {
3541  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3542  
3543  	return dd->send_egress_err_status_cnt[22];
3544  }
3545  
3546  static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3547  				const struct cntr_entry *entry,
3548  				void *context, int vl, int mode, u64 data)
3549  {
3550  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3551  
3552  	return dd->send_egress_err_status_cnt[21];
3553  }
3554  
3555  static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3556  				const struct cntr_entry *entry,
3557  				void *context, int vl, int mode, u64 data)
3558  {
3559  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3560  
3561  	return dd->send_egress_err_status_cnt[20];
3562  }
3563  
3564  static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3565  				const struct cntr_entry *entry,
3566  				void *context, int vl, int mode, u64 data)
3567  {
3568  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3569  
3570  	return dd->send_egress_err_status_cnt[19];
3571  }
3572  
3573  static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3574  				const struct cntr_entry *entry,
3575  				void *context, int vl, int mode, u64 data)
3576  {
3577  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3578  
3579  	return dd->send_egress_err_status_cnt[18];
3580  }
3581  
3582  static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3583  				const struct cntr_entry *entry,
3584  				void *context, int vl, int mode, u64 data)
3585  {
3586  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3587  
3588  	return dd->send_egress_err_status_cnt[17];
3589  }
3590  
3591  static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3592  				const struct cntr_entry *entry,
3593  				void *context, int vl, int mode, u64 data)
3594  {
3595  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3596  
3597  	return dd->send_egress_err_status_cnt[16];
3598  }
3599  
3600  static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3601  					   void *context, int vl, int mode,
3602  					   u64 data)
3603  {
3604  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3605  
3606  	return dd->send_egress_err_status_cnt[15];
3607  }
3608  
3609  static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3610  						 void *context, int vl,
3611  						 int mode, u64 data)
3612  {
3613  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3614  
3615  	return dd->send_egress_err_status_cnt[14];
3616  }
3617  
3618  static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3619  					       void *context, int vl, int mode,
3620  					       u64 data)
3621  {
3622  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3623  
3624  	return dd->send_egress_err_status_cnt[13];
3625  }
3626  
3627  static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3628  					void *context, int vl, int mode,
3629  					u64 data)
3630  {
3631  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3632  
3633  	return dd->send_egress_err_status_cnt[12];
3634  }
3635  
3636  static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3637  				const struct cntr_entry *entry,
3638  				void *context, int vl, int mode, u64 data)
3639  {
3640  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3641  
3642  	return dd->send_egress_err_status_cnt[11];
3643  }
3644  
3645  static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3646  					     void *context, int vl, int mode,
3647  					     u64 data)
3648  {
3649  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3650  
3651  	return dd->send_egress_err_status_cnt[10];
3652  }
3653  
3654  static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3655  					    void *context, int vl, int mode,
3656  					    u64 data)
3657  {
3658  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3659  
3660  	return dd->send_egress_err_status_cnt[9];
3661  }
3662  
3663  static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3664  				const struct cntr_entry *entry,
3665  				void *context, int vl, int mode, u64 data)
3666  {
3667  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3668  
3669  	return dd->send_egress_err_status_cnt[8];
3670  }
3671  
3672  static u64 access_tx_pio_launch_intf_parity_err_cnt(
3673  				const struct cntr_entry *entry,
3674  				void *context, int vl, int mode, u64 data)
3675  {
3676  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3677  
3678  	return dd->send_egress_err_status_cnt[7];
3679  }
3680  
3681  static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3682  					    void *context, int vl, int mode,
3683  					    u64 data)
3684  {
3685  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3686  
3687  	return dd->send_egress_err_status_cnt[6];
3688  }
3689  
3690  static u64 access_tx_incorrect_link_state_err_cnt(
3691  				const struct cntr_entry *entry,
3692  				void *context, int vl, int mode, u64 data)
3693  {
3694  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3695  
3696  	return dd->send_egress_err_status_cnt[5];
3697  }
3698  
3699  static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3700  				      void *context, int vl, int mode,
3701  				      u64 data)
3702  {
3703  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3704  
3705  	return dd->send_egress_err_status_cnt[4];
3706  }
3707  
3708  static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3709  				const struct cntr_entry *entry,
3710  				void *context, int vl, int mode, u64 data)
3711  {
3712  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3713  
3714  	return dd->send_egress_err_status_cnt[3];
3715  }
3716  
3717  static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3718  					    void *context, int vl, int mode,
3719  					    u64 data)
3720  {
3721  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3722  
3723  	return dd->send_egress_err_status_cnt[2];
3724  }
3725  
3726  static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3727  				const struct cntr_entry *entry,
3728  				void *context, int vl, int mode, u64 data)
3729  {
3730  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3731  
3732  	return dd->send_egress_err_status_cnt[1];
3733  }
3734  
3735  static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3736  				const struct cntr_entry *entry,
3737  				void *context, int vl, int mode, u64 data)
3738  {
3739  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3740  
3741  	return dd->send_egress_err_status_cnt[0];
3742  }
3743  
3744  /*
3745   * Software counters corresponding to each of the
3746   * error status bits within SendErrStatus
3747   */
3748  static u64 access_send_csr_write_bad_addr_err_cnt(
3749  				const struct cntr_entry *entry,
3750  				void *context, int vl, int mode, u64 data)
3751  {
3752  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3753  
3754  	return dd->send_err_status_cnt[2];
3755  }
3756  
3757  static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3758  						 void *context, int vl,
3759  						 int mode, u64 data)
3760  {
3761  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3762  
3763  	return dd->send_err_status_cnt[1];
3764  }
3765  
3766  static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3767  				      void *context, int vl, int mode,
3768  				      u64 data)
3769  {
3770  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3771  
3772  	return dd->send_err_status_cnt[0];
3773  }
3774  
3775  /*
3776   * Software counters corresponding to each of the
3777   * error status bits within SendCtxtErrStatus
3778   */
3779  static u64 access_pio_write_out_of_bounds_err_cnt(
3780  				const struct cntr_entry *entry,
3781  				void *context, int vl, int mode, u64 data)
3782  {
3783  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3784  
3785  	return dd->sw_ctxt_err_status_cnt[4];
3786  }
3787  
3788  static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3789  					     void *context, int vl, int mode,
3790  					     u64 data)
3791  {
3792  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3793  
3794  	return dd->sw_ctxt_err_status_cnt[3];
3795  }
3796  
3797  static u64 access_pio_write_crosses_boundary_err_cnt(
3798  				const struct cntr_entry *entry,
3799  				void *context, int vl, int mode, u64 data)
3800  {
3801  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3802  
3803  	return dd->sw_ctxt_err_status_cnt[2];
3804  }
3805  
3806  static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3807  						void *context, int vl,
3808  						int mode, u64 data)
3809  {
3810  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3811  
3812  	return dd->sw_ctxt_err_status_cnt[1];
3813  }
3814  
3815  static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3816  					       void *context, int vl, int mode,
3817  					       u64 data)
3818  {
3819  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3820  
3821  	return dd->sw_ctxt_err_status_cnt[0];
3822  }
3823  
3824  /*
3825   * Software counters corresponding to each of the
3826   * error status bits within SendDmaEngErrStatus
3827   */
3828  static u64 access_sdma_header_request_fifo_cor_err_cnt(
3829  				const struct cntr_entry *entry,
3830  				void *context, int vl, int mode, u64 data)
3831  {
3832  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3833  
3834  	return dd->sw_send_dma_eng_err_status_cnt[23];
3835  }
3836  
3837  static u64 access_sdma_header_storage_cor_err_cnt(
3838  				const struct cntr_entry *entry,
3839  				void *context, int vl, int mode, u64 data)
3840  {
3841  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3842  
3843  	return dd->sw_send_dma_eng_err_status_cnt[22];
3844  }
3845  
3846  static u64 access_sdma_packet_tracking_cor_err_cnt(
3847  				const struct cntr_entry *entry,
3848  				void *context, int vl, int mode, u64 data)
3849  {
3850  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3851  
3852  	return dd->sw_send_dma_eng_err_status_cnt[21];
3853  }
3854  
3855  static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3856  					    void *context, int vl, int mode,
3857  					    u64 data)
3858  {
3859  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3860  
3861  	return dd->sw_send_dma_eng_err_status_cnt[20];
3862  }
3863  
3864  static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3865  					      void *context, int vl, int mode,
3866  					      u64 data)
3867  {
3868  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3869  
3870  	return dd->sw_send_dma_eng_err_status_cnt[19];
3871  }
3872  
3873  static u64 access_sdma_header_request_fifo_unc_err_cnt(
3874  				const struct cntr_entry *entry,
3875  				void *context, int vl, int mode, u64 data)
3876  {
3877  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3878  
3879  	return dd->sw_send_dma_eng_err_status_cnt[18];
3880  }
3881  
3882  static u64 access_sdma_header_storage_unc_err_cnt(
3883  				const struct cntr_entry *entry,
3884  				void *context, int vl, int mode, u64 data)
3885  {
3886  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3887  
3888  	return dd->sw_send_dma_eng_err_status_cnt[17];
3889  }
3890  
3891  static u64 access_sdma_packet_tracking_unc_err_cnt(
3892  				const struct cntr_entry *entry,
3893  				void *context, int vl, int mode, u64 data)
3894  {
3895  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3896  
3897  	return dd->sw_send_dma_eng_err_status_cnt[16];
3898  }
3899  
3900  static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3901  					    void *context, int vl, int mode,
3902  					    u64 data)
3903  {
3904  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3905  
3906  	return dd->sw_send_dma_eng_err_status_cnt[15];
3907  }
3908  
3909  static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3910  					      void *context, int vl, int mode,
3911  					      u64 data)
3912  {
3913  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3914  
3915  	return dd->sw_send_dma_eng_err_status_cnt[14];
3916  }
3917  
3918  static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3919  				       void *context, int vl, int mode,
3920  				       u64 data)
3921  {
3922  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3923  
3924  	return dd->sw_send_dma_eng_err_status_cnt[13];
3925  }
3926  
3927  static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3928  					     void *context, int vl, int mode,
3929  					     u64 data)
3930  {
3931  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3932  
3933  	return dd->sw_send_dma_eng_err_status_cnt[12];
3934  }
3935  
3936  static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3937  					      void *context, int vl, int mode,
3938  					      u64 data)
3939  {
3940  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3941  
3942  	return dd->sw_send_dma_eng_err_status_cnt[11];
3943  }
3944  
3945  static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3946  					     void *context, int vl, int mode,
3947  					     u64 data)
3948  {
3949  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3950  
3951  	return dd->sw_send_dma_eng_err_status_cnt[10];
3952  }
3953  
3954  static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3955  					  void *context, int vl, int mode,
3956  					  u64 data)
3957  {
3958  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3959  
3960  	return dd->sw_send_dma_eng_err_status_cnt[9];
3961  }
3962  
3963  static u64 access_sdma_packet_desc_overflow_err_cnt(
3964  				const struct cntr_entry *entry,
3965  				void *context, int vl, int mode, u64 data)
3966  {
3967  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3968  
3969  	return dd->sw_send_dma_eng_err_status_cnt[8];
3970  }
3971  
3972  static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3973  					       void *context, int vl,
3974  					       int mode, u64 data)
3975  {
3976  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3977  
3978  	return dd->sw_send_dma_eng_err_status_cnt[7];
3979  }
3980  
3981  static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3982  				    void *context, int vl, int mode, u64 data)
3983  {
3984  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3985  
3986  	return dd->sw_send_dma_eng_err_status_cnt[6];
3987  }
3988  
3989  static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3990  					void *context, int vl, int mode,
3991  					u64 data)
3992  {
3993  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3994  
3995  	return dd->sw_send_dma_eng_err_status_cnt[5];
3996  }
3997  
3998  static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3999  					  void *context, int vl, int mode,
4000  					  u64 data)
4001  {
4002  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4003  
4004  	return dd->sw_send_dma_eng_err_status_cnt[4];
4005  }
4006  
4007  static u64 access_sdma_tail_out_of_bounds_err_cnt(
4008  				const struct cntr_entry *entry,
4009  				void *context, int vl, int mode, u64 data)
4010  {
4011  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4012  
4013  	return dd->sw_send_dma_eng_err_status_cnt[3];
4014  }
4015  
4016  static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
4017  					void *context, int vl, int mode,
4018  					u64 data)
4019  {
4020  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4021  
4022  	return dd->sw_send_dma_eng_err_status_cnt[2];
4023  }
4024  
4025  static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
4026  					    void *context, int vl, int mode,
4027  					    u64 data)
4028  {
4029  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4030  
4031  	return dd->sw_send_dma_eng_err_status_cnt[1];
4032  }
4033  
4034  static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
4035  					void *context, int vl, int mode,
4036  					u64 data)
4037  {
4038  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4039  
4040  	return dd->sw_send_dma_eng_err_status_cnt[0];
4041  }
4042  
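/*
 * DC receive error counter: the CSR value is combined with the
 * software-counted bypass packet errors.  On a read, the software
 * count is added with saturation at CNTR_MAX; on a write (counter
 * reset), the software count is cleared after the CSR access.
 */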
4043  static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
4044  				 void *context, int vl, int mode,
4045  				 u64 data)
4046  {
4047  	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4048  
4049  	u64 val = 0;
4050  	u64 csr = entry->csr;
4051  
4052  	val = read_write_csr(dd, csr, mode, data);
4053  	if (mode == CNTR_MODE_R) {
4054  		val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
4055  			CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
4056  	} else if (mode == CNTR_MODE_W) {
4057  		dd->sw_rcv_bypass_packet_errors = 0;
4058  	} else {
4059  		dd_dev_err(dd, "Invalid cntr register access mode");
4060  		return 0;
4061  	}
4062  	return val;
4063  }
4064  
4065  #define def_access_sw_cpu(cntr) \
4066  static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,		      \
4067  			      void *context, int vl, int mode, u64 data)      \
4068  {									      \
4069  	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
4070  	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,	      \
4071  			      ppd->ibport_data.rvp.cntr, vl,		      \
4072  			      mode, data);				      \
4073  }
4074  
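/*
 * A sketch of what one invocation below expands to; for example,
 * def_access_sw_cpu(rc_acks) generates:
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *				      void *context, int vl, int mode, u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *		return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_rc_acks,
 *				      ppd->ibport_data.rvp.rc_acks, vl,
 *				      mode, data);
 *	}
 */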
4075  def_access_sw_cpu(rc_acks);
4076  def_access_sw_cpu(rc_qacks);
4077  def_access_sw_cpu(rc_delayed_comp);
4078  
4079  #define def_access_ibp_counter(cntr) \
4080  static u64 access_ibp_##cntr(const struct cntr_entry *entry,		      \
4081  				void *context, int vl, int mode, u64 data)    \
4082  {									      \
4083  	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
4084  									      \
4085  	if (vl != CNTR_INVALID_VL)					      \
4086  		return 0;						      \
4087  									      \
4088  	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,	      \
4089  			     mode, data);				      \
4090  }
4091  
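/*
 * The per-port IB counters generated below report only the port
 * aggregate: a request for a specific VL (vl != CNTR_INVALID_VL)
 * returns 0, otherwise the value is read or written through
 * read_write_sw() on the matching ibport_data.rvp.n_<cntr> field.
 */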
4092  def_access_ibp_counter(loop_pkts);
4093  def_access_ibp_counter(rc_resends);
4094  def_access_ibp_counter(rnr_naks);
4095  def_access_ibp_counter(other_naks);
4096  def_access_ibp_counter(rc_timeouts);
4097  def_access_ibp_counter(pkt_drops);
4098  def_access_ibp_counter(dmawait);
4099  def_access_ibp_counter(rc_seqnak);
4100  def_access_ibp_counter(rc_dupreq);
4101  def_access_ibp_counter(rdma_seq);
4102  def_access_ibp_counter(unaligned);
4103  def_access_ibp_counter(seq_naks);
4104  def_access_ibp_counter(rc_crwaits);
4105  
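/*
 * Device counter table, indexed by the C_* device counter indices via
 * designated initializers.  Hardware-backed entries use the
 * RXE32/CCE/DC_PERF element macros to name the CSR directly, while
 * the software error counters use CNTR_ELEM with one of the access
 * functions defined above.
 */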
4106  static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
4107  [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
4108  [C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH),
4109  [C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH),
4110  [C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH),
4111  [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
4112  			CNTR_NORMAL),
4113  [C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
4114  			CNTR_NORMAL),
4115  [C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
4116  			RCV_TID_FLOW_GEN_MISMATCH_CNT,
4117  			CNTR_NORMAL),
4118  [C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4119  			CNTR_NORMAL),
4120  [C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4121  			RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4122  [C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4123  			CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4124  [C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4125  			CNTR_NORMAL),
4126  [C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4127  			CNTR_NORMAL),
4128  [C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4129  			CNTR_NORMAL),
4130  [C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4131  			CNTR_NORMAL),
4132  [C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4133  			CNTR_NORMAL),
4134  [C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4135  			CNTR_NORMAL),
4136  [C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4137  			CCE_RCV_URGENT_INT_CNT,	CNTR_NORMAL),
4138  [C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4139  			CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4140  [C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4141  			      CNTR_SYNTH),
4142  [C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
4143  			    access_dc_rcv_err_cnt),
4144  [C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4145  				 CNTR_SYNTH),
4146  [C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4147  				  CNTR_SYNTH),
4148  [C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4149  				  CNTR_SYNTH),
4150  [C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4151  				   DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4152  [C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4153  				  DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4154  				  CNTR_SYNTH),
4155  [C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4156  				DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4157  [C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4158  			       CNTR_SYNTH),
4159  [C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4160  			      CNTR_SYNTH),
4161  [C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4162  			       CNTR_SYNTH),
4163  [C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4164  				 CNTR_SYNTH),
4165  [C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4166  				CNTR_SYNTH),
4167  [C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4168  				CNTR_SYNTH),
4169  [C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4170  			       CNTR_SYNTH),
4171  [C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4172  				 CNTR_SYNTH | CNTR_VL),
4173  [C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4174  				CNTR_SYNTH | CNTR_VL),
4175  [C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4176  [C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4177  				 CNTR_SYNTH | CNTR_VL),
4178  [C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4179  [C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4180  				 CNTR_SYNTH | CNTR_VL),
4181  [C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4182  			      CNTR_SYNTH),
4183  [C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4184  				 CNTR_SYNTH | CNTR_VL),
4185  [C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4186  				CNTR_SYNTH),
4187  [C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4188  				   CNTR_SYNTH | CNTR_VL),
4189  [C_DC_TOTAL_CRC] =
4190  	DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4191  			 CNTR_SYNTH),
4192  [C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4193  				  CNTR_SYNTH),
4194  [C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4195  				  CNTR_SYNTH),
4196  [C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4197  				  CNTR_SYNTH),
4198  [C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4199  				  CNTR_SYNTH),
4200  [C_DC_CRC_MULT_LN] =
4201  	DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4202  			 CNTR_SYNTH),
4203  [C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4204  				    CNTR_SYNTH),
4205  [C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4206  				    CNTR_SYNTH),
4207  [C_DC_SEQ_CRC_CNT] =
4208  	DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4209  			 CNTR_SYNTH),
4210  [C_DC_ESC0_ONLY_CNT] =
4211  	DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4212  			 CNTR_SYNTH),
4213  [C_DC_ESC0_PLUS1_CNT] =
4214  	DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4215  			 CNTR_SYNTH),
4216  [C_DC_ESC0_PLUS2_CNT] =
4217  	DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4218  			 CNTR_SYNTH),
4219  [C_DC_REINIT_FROM_PEER_CNT] =
4220  	DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4221  			 CNTR_SYNTH),
4222  [C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4223  				  CNTR_SYNTH),
4224  [C_DC_MISC_FLG_CNT] =
4225  	DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4226  			 CNTR_SYNTH),
4227  [C_DC_PRF_GOOD_LTP_CNT] =
4228  	DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4229  [C_DC_PRF_ACCEPTED_LTP_CNT] =
4230  	DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4231  			 CNTR_SYNTH),
4232  [C_DC_PRF_RX_FLIT_CNT] =
4233  	DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4234  [C_DC_PRF_TX_FLIT_CNT] =
4235  	DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4236  [C_DC_PRF_CLK_CNTR] =
4237  	DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4238  [C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4239  	DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4240  [C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4241  	DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4242  			 CNTR_SYNTH),
4243  [C_DC_PG_STS_TX_SBE_CNT] =
4244  	DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4245  [C_DC_PG_STS_TX_MBE_CNT] =
4246  	DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4247  			 CNTR_SYNTH),
4248  [C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4249  			    access_sw_cpu_intr),
4250  [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4251  			    access_sw_cpu_rcv_limit),
4252  [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4253  			    access_sw_vtx_wait),
4254  [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4255  			    access_sw_pio_wait),
4256  [C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4257  			    access_sw_pio_drain),
4258  [C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4259  			    access_sw_kmem_wait),
4260  [C_SW_TID_WAIT] = CNTR_ELEM("TidWait", 0, 0, CNTR_NORMAL,
4261  			    hfi1_access_sw_tid_wait),
4262  [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4263  			    access_sw_send_schedule),
4264  [C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4265  				      SEND_DMA_DESC_FETCHED_CNT, 0,
4266  				      CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4267  				      dev_access_u32_csr),
4268  [C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4269  			     CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4270  			     access_sde_int_cnt),
4271  [C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4272  			     CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4273  			     access_sde_err_cnt),
4274  [C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4275  				  CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4276  				  access_sde_idle_int_cnt),
4277  [C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4278  				      CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4279  				      access_sde_progress_int_cnt),
4280  /* MISC_ERR_STATUS */
4281  [C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4282  				CNTR_NORMAL,
4283  				access_misc_pll_lock_fail_err_cnt),
4284  [C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4285  				CNTR_NORMAL,
4286  				access_misc_mbist_fail_err_cnt),
4287  [C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4288  				CNTR_NORMAL,
4289  				access_misc_invalid_eep_cmd_err_cnt),
4290  [C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4291  				CNTR_NORMAL,
4292  				access_misc_efuse_done_parity_err_cnt),
4293  [C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4294  				CNTR_NORMAL,
4295  				access_misc_efuse_write_err_cnt),
4296  [C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4297  				0, CNTR_NORMAL,
4298  				access_misc_efuse_read_bad_addr_err_cnt),
4299  [C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4300  				CNTR_NORMAL,
4301  				access_misc_efuse_csr_parity_err_cnt),
4302  [C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4303  				CNTR_NORMAL,
4304  				access_misc_fw_auth_failed_err_cnt),
4305  [C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4306  				CNTR_NORMAL,
4307  				access_misc_key_mismatch_err_cnt),
4308  [C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4309  				CNTR_NORMAL,
4310  				access_misc_sbus_write_failed_err_cnt),
4311  [C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4312  				CNTR_NORMAL,
4313  				access_misc_csr_write_bad_addr_err_cnt),
4314  [C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4315  				CNTR_NORMAL,
4316  				access_misc_csr_read_bad_addr_err_cnt),
4317  [C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4318  				CNTR_NORMAL,
4319  				access_misc_csr_parity_err_cnt),
4320  /* CceErrStatus */
4321  [C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4322  				CNTR_NORMAL,
4323  				access_sw_cce_err_status_aggregated_cnt),
4324  [C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4325  				CNTR_NORMAL,
4326  				access_cce_msix_csr_parity_err_cnt),
4327  [C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4328  				CNTR_NORMAL,
4329  				access_cce_int_map_unc_err_cnt),
4330  [C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4331  				CNTR_NORMAL,
4332  				access_cce_int_map_cor_err_cnt),
4333  [C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4334  				CNTR_NORMAL,
4335  				access_cce_msix_table_unc_err_cnt),
4336  [C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4337  				CNTR_NORMAL,
4338  				access_cce_msix_table_cor_err_cnt),
4339  [C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4340  				0, CNTR_NORMAL,
4341  				access_cce_rxdma_conv_fifo_parity_err_cnt),
4342  [C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4343  				0, CNTR_NORMAL,
4344  				access_cce_rcpl_async_fifo_parity_err_cnt),
4345  [C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4346  				CNTR_NORMAL,
4347  				access_cce_seg_write_bad_addr_err_cnt),
4348  [C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4349  				CNTR_NORMAL,
4350  				access_cce_seg_read_bad_addr_err_cnt),
4351  [C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4352  				CNTR_NORMAL,
4353  				access_la_triggered_cnt),
4354  [C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4355  				CNTR_NORMAL,
4356  				access_cce_trgt_cpl_timeout_err_cnt),
4357  [C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4358  				CNTR_NORMAL,
4359  				access_pcic_receive_parity_err_cnt),
4360  [C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4361  				CNTR_NORMAL,
4362  				access_pcic_transmit_back_parity_err_cnt),
4363  [C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4364  				0, CNTR_NORMAL,
4365  				access_pcic_transmit_front_parity_err_cnt),
4366  [C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4367  				CNTR_NORMAL,
4368  				access_pcic_cpl_dat_q_unc_err_cnt),
4369  [C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4370  				CNTR_NORMAL,
4371  				access_pcic_cpl_hd_q_unc_err_cnt),
4372  [C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4373  				CNTR_NORMAL,
4374  				access_pcic_post_dat_q_unc_err_cnt),
4375  [C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4376  				CNTR_NORMAL,
4377  				access_pcic_post_hd_q_unc_err_cnt),
4378  [C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4379  				CNTR_NORMAL,
4380  				access_pcic_retry_sot_mem_unc_err_cnt),
4381  [C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4382  				CNTR_NORMAL,
4383  				access_pcic_retry_mem_unc_err),
4384  [C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4385  				CNTR_NORMAL,
4386  				access_pcic_n_post_dat_q_parity_err_cnt),
4387  [C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4388  				CNTR_NORMAL,
4389  				access_pcic_n_post_h_q_parity_err_cnt),
4390  [C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4391  				CNTR_NORMAL,
4392  				access_pcic_cpl_dat_q_cor_err_cnt),
4393  [C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4394  				CNTR_NORMAL,
4395  				access_pcic_cpl_hd_q_cor_err_cnt),
4396  [C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4397  				CNTR_NORMAL,
4398  				access_pcic_post_dat_q_cor_err_cnt),
4399  [C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4400  				CNTR_NORMAL,
4401  				access_pcic_post_hd_q_cor_err_cnt),
4402  [C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4403  				CNTR_NORMAL,
4404  				access_pcic_retry_sot_mem_cor_err_cnt),
4405  [C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4406  				CNTR_NORMAL,
4407  				access_pcic_retry_mem_cor_err_cnt),
4408  [C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4409  				"CceCli1AsyncFifoDbgParityError", 0, 0,
4410  				CNTR_NORMAL,
4411  				access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4412  [C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4413  				"CceCli1AsyncFifoRxdmaParityError", 0, 0,
4414  				CNTR_NORMAL,
4415  				access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4416  				),
4417  [C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4418  			"CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4419  			CNTR_NORMAL,
4420  			access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4421  [C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4422  			"CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4423  			CNTR_NORMAL,
4424  			access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4425  [C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4426  			0, CNTR_NORMAL,
4427  			access_cce_cli2_async_fifo_parity_err_cnt),
4428  [C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4429  			CNTR_NORMAL,
4430  			access_cce_csr_cfg_bus_parity_err_cnt),
4431  [C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4432  			0, CNTR_NORMAL,
4433  			access_cce_cli0_async_fifo_parity_err_cnt),
4434  [C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4435  			CNTR_NORMAL,
4436  			access_cce_rspd_data_parity_err_cnt),
4437  [C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4438  			CNTR_NORMAL,
4439  			access_cce_trgt_access_err_cnt),
4440  [C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4441  			0, CNTR_NORMAL,
4442  			access_cce_trgt_async_fifo_parity_err_cnt),
4443  [C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4444  			CNTR_NORMAL,
4445  			access_cce_csr_write_bad_addr_err_cnt),
4446  [C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4447  			CNTR_NORMAL,
4448  			access_cce_csr_read_bad_addr_err_cnt),
4449  [C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4450  			CNTR_NORMAL,
4451  			access_ccs_csr_parity_err_cnt),
4452  
4453  /* RcvErrStatus */
4454  [C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4455  			CNTR_NORMAL,
4456  			access_rx_csr_parity_err_cnt),
4457  [C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4458  			CNTR_NORMAL,
4459  			access_rx_csr_write_bad_addr_err_cnt),
4460  [C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4461  			CNTR_NORMAL,
4462  			access_rx_csr_read_bad_addr_err_cnt),
4463  [C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4464  			CNTR_NORMAL,
4465  			access_rx_dma_csr_unc_err_cnt),
4466  [C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4467  			CNTR_NORMAL,
4468  			access_rx_dma_dq_fsm_encoding_err_cnt),
4469  [C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4470  			CNTR_NORMAL,
4471  			access_rx_dma_eq_fsm_encoding_err_cnt),
4472  [C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4473  			CNTR_NORMAL,
4474  			access_rx_dma_csr_parity_err_cnt),
4475  [C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4476  			CNTR_NORMAL,
4477  			access_rx_rbuf_data_cor_err_cnt),
4478  [C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4479  			CNTR_NORMAL,
4480  			access_rx_rbuf_data_unc_err_cnt),
4481  [C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4482  			CNTR_NORMAL,
4483  			access_rx_dma_data_fifo_rd_cor_err_cnt),
4484  [C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4485  			CNTR_NORMAL,
4486  			access_rx_dma_data_fifo_rd_unc_err_cnt),
4487  [C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4488  			CNTR_NORMAL,
4489  			access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4490  [C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4491  			CNTR_NORMAL,
4492  			access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4493  [C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4494  			CNTR_NORMAL,
4495  			access_rx_rbuf_desc_part2_cor_err_cnt),
4496  [C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4497  			CNTR_NORMAL,
4498  			access_rx_rbuf_desc_part2_unc_err_cnt),
4499  [C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4500  			CNTR_NORMAL,
4501  			access_rx_rbuf_desc_part1_cor_err_cnt),
4502  [C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4503  			CNTR_NORMAL,
4504  			access_rx_rbuf_desc_part1_unc_err_cnt),
4505  [C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4506  			CNTR_NORMAL,
4507  			access_rx_hq_intr_fsm_err_cnt),
4508  [C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4509  			CNTR_NORMAL,
4510  			access_rx_hq_intr_csr_parity_err_cnt),
4511  [C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4512  			CNTR_NORMAL,
4513  			access_rx_lookup_csr_parity_err_cnt),
4514  [C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4515  			CNTR_NORMAL,
4516  			access_rx_lookup_rcv_array_cor_err_cnt),
4517  [C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4518  			CNTR_NORMAL,
4519  			access_rx_lookup_rcv_array_unc_err_cnt),
4520  [C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4521  			0, CNTR_NORMAL,
4522  			access_rx_lookup_des_part2_parity_err_cnt),
4523  [C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4524  			0, CNTR_NORMAL,
4525  			access_rx_lookup_des_part1_unc_cor_err_cnt),
4526  [C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4527  			CNTR_NORMAL,
4528  			access_rx_lookup_des_part1_unc_err_cnt),
4529  [C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4530  			CNTR_NORMAL,
4531  			access_rx_rbuf_next_free_buf_cor_err_cnt),
4532  [C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4533  			CNTR_NORMAL,
4534  			access_rx_rbuf_next_free_buf_unc_err_cnt),
4535  [C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4536  			"RxRbufFlInitWrAddrParityErr", 0, 0,
4537  			CNTR_NORMAL,
4538  			access_rbuf_fl_init_wr_addr_parity_err_cnt),
4539  [C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4540  			0, CNTR_NORMAL,
4541  			access_rx_rbuf_fl_initdone_parity_err_cnt),
4542  [C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4543  			0, CNTR_NORMAL,
4544  			access_rx_rbuf_fl_write_addr_parity_err_cnt),
4545  [C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4546  			CNTR_NORMAL,
4547  			access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4548  [C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4549  			CNTR_NORMAL,
4550  			access_rx_rbuf_empty_err_cnt),
4551  [C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4552  			CNTR_NORMAL,
4553  			access_rx_rbuf_full_err_cnt),
4554  [C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4555  			CNTR_NORMAL,
4556  			access_rbuf_bad_lookup_err_cnt),
4557  [C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4558  			CNTR_NORMAL,
4559  			access_rbuf_ctx_id_parity_err_cnt),
4560  [C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4561  			CNTR_NORMAL,
4562  			access_rbuf_csr_qeopdw_parity_err_cnt),
4563  [C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4564  			"RxRbufCsrQNumOfPktParityErr", 0, 0,
4565  			CNTR_NORMAL,
4566  			access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4567  [C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4568  			"RxRbufCsrQTlPtrParityErr", 0, 0,
4569  			CNTR_NORMAL,
4570  			access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4571  [C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4572  			0, CNTR_NORMAL,
4573  			access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4574  [C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4575  			0, CNTR_NORMAL,
4576  			access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4577  [C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4578  			0, 0, CNTR_NORMAL,
4579  			access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4580  [C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4581  			0, CNTR_NORMAL,
4582  			access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4583  [C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4584  			"RxRbufCsrQHeadBufNumParityErr", 0, 0,
4585  			CNTR_NORMAL,
4586  			access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4587  [C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4588  			0, CNTR_NORMAL,
4589  			access_rx_rbuf_block_list_read_cor_err_cnt),
4590  [C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4591  			0, CNTR_NORMAL,
4592  			access_rx_rbuf_block_list_read_unc_err_cnt),
4593  [C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4594  			CNTR_NORMAL,
4595  			access_rx_rbuf_lookup_des_cor_err_cnt),
4596  [C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4597  			CNTR_NORMAL,
4598  			access_rx_rbuf_lookup_des_unc_err_cnt),
4599  [C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4600  			"RxRbufLookupDesRegUncCorErr", 0, 0,
4601  			CNTR_NORMAL,
4602  			access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4603  [C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4604  			CNTR_NORMAL,
4605  			access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4606  [C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4607  			CNTR_NORMAL,
4608  			access_rx_rbuf_free_list_cor_err_cnt),
4609  [C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4610  			CNTR_NORMAL,
4611  			access_rx_rbuf_free_list_unc_err_cnt),
4612  [C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4613  			CNTR_NORMAL,
4614  			access_rx_rcv_fsm_encoding_err_cnt),
4615  [C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4616  			CNTR_NORMAL,
4617  			access_rx_dma_flag_cor_err_cnt),
4618  [C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4619  			CNTR_NORMAL,
4620  			access_rx_dma_flag_unc_err_cnt),
4621  [C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4622  			CNTR_NORMAL,
4623  			access_rx_dc_sop_eop_parity_err_cnt),
4624  [C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4625  			CNTR_NORMAL,
4626  			access_rx_rcv_csr_parity_err_cnt),
4627  [C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4628  			CNTR_NORMAL,
4629  			access_rx_rcv_qp_map_table_cor_err_cnt),
4630  [C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4631  			CNTR_NORMAL,
4632  			access_rx_rcv_qp_map_table_unc_err_cnt),
4633  [C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4634  			CNTR_NORMAL,
4635  			access_rx_rcv_data_cor_err_cnt),
4636  [C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4637  			CNTR_NORMAL,
4638  			access_rx_rcv_data_unc_err_cnt),
4639  [C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4640  			CNTR_NORMAL,
4641  			access_rx_rcv_hdr_cor_err_cnt),
4642  [C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4643  			CNTR_NORMAL,
4644  			access_rx_rcv_hdr_unc_err_cnt),
4645  [C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4646  			CNTR_NORMAL,
4647  			access_rx_dc_intf_parity_err_cnt),
4648  [C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4649  			CNTR_NORMAL,
4650  			access_rx_dma_csr_cor_err_cnt),
4651  /* SendPioErrStatus */
4652  [C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4653  			CNTR_NORMAL,
4654  			access_pio_pec_sop_head_parity_err_cnt),
4655  [C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4656  			CNTR_NORMAL,
4657  			access_pio_pcc_sop_head_parity_err_cnt),
4658  [C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4659  			0, 0, CNTR_NORMAL,
4660  			access_pio_last_returned_cnt_parity_err_cnt),
4661  [C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4662  			0, CNTR_NORMAL,
4663  			access_pio_current_free_cnt_parity_err_cnt),
4664  [C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4665  			CNTR_NORMAL,
4666  			access_pio_reserved_31_err_cnt),
4667  [C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4668  			CNTR_NORMAL,
4669  			access_pio_reserved_30_err_cnt),
4670  [C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4671  			CNTR_NORMAL,
4672  			access_pio_ppmc_sop_len_err_cnt),
4673  [C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4674  			CNTR_NORMAL,
4675  			access_pio_ppmc_bqc_mem_parity_err_cnt),
4676  [C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4677  			CNTR_NORMAL,
4678  			access_pio_vl_fifo_parity_err_cnt),
4679  [C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4680  			CNTR_NORMAL,
4681  			access_pio_vlf_sop_parity_err_cnt),
4682  [C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4683  			CNTR_NORMAL,
4684  			access_pio_vlf_v1_len_parity_err_cnt),
4685  [C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4686  			CNTR_NORMAL,
4687  			access_pio_block_qw_count_parity_err_cnt),
4688  [C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4689  			CNTR_NORMAL,
4690  			access_pio_write_qw_valid_parity_err_cnt),
4691  [C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4692  			CNTR_NORMAL,
4693  			access_pio_state_machine_err_cnt),
4694  [C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4695  			CNTR_NORMAL,
4696  			access_pio_write_data_parity_err_cnt),
4697  [C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4698  			CNTR_NORMAL,
4699  			access_pio_host_addr_mem_cor_err_cnt),
4700  [C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4701  			CNTR_NORMAL,
4702  			access_pio_host_addr_mem_unc_err_cnt),
4703  [C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4704  			CNTR_NORMAL,
4705  			access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4706  [C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4707  			CNTR_NORMAL,
4708  			access_pio_init_sm_in_err_cnt),
4709  [C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4710  			CNTR_NORMAL,
4711  			access_pio_ppmc_pbl_fifo_err_cnt),
4712  [C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4713  			0, CNTR_NORMAL,
4714  			access_pio_credit_ret_fifo_parity_err_cnt),
4715  [C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4716  			CNTR_NORMAL,
4717  			access_pio_v1_len_mem_bank1_cor_err_cnt),
4718  [C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4719  			CNTR_NORMAL,
4720  			access_pio_v1_len_mem_bank0_cor_err_cnt),
4721  [C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4722  			CNTR_NORMAL,
4723  			access_pio_v1_len_mem_bank1_unc_err_cnt),
4724  [C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4725  			CNTR_NORMAL,
4726  			access_pio_v1_len_mem_bank0_unc_err_cnt),
4727  [C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4728  			CNTR_NORMAL,
4729  			access_pio_sm_pkt_reset_parity_err_cnt),
4730  [C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4731  			CNTR_NORMAL,
4732  			access_pio_pkt_evict_fifo_parity_err_cnt),
4733  [C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4734  			"PioSbrdctrlCrrelFifoParityErr", 0, 0,
4735  			CNTR_NORMAL,
4736  			access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4737  [C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4738  			CNTR_NORMAL,
4739  			access_pio_sbrdctl_crrel_parity_err_cnt),
4740  [C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4741  			CNTR_NORMAL,
4742  			access_pio_pec_fifo_parity_err_cnt),
4743  [C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4744  			CNTR_NORMAL,
4745  			access_pio_pcc_fifo_parity_err_cnt),
4746  [C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4747  			CNTR_NORMAL,
4748  			access_pio_sb_mem_fifo1_err_cnt),
4749  [C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4750  			CNTR_NORMAL,
4751  			access_pio_sb_mem_fifo0_err_cnt),
4752  [C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4753  			CNTR_NORMAL,
4754  			access_pio_csr_parity_err_cnt),
4755  [C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4756  			CNTR_NORMAL,
4757  			access_pio_write_addr_parity_err_cnt),
4758  [C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4759  			CNTR_NORMAL,
4760  			access_pio_write_bad_ctxt_err_cnt),
4761  /* SendDmaErrStatus */
4762  [C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4763  			0, CNTR_NORMAL,
4764  			access_sdma_pcie_req_tracking_cor_err_cnt),
4765  [C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4766  			0, CNTR_NORMAL,
4767  			access_sdma_pcie_req_tracking_unc_err_cnt),
4768  [C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4769  			CNTR_NORMAL,
4770  			access_sdma_csr_parity_err_cnt),
4771  [C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4772  			CNTR_NORMAL,
4773  			access_sdma_rpy_tag_err_cnt),
4774  /* SendEgressErrStatus */
4775  [C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4776  			CNTR_NORMAL,
4777  			access_tx_read_pio_memory_csr_unc_err_cnt),
4778  [C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4779  			0, CNTR_NORMAL,
4780  			access_tx_read_sdma_memory_csr_err_cnt),
4781  [C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4782  			CNTR_NORMAL,
4783  			access_tx_egress_fifo_cor_err_cnt),
4784  [C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4785  			CNTR_NORMAL,
4786  			access_tx_read_pio_memory_cor_err_cnt),
4787  [C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4788  			CNTR_NORMAL,
4789  			access_tx_read_sdma_memory_cor_err_cnt),
4790  [C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4791  			CNTR_NORMAL,
4792  			access_tx_sb_hdr_cor_err_cnt),
4793  [C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4794  			CNTR_NORMAL,
4795  			access_tx_credit_overrun_err_cnt),
4796  [C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4797  			CNTR_NORMAL,
4798  			access_tx_launch_fifo8_cor_err_cnt),
4799  [C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4800  			CNTR_NORMAL,
4801  			access_tx_launch_fifo7_cor_err_cnt),
4802  [C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4803  			CNTR_NORMAL,
4804  			access_tx_launch_fifo6_cor_err_cnt),
4805  [C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4806  			CNTR_NORMAL,
4807  			access_tx_launch_fifo5_cor_err_cnt),
4808  [C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4809  			CNTR_NORMAL,
4810  			access_tx_launch_fifo4_cor_err_cnt),
4811  [C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4812  			CNTR_NORMAL,
4813  			access_tx_launch_fifo3_cor_err_cnt),
4814  [C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4815  			CNTR_NORMAL,
4816  			access_tx_launch_fifo2_cor_err_cnt),
4817  [C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4818  			CNTR_NORMAL,
4819  			access_tx_launch_fifo1_cor_err_cnt),
4820  [C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4821  			CNTR_NORMAL,
4822  			access_tx_launch_fifo0_cor_err_cnt),
4823  [C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4824  			CNTR_NORMAL,
4825  			access_tx_credit_return_vl_err_cnt),
4826  [C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4827  			CNTR_NORMAL,
4828  			access_tx_hcrc_insertion_err_cnt),
4829  [C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4830  			CNTR_NORMAL,
4831  			access_tx_egress_fifo_unc_err_cnt),
4832  [C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4833  			CNTR_NORMAL,
4834  			access_tx_read_pio_memory_unc_err_cnt),
4835  [C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4836  			CNTR_NORMAL,
4837  			access_tx_read_sdma_memory_unc_err_cnt),
4838  [C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4839  			CNTR_NORMAL,
4840  			access_tx_sb_hdr_unc_err_cnt),
4841  [C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4842  			CNTR_NORMAL,
4843  			access_tx_credit_return_partiy_err_cnt),
4844  [C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4845  			0, 0, CNTR_NORMAL,
4846  			access_tx_launch_fifo8_unc_or_parity_err_cnt),
4847  [C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4848  			0, 0, CNTR_NORMAL,
4849  			access_tx_launch_fifo7_unc_or_parity_err_cnt),
4850  [C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4851  			0, 0, CNTR_NORMAL,
4852  			access_tx_launch_fifo6_unc_or_parity_err_cnt),
4853  [C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4854  			0, 0, CNTR_NORMAL,
4855  			access_tx_launch_fifo5_unc_or_parity_err_cnt),
4856  [C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4857  			0, 0, CNTR_NORMAL,
4858  			access_tx_launch_fifo4_unc_or_parity_err_cnt),
4859  [C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4860  			0, 0, CNTR_NORMAL,
4861  			access_tx_launch_fifo3_unc_or_parity_err_cnt),
4862  [C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4863  			0, 0, CNTR_NORMAL,
4864  			access_tx_launch_fifo2_unc_or_parity_err_cnt),
4865  [C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4866  			0, 0, CNTR_NORMAL,
4867  			access_tx_launch_fifo1_unc_or_parity_err_cnt),
4868  [C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4869  			0, 0, CNTR_NORMAL,
4870  			access_tx_launch_fifo0_unc_or_parity_err_cnt),
4871  [C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4872  			0, 0, CNTR_NORMAL,
4873  			access_tx_sdma15_disallowed_packet_err_cnt),
4874  [C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4875  			0, 0, CNTR_NORMAL,
4876  			access_tx_sdma14_disallowed_packet_err_cnt),
4877  [C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4878  			0, 0, CNTR_NORMAL,
4879  			access_tx_sdma13_disallowed_packet_err_cnt),
4880  [C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4881  			0, 0, CNTR_NORMAL,
4882  			access_tx_sdma12_disallowed_packet_err_cnt),
4883  [C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4884  			0, 0, CNTR_NORMAL,
4885  			access_tx_sdma11_disallowed_packet_err_cnt),
4886  [C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4887  			0, 0, CNTR_NORMAL,
4888  			access_tx_sdma10_disallowed_packet_err_cnt),
4889  [C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4890  			0, 0, CNTR_NORMAL,
4891  			access_tx_sdma9_disallowed_packet_err_cnt),
4892  [C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4893  			0, 0, CNTR_NORMAL,
4894  			access_tx_sdma8_disallowed_packet_err_cnt),
4895  [C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4896  			0, 0, CNTR_NORMAL,
4897  			access_tx_sdma7_disallowed_packet_err_cnt),
4898  [C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4899  			0, 0, CNTR_NORMAL,
4900  			access_tx_sdma6_disallowed_packet_err_cnt),
4901  [C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4902  			0, 0, CNTR_NORMAL,
4903  			access_tx_sdma5_disallowed_packet_err_cnt),
4904  [C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4905  			0, 0, CNTR_NORMAL,
4906  			access_tx_sdma4_disallowed_packet_err_cnt),
4907  [C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4908  			0, 0, CNTR_NORMAL,
4909  			access_tx_sdma3_disallowed_packet_err_cnt),
4910  [C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4911  			0, 0, CNTR_NORMAL,
4912  			access_tx_sdma2_disallowed_packet_err_cnt),
4913  [C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4914  			0, 0, CNTR_NORMAL,
4915  			access_tx_sdma1_disallowed_packet_err_cnt),
4916  [C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4917  			0, 0, CNTR_NORMAL,
4918  			access_tx_sdma0_disallowed_packet_err_cnt),
4919  [C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4920  			CNTR_NORMAL,
4921  			access_tx_config_parity_err_cnt),
4922  [C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4923  			CNTR_NORMAL,
4924  			access_tx_sbrd_ctl_csr_parity_err_cnt),
4925  [C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4926  			CNTR_NORMAL,
4927  			access_tx_launch_csr_parity_err_cnt),
4928  [C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4929  			CNTR_NORMAL,
4930  			access_tx_illegal_vl_err_cnt),
4931  [C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4932  			"TxSbrdCtlStateMachineParityErr", 0, 0,
4933  			CNTR_NORMAL,
4934  			access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4935  [C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4936  			CNTR_NORMAL,
4937  			access_egress_reserved_10_err_cnt),
4938  [C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4939  			CNTR_NORMAL,
4940  			access_egress_reserved_9_err_cnt),
4941  [C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4942  			0, 0, CNTR_NORMAL,
4943  			access_tx_sdma_launch_intf_parity_err_cnt),
4944  [C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4945  			CNTR_NORMAL,
4946  			access_tx_pio_launch_intf_parity_err_cnt),
4947  [C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4948  			CNTR_NORMAL,
4949  			access_egress_reserved_6_err_cnt),
4950  [C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4951  			CNTR_NORMAL,
4952  			access_tx_incorrect_link_state_err_cnt),
4953  [C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4954  			CNTR_NORMAL,
4955  			access_tx_linkdown_err_cnt),
4956  [C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4957  			"EgressFifoUnderrunOrParityErr", 0, 0,
4958  			CNTR_NORMAL,
4959  			access_tx_egress_fifi_underrun_or_parity_err_cnt),
4960  [C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4961  			CNTR_NORMAL,
4962  			access_egress_reserved_2_err_cnt),
4963  [C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4964  			CNTR_NORMAL,
4965  			access_tx_pkt_integrity_mem_unc_err_cnt),
4966  [C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4967  			CNTR_NORMAL,
4968  			access_tx_pkt_integrity_mem_cor_err_cnt),
4969  /* SendErrStatus */
4970  [C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4971  			CNTR_NORMAL,
4972  			access_send_csr_write_bad_addr_err_cnt),
4973  [C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4974  			CNTR_NORMAL,
4975  			access_send_csr_read_bad_addr_err_cnt),
4976  [C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4977  			CNTR_NORMAL,
4978  			access_send_csr_parity_cnt),
4979  /* SendCtxtErrStatus */
4980  [C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4981  			CNTR_NORMAL,
4982  			access_pio_write_out_of_bounds_err_cnt),
4983  [C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4984  			CNTR_NORMAL,
4985  			access_pio_write_overflow_err_cnt),
4986  [C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4987  			0, 0, CNTR_NORMAL,
4988  			access_pio_write_crosses_boundary_err_cnt),
4989  [C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4990  			CNTR_NORMAL,
4991  			access_pio_disallowed_packet_err_cnt),
4992  [C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4993  			CNTR_NORMAL,
4994  			access_pio_inconsistent_sop_err_cnt),
4995  /* SendDmaEngErrStatus */
4996  [C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4997  			0, 0, CNTR_NORMAL,
4998  			access_sdma_header_request_fifo_cor_err_cnt),
4999  [C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
5000  			CNTR_NORMAL,
5001  			access_sdma_header_storage_cor_err_cnt),
5002  [C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
5003  			CNTR_NORMAL,
5004  			access_sdma_packet_tracking_cor_err_cnt),
5005  [C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
5006  			CNTR_NORMAL,
5007  			access_sdma_assembly_cor_err_cnt),
5008  [C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
5009  			CNTR_NORMAL,
5010  			access_sdma_desc_table_cor_err_cnt),
5011  [C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
5012  			0, 0, CNTR_NORMAL,
5013  			access_sdma_header_request_fifo_unc_err_cnt),
5014  [C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
5015  			CNTR_NORMAL,
5016  			access_sdma_header_storage_unc_err_cnt),
5017  [C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
5018  			CNTR_NORMAL,
5019  			access_sdma_packet_tracking_unc_err_cnt),
5020  [C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
5021  			CNTR_NORMAL,
5022  			access_sdma_assembly_unc_err_cnt),
5023  [C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
5024  			CNTR_NORMAL,
5025  			access_sdma_desc_table_unc_err_cnt),
5026  [C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
5027  			CNTR_NORMAL,
5028  			access_sdma_timeout_err_cnt),
5029  [C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
5030  			CNTR_NORMAL,
5031  			access_sdma_header_length_err_cnt),
5032  [C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
5033  			CNTR_NORMAL,
5034  			access_sdma_header_address_err_cnt),
5035  [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
5036  			CNTR_NORMAL,
5037  			access_sdma_header_select_err_cnt),
5038  [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
5039  			CNTR_NORMAL,
5040  			access_sdma_reserved_9_err_cnt),
5041  [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
5042  			CNTR_NORMAL,
5043  			access_sdma_packet_desc_overflow_err_cnt),
5044  [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
5045  			CNTR_NORMAL,
5046  			access_sdma_length_mismatch_err_cnt),
5047  [C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
5048  			CNTR_NORMAL,
5049  			access_sdma_halt_err_cnt),
5050  [C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
5051  			CNTR_NORMAL,
5052  			access_sdma_mem_read_err_cnt),
5053  [C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
5054  			CNTR_NORMAL,
5055  			access_sdma_first_desc_err_cnt),
5056  [C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
5057  			CNTR_NORMAL,
5058  			access_sdma_tail_out_of_bounds_err_cnt),
5059  [C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
5060  			CNTR_NORMAL,
5061  			access_sdma_too_long_err_cnt),
5062  [C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
5063  			CNTR_NORMAL,
5064  			access_sdma_gen_mismatch_err_cnt),
5065  [C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
5066  			CNTR_NORMAL,
5067  			access_sdma_wrong_dw_err_cnt),
5068  };
5069  
5070  static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
5071  [C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
5072  			CNTR_NORMAL),
5073  [C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
5074  			CNTR_NORMAL),
5075  [C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
5076  			CNTR_NORMAL),
5077  [C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
5078  			CNTR_NORMAL),
5079  [C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
5080  			CNTR_NORMAL),
5081  [C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
5082  			CNTR_NORMAL),
5083  [C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
5084  			CNTR_NORMAL),
5085  [C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
5086  [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
5087  [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
5088  [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
5089  				      CNTR_SYNTH | CNTR_VL),
5090  [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
5091  				     CNTR_SYNTH | CNTR_VL),
5092  [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
5093  				      CNTR_SYNTH | CNTR_VL),
5094  [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
5095  [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
5096  [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5097  			     access_sw_link_dn_cnt),
5098  [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5099  			   access_sw_link_up_cnt),
5100  [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
5101  				 access_sw_unknown_frame_cnt),
5102  [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5103  			     access_sw_xmit_discards),
5104  [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
5105  				CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
5106  				access_sw_xmit_discards),
5107  [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
5108  				 access_xmit_constraint_errs),
5109  [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
5110  				access_rcv_constraint_errs),
5111  [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
5112  [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
5113  [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
5114  [C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
5115  [C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
5116  [C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
5117  [C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
5118  [C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
5119  [C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
5120  [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5121  [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5122  [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5123  [C_SW_IBP_RC_CRWAITS] = SW_IBP_CNTR(RcCrWait, rc_crwaits),
5124  [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5125  			       access_sw_cpu_rc_acks),
5126  [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
5127  				access_sw_cpu_rc_qacks),
5128  [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
5129  				       access_sw_cpu_rc_delayed_comp),
5130  [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5131  [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5132  [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5133  [OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5134  [OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5135  [OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5136  [OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5137  [OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5138  [OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5139  [OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5140  [OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5141  [OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5142  [OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5143  [OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5144  [OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5145  [OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5146  [OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5147  [OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5148  [OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5149  [OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5150  [OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5151  [OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5152  [OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5153  [OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5154  [OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5155  [OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5156  [OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5157  [OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5158  [OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5159  [OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5160  [OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5161  [OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5162  [OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5163  [OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5164  [OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5165  [OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5166  [OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5167  [OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5168  [OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5169  [OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5170  [OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5171  [OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5172  [OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5173  [OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5174  [OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5175  [OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5176  [OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5177  [OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5178  [OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5179  [OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5180  [OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5181  [OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5182  [OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5183  [OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5184  [OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5185  [OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5186  [OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5187  [OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5188  [OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5189  [OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5190  [OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5191  [OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5192  [OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5193  [OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5194  [OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5195  [OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5196  [OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5197  [OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5198  [OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5199  [OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5200  [OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5201  [OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5202  [OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5203  [OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5204  [OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5205  [OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5206  [OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5207  [OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5208  [OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5209  [OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5210  };
5211  
5212  /* ======================================================================== */
5213  
/* return true if this is chip revision a */
int is_ax(struct hfi1_devdata *dd)
5216  {
5217  	u8 chip_rev_minor =
5218  		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5219  			& CCE_REVISION_CHIP_REV_MINOR_MASK;
5220  	return (chip_rev_minor & 0xf0) == 0;
5221  }
5222  
/* return true if this is chip revision b */
int is_bx(struct hfi1_devdata *dd)
5225  {
5226  	u8 chip_rev_minor =
5227  		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5228  			& CCE_REVISION_CHIP_REV_MINOR_MASK;
5229  	return (chip_rev_minor & 0xF0) == 0x10;
5230  }
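
/*
 * Editor's illustrative example (not in the original source): is_ax() and
 * is_bx() above test the high nibble of the minor chip revision field in
 * dd->revision, so 0x0n identifies an A-step part and 0x1n a B-step part.
 * A caller that only needs a printable step letter could do something like:
 *
 *	char step = is_ax(dd) ? 'A' : is_bx(dd) ? 'B' : '?';
 *
 *	dd_dev_info(dd, "chip step %c\n", step);
 */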
5231  
/* return true if the kernel urgent interrupt is disabled for rcd */
bool is_urg_masked(struct hfi1_ctxtdata *rcd)
5234  {
5235  	u64 mask;
5236  	u32 is = IS_RCVURGENT_START + rcd->ctxt;
5237  	u8 bit = is % 64;
5238  
5239  	mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64)));
5240  	return !(mask & BIT_ULL(bit));
5241  }
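
/*
 * Editor's sketch of the lookup above: each CCE_INT_MASK CSR holds 64
 * interrupt enable bits, so interrupt source "is" lives in CSR (is / 64)
 * at bit position (is % 64).  For a receive context, the urgent source is
 * IS_RCVURGENT_START + rcd->ctxt, and the interrupt is masked when that
 * bit is clear:
 *
 *	u32 is = IS_RCVURGENT_START + rcd->ctxt;
 *	u64 mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64)));
 *	bool masked = !(mask & BIT_ULL(is % 64));
 */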
5242  
5243  /*
5244   * Append string s to buffer buf.  Arguments curp and len are the current
5245   * position and remaining length, respectively.
5246   *
5247   * return 0 on success, 1 on out of room
5248   */
static int append_str(char *buf, char **curp, int *lenp, const char *s)
5250  {
5251  	char *p = *curp;
5252  	int len = *lenp;
5253  	int result = 0; /* success */
5254  	char c;
5255  
	/* add a comma, unless this is the first entry in the buffer */
5257  	if (p != buf) {
5258  		if (len == 0) {
5259  			result = 1; /* out of room */
5260  			goto done;
5261  		}
5262  		*p++ = ',';
5263  		len--;
5264  	}
5265  
5266  	/* copy the string */
5267  	while ((c = *s++) != 0) {
5268  		if (len == 0) {
5269  			result = 1; /* out of room */
5270  			goto done;
5271  		}
5272  		*p++ = c;
5273  		len--;
5274  	}
5275  
5276  done:
5277  	/* write return values */
5278  	*curp = p;
5279  	*lenp = len;
5280  
5281  	return result;
5282  }
5283  
5284  /*
5285   * Using the given flag table, print a comma separated string into
5286   * the buffer.  End in '*' if the buffer is too short.
5287   */
static char *flag_string(char *buf, int buf_len, u64 flags,
5289  			 struct flag_table *table, int table_size)
5290  {
5291  	char extra[32];
5292  	char *p = buf;
5293  	int len = buf_len;
5294  	int no_room = 0;
5295  	int i;
5296  
	/* make sure there are at least 2 bytes so we can form "*" */
5298  	if (len < 2)
5299  		return "";
5300  
5301  	len--;	/* leave room for a nul */
5302  	for (i = 0; i < table_size; i++) {
5303  		if (flags & table[i].flag) {
5304  			no_room = append_str(buf, &p, &len, table[i].str);
5305  			if (no_room)
5306  				break;
5307  			flags &= ~table[i].flag;
5308  		}
5309  	}
5310  
5311  	/* any undocumented bits left? */
5312  	if (!no_room && flags) {
5313  		snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5314  		no_room = append_str(buf, &p, &len, extra);
5315  	}
5316  
5317  	/* add * if ran out of room */
5318  	if (no_room) {
5319  		/* may need to back up to add space for a '*' */
5320  		if (len == 0)
5321  			--p;
5322  		*p++ = '*';
5323  	}
5324  
5325  	/* add final nul - space already allocated above */
5326  	*p = 0;
5327  	return buf;
5328  }
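
/*
 * Usage sketch (editor's example; "example_flags" is a hypothetical flag
 * table): the *_err_status_string() helpers below are thin wrappers that
 * hand their own tables to flag_string().  Bits found in the table print
 * as comma-separated names, any leftover bits print as "bits 0x...", and
 * a trailing '*' marks a truncated buffer:
 *
 *	char buf[96];
 *
 *	dd_dev_info(dd, "status: %s\n",
 *		    flag_string(buf, sizeof(buf), reg, example_flags,
 *				ARRAY_SIZE(example_flags)));
 */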
5329  
5330  /* first 8 CCE error interrupt source names */
5331  static const char * const cce_misc_names[] = {
5332  	"CceErrInt",		/* 0 */
5333  	"RxeErrInt",		/* 1 */
5334  	"MiscErrInt",		/* 2 */
5335  	"Reserved3",		/* 3 */
5336  	"PioErrInt",		/* 4 */
5337  	"SDmaErrInt",		/* 5 */
5338  	"EgressErrInt",		/* 6 */
5339  	"TxeErrInt"		/* 7 */
5340  };
5341  
5342  /*
5343   * Return the miscellaneous error interrupt name.
5344   */
static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5346  {
5347  	if (source < ARRAY_SIZE(cce_misc_names))
5348  		strncpy(buf, cce_misc_names[source], bsize);
5349  	else
5350  		snprintf(buf, bsize, "Reserved%u",
5351  			 source + IS_GENERAL_ERR_START);
5352  
5353  	return buf;
5354  }
5355  
5356  /*
5357   * Return the SDMA engine error interrupt name.
5358   */
static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5360  {
5361  	snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5362  	return buf;
5363  }
5364  
5365  /*
5366   * Return the send context error interrupt name.
5367   */
static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5369  {
5370  	snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5371  	return buf;
5372  }
5373  
5374  static const char * const various_names[] = {
5375  	"PbcInt",
5376  	"GpioAssertInt",
5377  	"Qsfp1Int",
5378  	"Qsfp2Int",
5379  	"TCritInt"
5380  };
5381  
5382  /*
5383   * Return the various interrupt name.
5384   */
static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5386  {
5387  	if (source < ARRAY_SIZE(various_names))
5388  		strncpy(buf, various_names[source], bsize);
5389  	else
5390  		snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5391  	return buf;
5392  }
5393  
5394  /*
5395   * Return the DC interrupt name.
5396   */
static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5398  {
5399  	static const char * const dc_int_names[] = {
5400  		"common",
5401  		"lcb",
5402  		"8051",
5403  		"lbm"	/* local block merge */
5404  	};
5405  
5406  	if (source < ARRAY_SIZE(dc_int_names))
5407  		snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5408  	else
5409  		snprintf(buf, bsize, "DCInt%u", source);
5410  	return buf;
5411  }
5412  
5413  static const char * const sdma_int_names[] = {
5414  	"SDmaInt",
5415  	"SdmaIdleInt",
5416  	"SdmaProgressInt",
5417  };
5418  
5419  /*
5420   * Return the SDMA engine interrupt name.
5421   */
static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5423  {
5424  	/* what interrupt */
5425  	unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
5426  	/* which engine */
5427  	unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5428  
5429  	if (likely(what < 3))
5430  		snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5431  	else
5432  		snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5433  	return buf;
5434  }
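
/*
 * Worked example (editor's note): the source number groups interrupts by
 * type, one block per engine count.  With TXE_NUM_SDMA_ENGINES == 16,
 * source 17 decodes as what = 17 / 16 = 1 ("SdmaIdleInt") and
 * which = 17 % 16 = 1, so the name produced is "SdmaIdleInt1".
 */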
5435  
5436  /*
5437   * Return the receive available interrupt name.
5438   */
static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5440  {
5441  	snprintf(buf, bsize, "RcvAvailInt%u", source);
5442  	return buf;
5443  }
5444  
5445  /*
5446   * Return the receive urgent interrupt name.
5447   */
static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5449  {
5450  	snprintf(buf, bsize, "RcvUrgentInt%u", source);
5451  	return buf;
5452  }
5453  
5454  /*
5455   * Return the send credit interrupt name.
5456   */
static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5458  {
5459  	snprintf(buf, bsize, "SendCreditInt%u", source);
5460  	return buf;
5461  }
5462  
5463  /*
5464   * Return the reserved interrupt name.
5465   */
static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5467  {
5468  	snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5469  	return buf;
5470  }
5471  
static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5473  {
5474  	return flag_string(buf, buf_len, flags,
5475  			   cce_err_status_flags,
5476  			   ARRAY_SIZE(cce_err_status_flags));
5477  }
5478  
static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5480  {
5481  	return flag_string(buf, buf_len, flags,
5482  			   rxe_err_status_flags,
5483  			   ARRAY_SIZE(rxe_err_status_flags));
5484  }
5485  
static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5487  {
5488  	return flag_string(buf, buf_len, flags, misc_err_status_flags,
5489  			   ARRAY_SIZE(misc_err_status_flags));
5490  }
5491  
static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5493  {
5494  	return flag_string(buf, buf_len, flags,
5495  			   pio_err_status_flags,
5496  			   ARRAY_SIZE(pio_err_status_flags));
5497  }
5498  
static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5500  {
5501  	return flag_string(buf, buf_len, flags,
5502  			   sdma_err_status_flags,
5503  			   ARRAY_SIZE(sdma_err_status_flags));
5504  }
5505  
static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5507  {
5508  	return flag_string(buf, buf_len, flags,
5509  			   egress_err_status_flags,
5510  			   ARRAY_SIZE(egress_err_status_flags));
5511  }
5512  
static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5514  {
5515  	return flag_string(buf, buf_len, flags,
5516  			   egress_err_info_flags,
5517  			   ARRAY_SIZE(egress_err_info_flags));
5518  }
5519  
static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5521  {
5522  	return flag_string(buf, buf_len, flags,
5523  			   send_err_status_flags,
5524  			   ARRAY_SIZE(send_err_status_flags));
5525  }
5526  
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5528  {
5529  	char buf[96];
5530  	int i = 0;
5531  
5532  	/*
	 * For most of these errors, there is nothing that can be done except
5534  	 * report or record it.
5535  	 */
5536  	dd_dev_info(dd, "CCE Error: %s\n",
5537  		    cce_err_status_string(buf, sizeof(buf), reg));
5538  
5539  	if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5540  	    is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5541  		/* this error requires a manual drop into SPC freeze mode */
5542  		/* then a fix up */
5543  		start_freeze_handling(dd->pport, FREEZE_SELF);
5544  	}
5545  
5546  	for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5547  		if (reg & (1ull << i)) {
5548  			incr_cntr64(&dd->cce_err_status_cnt[i]);
5549  			/* maintain a counter over all cce_err_status errors */
5550  			incr_cntr64(&dd->sw_cce_err_status_aggregate);
5551  		}
5552  	}
5553  }
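
/*
 * Editor's note: the handle_*_err() routines here all share the per-bit
 * accounting pattern shown above - every set bit in the status value
 * bumps its own 64-bit software counter, e.g.:
 *
 *	for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++)
 *		if (reg & (1ull << i))
 *			incr_cntr64(&dd->cce_err_status_cnt[i]);
 */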
5554  
5555  /*
5556   * Check counters for receive errors that do not have an interrupt
5557   * associated with them.
5558   */
5559  #define RCVERR_CHECK_TIME 10
static void update_rcverr_timer(struct timer_list *t)
5561  {
5562  	struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer);
5563  	struct hfi1_pportdata *ppd = dd->pport;
5564  	u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5565  
5566  	if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5567  	    ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5568  		dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5569  		set_link_down_reason(
5570  		ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5571  		OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5572  		queue_work(ppd->link_wq, &ppd->link_bounce_work);
5573  	}
5574  	dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5575  
5576  	mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5577  }
5578  
static int init_rcverr(struct hfi1_devdata *dd)
5580  {
5581  	timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0);
5582  	/* Assume the hardware counter has been reset */
5583  	dd->rcv_ovfl_cnt = 0;
5584  	return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5585  }
5586  
static void free_rcverr(struct hfi1_devdata *dd)
5588  {
5589  	if (dd->rcverr_timer.function)
5590  		del_timer_sync(&dd->rcverr_timer);
5591  }
5592  
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5594  {
5595  	char buf[96];
5596  	int i = 0;
5597  
5598  	dd_dev_info(dd, "Receive Error: %s\n",
5599  		    rxe_err_status_string(buf, sizeof(buf), reg));
5600  
5601  	if (reg & ALL_RXE_FREEZE_ERR) {
5602  		int flags = 0;
5603  
5604  		/*
5605  		 * Freeze mode recovery is disabled for the errors
5606  		 * in RXE_FREEZE_ABORT_MASK
5607  		 */
5608  		if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5609  			flags = FREEZE_ABORT;
5610  
5611  		start_freeze_handling(dd->pport, flags);
5612  	}
5613  
5614  	for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5615  		if (reg & (1ull << i))
5616  			incr_cntr64(&dd->rcv_err_status_cnt[i]);
5617  	}
5618  }
5619  
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5621  {
5622  	char buf[96];
5623  	int i = 0;
5624  
5625  	dd_dev_info(dd, "Misc Error: %s",
5626  		    misc_err_status_string(buf, sizeof(buf), reg));
5627  	for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5628  		if (reg & (1ull << i))
5629  			incr_cntr64(&dd->misc_err_status_cnt[i]);
5630  	}
5631  }
5632  
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5634  {
5635  	char buf[96];
5636  	int i = 0;
5637  
5638  	dd_dev_info(dd, "PIO Error: %s\n",
5639  		    pio_err_status_string(buf, sizeof(buf), reg));
5640  
5641  	if (reg & ALL_PIO_FREEZE_ERR)
5642  		start_freeze_handling(dd->pport, 0);
5643  
5644  	for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5645  		if (reg & (1ull << i))
5646  			incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5647  	}
5648  }
5649  
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5651  {
5652  	char buf[96];
5653  	int i = 0;
5654  
5655  	dd_dev_info(dd, "SDMA Error: %s\n",
5656  		    sdma_err_status_string(buf, sizeof(buf), reg));
5657  
5658  	if (reg & ALL_SDMA_FREEZE_ERR)
5659  		start_freeze_handling(dd->pport, 0);
5660  
5661  	for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5662  		if (reg & (1ull << i))
5663  			incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5664  	}
5665  }
5666  
static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5668  {
5669  	incr_cntr64(&ppd->port_xmit_discards);
5670  }
5671  
static void count_port_inactive(struct hfi1_devdata *dd)
5673  {
5674  	__count_port_discards(dd->pport);
5675  }
5676  
5677  /*
5678   * We have had a "disallowed packet" error during egress. Determine the
5679   * integrity check which failed, and update relevant error counter, etc.
5680   *
5681   * Note that the SEND_EGRESS_ERR_INFO register has only a single
5682   * bit of state per integrity check, and so we can miss the reason for an
5683   * egress error if more than one packet fails the same integrity check
5684   * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5685   */
static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5687  					int vl)
5688  {
5689  	struct hfi1_pportdata *ppd = dd->pport;
5690  	u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5691  	u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5692  	char buf[96];
5693  
5694  	/* clear down all observed info as quickly as possible after read */
5695  	write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5696  
5697  	dd_dev_info(dd,
5698  		    "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5699  		    info, egress_err_info_string(buf, sizeof(buf), info), src);
5700  
5701  	/* Eventually add other counters for each bit */
5702  	if (info & PORT_DISCARD_EGRESS_ERRS) {
5703  		int weight, i;
5704  
5705  		/*
5706  		 * Count all applicable bits as individual errors and
5707  		 * attribute them to the packet that triggered this handler.
5708  		 * This may not be completely accurate due to limitations
5709  		 * on the available hardware error information.  There is
5710  		 * a single information register and any number of error
5711  		 * packets may have occurred and contributed to it before
5712  		 * this routine is called.  This means that:
5713  		 * a) If multiple packets with the same error occur before
5714  		 *    this routine is called, earlier packets are missed.
5715  		 *    There is only a single bit for each error type.
5716  		 * b) Errors may not be attributed to the correct VL.
5717  		 *    The driver is attributing all bits in the info register
5718  		 *    to the packet that triggered this call, but bits
5719  		 *    could be an accumulation of different packets with
5720  		 *    different VLs.
5721  		 * c) A single error packet may have multiple counts attached
5722  		 *    to it.  There is no way for the driver to know if
5723  		 *    multiple bits set in the info register are due to a
5724  		 *    single packet or multiple packets.  The driver assumes
5725  		 *    multiple packets.
5726  		 */
5727  		weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5728  		for (i = 0; i < weight; i++) {
5729  			__count_port_discards(ppd);
5730  			if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5731  				incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5732  			else if (vl == 15)
5733  				incr_cntr64(&ppd->port_xmit_discards_vl
5734  					    [C_VL_15]);
5735  		}
5736  	}
5737  }
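
/*
 * Worked example (editor's note): if SEND_EGRESS_ERR_INFO reports three
 * of the PORT_DISCARD_EGRESS_ERRS bits set when the handler above runs,
 * hweight64() is 3 and three discards are charged to the port (and, when
 * the triggering VL is known, to that VL's counter) - even though the
 * bits may actually have come from different packets on different VLs,
 * as the block comment above explains.
 */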
5738  
5739  /*
5740   * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5741   * register. Does it represent a 'port inactive' error?
5742   */
static inline int port_inactive_err(u64 posn)
5744  {
5745  	return (posn >= SEES(TX_LINKDOWN) &&
5746  		posn <= SEES(TX_INCORRECT_LINK_STATE));
5747  }
5748  
5749  /*
5750   * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5751   * register. Does it represent a 'disallowed packet' error?
5752   */
static inline int disallowed_pkt_err(int posn)
5754  {
5755  	return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5756  		posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5757  }
5758  
5759  /*
5760   * Input value is a bit position of one of the SDMA engine disallowed
5761   * packet errors.  Return which engine.  Use of this must be guarded by
5762   * disallowed_pkt_err().
5763   */
static inline int disallowed_pkt_engine(int posn)
5765  {
5766  	return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5767  }
5768  
5769  /*
 * Translate an SDMA engine to a VL.  Return -1 if the translation cannot
5771   * be done.
5772   */
static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5774  {
5775  	struct sdma_vl_map *m;
5776  	int vl;
5777  
5778  	/* range check */
5779  	if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5780  		return -1;
5781  
5782  	rcu_read_lock();
5783  	m = rcu_dereference(dd->sdma_map);
5784  	vl = m->engine_to_vl[engine];
5785  	rcu_read_unlock();
5786  
5787  	return vl;
5788  }
5789  
5790  /*
 * Translate the send context (software index) into a VL.  Return -1 if the
5792   * translation cannot be done.
5793   */
static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5795  {
5796  	struct send_context_info *sci;
5797  	struct send_context *sc;
5798  	int i;
5799  
5800  	sci = &dd->send_contexts[sw_index];
5801  
5802  	/* there is no information for user (PSM) and ack contexts */
5803  	if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
5804  		return -1;
5805  
5806  	sc = sci->sc;
5807  	if (!sc)
5808  		return -1;
5809  	if (dd->vld[15].sc == sc)
5810  		return 15;
5811  	for (i = 0; i < num_vls; i++)
5812  		if (dd->vld[i].sc == sc)
5813  			return i;
5814  
5815  	return -1;
5816  }
5817  
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5819  {
5820  	u64 reg_copy = reg, handled = 0;
5821  	char buf[96];
5822  	int i = 0;
5823  
5824  	if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5825  		start_freeze_handling(dd->pport, 0);
5826  	else if (is_ax(dd) &&
5827  		 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5828  		 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5829  		start_freeze_handling(dd->pport, 0);
5830  
5831  	while (reg_copy) {
5832  		int posn = fls64(reg_copy);
5833  		/* fls64() returns a 1-based offset, we want it zero based */
5834  		int shift = posn - 1;
5835  		u64 mask = 1ULL << shift;
5836  
5837  		if (port_inactive_err(shift)) {
5838  			count_port_inactive(dd);
5839  			handled |= mask;
5840  		} else if (disallowed_pkt_err(shift)) {
5841  			int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5842  
5843  			handle_send_egress_err_info(dd, vl);
5844  			handled |= mask;
5845  		}
5846  		reg_copy &= ~mask;
5847  	}
5848  
5849  	reg &= ~handled;
5850  
5851  	if (reg)
5852  		dd_dev_info(dd, "Egress Error: %s\n",
5853  			    egress_err_status_string(buf, sizeof(buf), reg));
5854  
5855  	for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5856  		if (reg & (1ull << i))
5857  			incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5858  	}
5859  }
5860  
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5862  {
5863  	char buf[96];
5864  	int i = 0;
5865  
5866  	dd_dev_info(dd, "Send Error: %s\n",
5867  		    send_err_status_string(buf, sizeof(buf), reg));
5868  
5869  	for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5870  		if (reg & (1ull << i))
5871  			incr_cntr64(&dd->send_err_status_cnt[i]);
5872  	}
5873  }
5874  
5875  /*
5876   * The maximum number of times the error clear down will loop before
5877   * blocking a repeating error.  This value is arbitrary.
5878   */
5879  #define MAX_CLEAR_COUNT 20
5880  
5881  /*
5882   * Clear and handle an error register.  All error interrupts are funneled
5883   * through here to have a central location to correctly handle single-
5884   * or multi-shot errors.
5885   *
5886   * For non per-context registers, call this routine with a context value
5887   * of 0 so the per-context offset is zero.
5888   *
5889   * If the handler loops too many times, assume that something is wrong
5890   * and can't be fixed, so mask the error bits.
5891   */
static void interrupt_clear_down(struct hfi1_devdata *dd,
5893  				 u32 context,
5894  				 const struct err_reg_info *eri)
5895  {
5896  	u64 reg;
5897  	u32 count;
5898  
5899  	/* read in a loop until no more errors are seen */
5900  	count = 0;
5901  	while (1) {
5902  		reg = read_kctxt_csr(dd, context, eri->status);
5903  		if (reg == 0)
5904  			break;
5905  		write_kctxt_csr(dd, context, eri->clear, reg);
5906  		if (likely(eri->handler))
5907  			eri->handler(dd, context, reg);
5908  		count++;
5909  		if (count > MAX_CLEAR_COUNT) {
5910  			u64 mask;
5911  
5912  			dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5913  				   eri->desc, reg);
5914  			/*
5915  			 * Read-modify-write so any other masked bits
5916  			 * remain masked.
5917  			 */
5918  			mask = read_kctxt_csr(dd, context, eri->mask);
5919  			mask &= ~reg;
5920  			write_kctxt_csr(dd, context, eri->mask, mask);
5921  			break;
5922  		}
5923  	}
5924  }
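
/*
 * Editor's sketch of the clear-down behaviour: each pass reads the status
 * CSR, writes the observed bits back to the clear CSR, and runs the
 * handler.  If the same error keeps re-asserting past MAX_CLEAR_COUNT
 * passes, its bits are removed from the mask CSR with a read-modify-write
 * so other enabled sources stay enabled, e.g. for a stuck bit 3:
 *
 *	mask = read_kctxt_csr(dd, context, eri->mask);
 *	mask &= ~BIT_ULL(3);
 *	write_kctxt_csr(dd, context, eri->mask, mask);
 */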
5925  
5926  /*
5927   * CCE block "misc" interrupt.  Source is < 16.
5928   */
static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5930  {
5931  	const struct err_reg_info *eri = &misc_errs[source];
5932  
5933  	if (eri->handler) {
5934  		interrupt_clear_down(dd, 0, eri);
5935  	} else {
5936  		dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5937  			   source);
5938  	}
5939  }
5940  
static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5942  {
5943  	return flag_string(buf, buf_len, flags,
5944  			   sc_err_status_flags,
5945  			   ARRAY_SIZE(sc_err_status_flags));
5946  }
5947  
5948  /*
5949   * Send context error interrupt.  Source (hw_context) is < 160.
5950   *
5951   * All send context errors cause the send context to halt.  The normal
5952   * clear-down mechanism cannot be used because we cannot clear the
5953   * error bits until several other long-running items are done first.
5954   * This is OK because with the context halted, nothing else is going
5955   * to happen on it anyway.
5956   */
static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5958  				unsigned int hw_context)
5959  {
5960  	struct send_context_info *sci;
5961  	struct send_context *sc;
5962  	char flags[96];
5963  	u64 status;
5964  	u32 sw_index;
5965  	int i = 0;
5966  	unsigned long irq_flags;
5967  
5968  	sw_index = dd->hw_to_sw[hw_context];
5969  	if (sw_index >= dd->num_send_contexts) {
5970  		dd_dev_err(dd,
5971  			   "out of range sw index %u for send context %u\n",
5972  			   sw_index, hw_context);
5973  		return;
5974  	}
5975  	sci = &dd->send_contexts[sw_index];
5976  	spin_lock_irqsave(&dd->sc_lock, irq_flags);
5977  	sc = sci->sc;
5978  	if (!sc) {
5979  		dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5980  			   sw_index, hw_context);
5981  		spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5982  		return;
5983  	}
5984  
5985  	/* tell the software that a halt has begun */
5986  	sc_stop(sc, SCF_HALTED);
5987  
5988  	status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5989  
5990  	dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5991  		    send_context_err_status_string(flags, sizeof(flags),
5992  						   status));
5993  
5994  	if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5995  		handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5996  
5997  	/*
5998  	 * Automatically restart halted kernel contexts out of interrupt
5999  	 * context.  User contexts must ask the driver to restart the context.
6000  	 */
6001  	if (sc->type != SC_USER)
6002  		queue_work(dd->pport->hfi1_wq, &sc->halt_work);
6003  	spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
6004  
6005  	/*
6006  	 * Update the counters for the corresponding status bits.
6007  	 * Note that these particular counters are aggregated over all
6008  	 * 160 contexts.
6009  	 */
6010  	for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
6011  		if (status & (1ull << i))
6012  			incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
6013  	}
6014  }
6015  
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
6017  				unsigned int source, u64 status)
6018  {
6019  	struct sdma_engine *sde;
6020  	int i = 0;
6021  
6022  	sde = &dd->per_sdma[source];
6023  #ifdef CONFIG_SDMA_VERBOSITY
6024  	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6025  		   slashstrip(__FILE__), __LINE__, __func__);
6026  	dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
6027  		   sde->this_idx, source, (unsigned long long)status);
6028  #endif
6029  	sde->err_cnt++;
6030  	sdma_engine_error(sde, status);
6031  
6032  	/*
6033  	* Update the counters for the corresponding status bits.
6034  	* Note that these particular counters are aggregated over
6035  	* all 16 DMA engines.
6036  	*/
6037  	for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
6038  		if (status & (1ull << i))
6039  			incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
6040  	}
6041  }
6042  
6043  /*
6044   * CCE block SDMA error interrupt.  Source is < 16.
6045   */
static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
6047  {
6048  #ifdef CONFIG_SDMA_VERBOSITY
6049  	struct sdma_engine *sde = &dd->per_sdma[source];
6050  
6051  	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6052  		   slashstrip(__FILE__), __LINE__, __func__);
6053  	dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
6054  		   source);
6055  	sdma_dumpstate(sde);
6056  #endif
6057  	interrupt_clear_down(dd, source, &sdma_eng_err);
6058  }
6059  
6060  /*
6061   * CCE block "various" interrupt.  Source is < 8.
6062   */
static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
6064  {
6065  	const struct err_reg_info *eri = &various_err[source];
6066  
6067  	/*
6068  	 * TCritInt cannot go through interrupt_clear_down()
6069  	 * because it is not a second tier interrupt. The handler
6070  	 * should be called directly.
6071  	 */
6072  	if (source == TCRIT_INT_SOURCE)
6073  		handle_temp_err(dd);
6074  	else if (eri->handler)
6075  		interrupt_clear_down(dd, 0, eri);
6076  	else
6077  		dd_dev_info(dd,
6078  			    "%s: Unimplemented/reserved interrupt %d\n",
6079  			    __func__, source);
6080  }
6081  
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
6083  {
6084  	/* src_ctx is always zero */
6085  	struct hfi1_pportdata *ppd = dd->pport;
6086  	unsigned long flags;
6087  	u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
6088  
6089  	if (reg & QSFP_HFI0_MODPRST_N) {
6090  		if (!qsfp_mod_present(ppd)) {
6091  			dd_dev_info(dd, "%s: QSFP module removed\n",
6092  				    __func__);
6093  
6094  			ppd->driver_link_ready = 0;
6095  			/*
6096  			 * Cable removed, reset all our information about the
6097  			 * cache and cable capabilities
6098  			 */
6099  
6100  			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6101  			/*
6102  			 * We don't set cache_refresh_required here as we expect
6103  			 * an interrupt when a cable is inserted
6104  			 */
6105  			ppd->qsfp_info.cache_valid = 0;
6106  			ppd->qsfp_info.reset_needed = 0;
6107  			ppd->qsfp_info.limiting_active = 0;
6108  			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6109  					       flags);
6110  			/* Invert the ModPresent pin now to detect plug-in */
6111  			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6112  				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6113  
6114  			if ((ppd->offline_disabled_reason >
6115  			  HFI1_ODR_MASK(
6116  			  OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
6117  			  (ppd->offline_disabled_reason ==
6118  			  HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6119  				ppd->offline_disabled_reason =
6120  				HFI1_ODR_MASK(
6121  				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
6122  
6123  			if (ppd->host_link_state == HLS_DN_POLL) {
6124  				/*
6125  				 * The link is still in POLL. This means
6126  				 * that the normal link down processing
6127  				 * will not happen. We have to do it here
6128  				 * before turning the DC off.
6129  				 */
6130  				queue_work(ppd->link_wq, &ppd->link_down_work);
6131  			}
6132  		} else {
6133  			dd_dev_info(dd, "%s: QSFP module inserted\n",
6134  				    __func__);
6135  
6136  			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6137  			ppd->qsfp_info.cache_valid = 0;
6138  			ppd->qsfp_info.cache_refresh_required = 1;
6139  			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6140  					       flags);
6141  
6142  			/*
6143  			 * Stop inversion of ModPresent pin to detect
6144  			 * removal of the cable
6145  			 */
6146  			qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6147  			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6148  				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6149  
6150  			ppd->offline_disabled_reason =
6151  				HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6152  		}
6153  	}
6154  
6155  	if (reg & QSFP_HFI0_INT_N) {
6156  		dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6157  			    __func__);
6158  		spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6159  		ppd->qsfp_info.check_interrupt_flags = 1;
6160  		spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6161  	}
6162  
6163  	/* Schedule the QSFP work only if there is a cable attached. */
6164  	if (qsfp_mod_present(ppd))
6165  		queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
6166  }
6167  
static int request_host_lcb_access(struct hfi1_devdata *dd)
6169  {
6170  	int ret;
6171  
6172  	ret = do_8051_command(dd, HCMD_MISC,
6173  			      (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6174  			      LOAD_DATA_FIELD_ID_SHIFT, NULL);
6175  	if (ret != HCMD_SUCCESS) {
6176  		dd_dev_err(dd, "%s: command failed with error %d\n",
6177  			   __func__, ret);
6178  	}
6179  	return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6180  }
6181  
static int request_8051_lcb_access(struct hfi1_devdata *dd)
6183  {
6184  	int ret;
6185  
6186  	ret = do_8051_command(dd, HCMD_MISC,
6187  			      (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6188  			      LOAD_DATA_FIELD_ID_SHIFT, NULL);
6189  	if (ret != HCMD_SUCCESS) {
6190  		dd_dev_err(dd, "%s: command failed with error %d\n",
6191  			   __func__, ret);
6192  	}
6193  	return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6194  }
6195  
6196  /*
6197   * Set the LCB selector - allow host access.  The DCC selector always
6198   * points to the host.
6199   */
static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6201  {
6202  	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6203  		  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6204  		  DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6205  }
6206  
6207  /*
6208   * Clear the LCB selector - allow 8051 access.  The DCC selector always
6209   * points to the host.
6210   */
static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6212  {
6213  	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6214  		  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6215  }
6216  
6217  /*
6218   * Acquire LCB access from the 8051.  If the host already has access,
6219   * just increment a counter.  Otherwise, inform the 8051 that the
6220   * host is taking access.
6221   *
6222   * Returns:
6223   *	0 on success
6224   *	-EBUSY if the 8051 has control and cannot be disturbed
6225   *	-errno if unable to acquire access from the 8051
6226   */
int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6228  {
6229  	struct hfi1_pportdata *ppd = dd->pport;
6230  	int ret = 0;
6231  
6232  	/*
6233  	 * Use the host link state lock so the operation of this routine
6234  	 * { link state check, selector change, count increment } can occur
6235  	 * as a unit against a link state change.  Otherwise there is a
6236  	 * race between the state change and the count increment.
6237  	 */
6238  	if (sleep_ok) {
6239  		mutex_lock(&ppd->hls_lock);
6240  	} else {
6241  		while (!mutex_trylock(&ppd->hls_lock))
6242  			udelay(1);
6243  	}
6244  
6245  	/* this access is valid only when the link is up */
6246  	if (ppd->host_link_state & HLS_DOWN) {
6247  		dd_dev_info(dd, "%s: link state %s not up\n",
6248  			    __func__, link_state_name(ppd->host_link_state));
6249  		ret = -EBUSY;
6250  		goto done;
6251  	}
6252  
6253  	if (dd->lcb_access_count == 0) {
6254  		ret = request_host_lcb_access(dd);
6255  		if (ret) {
6256  			dd_dev_err(dd,
6257  				   "%s: unable to acquire LCB access, err %d\n",
6258  				   __func__, ret);
6259  			goto done;
6260  		}
6261  		set_host_lcb_access(dd);
6262  	}
6263  	dd->lcb_access_count++;
6264  done:
6265  	mutex_unlock(&ppd->hls_lock);
6266  	return ret;
6267  }
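
/*
 * Typical usage sketch (editor's example; the CSR named here is only for
 * illustration): LCB access is reference counted, so callers bracket
 * their LCB CSR traffic with an acquire/release pair:
 *
 *	if (!acquire_lcb_access(dd, 1)) {
 *		reg = read_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT);
 *		release_lcb_access(dd, 1);
 *	}
 */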
6268  
6269  /*
6270   * Release LCB access by decrementing the use count.  If the count is moving
6271   * from 1 to 0, inform 8051 that it has control back.
6272   *
6273   * Returns:
6274   *	0 on success
6275   *	-errno if unable to release access to the 8051
6276   */
int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6278  {
6279  	int ret = 0;
6280  
6281  	/*
6282  	 * Use the host link state lock because the acquire needed it.
6283  	 * Here, we only need to keep { selector change, count decrement }
6284  	 * as a unit.
6285  	 */
6286  	if (sleep_ok) {
6287  		mutex_lock(&dd->pport->hls_lock);
6288  	} else {
6289  		while (!mutex_trylock(&dd->pport->hls_lock))
6290  			udelay(1);
6291  	}
6292  
6293  	if (dd->lcb_access_count == 0) {
6294  		dd_dev_err(dd, "%s: LCB access count is zero.  Skipping.\n",
6295  			   __func__);
6296  		goto done;
6297  	}
6298  
6299  	if (dd->lcb_access_count == 1) {
6300  		set_8051_lcb_access(dd);
6301  		ret = request_8051_lcb_access(dd);
6302  		if (ret) {
6303  			dd_dev_err(dd,
6304  				   "%s: unable to release LCB access, err %d\n",
6305  				   __func__, ret);
6306  			/* restore host access if the grant didn't work */
6307  			set_host_lcb_access(dd);
6308  			goto done;
6309  		}
6310  	}
6311  	dd->lcb_access_count--;
6312  done:
6313  	mutex_unlock(&dd->pport->hls_lock);
6314  	return ret;
6315  }
6316  
6317  /*
6318   * Initialize LCB access variables and state.  Called during driver load,
6319   * after most of the initialization is finished.
6320   *
6321   * The DC default is LCB access on for the host.  The driver defaults to
6322   * leaving access to the 8051.  Assign access now - this constrains the call
6323   * to this routine to be after all LCB set-up is done.  In particular, after
 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6325   */
static void init_lcb_access(struct hfi1_devdata *dd)
6327  {
6328  	dd->lcb_access_count = 0;
6329  }
6330  
6331  /*
 * Write a response back to an 8051 request.
6333   */
static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6335  {
6336  	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6337  		  DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6338  		  (u64)return_code <<
6339  		  DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6340  		  (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6341  }
6342  
6343  /*
6344   * Handle host requests from the 8051.
6345   */
static void handle_8051_request(struct hfi1_pportdata *ppd)
6347  {
6348  	struct hfi1_devdata *dd = ppd->dd;
6349  	u64 reg;
6350  	u16 data = 0;
6351  	u8 type;
6352  
6353  	reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6354  	if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6355  		return;	/* no request */
6356  
6357  	/* zero out COMPLETED so the response is seen */
6358  	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6359  
6360  	/* extract request details */
6361  	type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6362  			& DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6363  	data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6364  			& DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6365  
6366  	switch (type) {
6367  	case HREQ_LOAD_CONFIG:
6368  	case HREQ_SAVE_CONFIG:
6369  	case HREQ_READ_CONFIG:
6370  	case HREQ_SET_TX_EQ_ABS:
6371  	case HREQ_SET_TX_EQ_REL:
6372  	case HREQ_ENABLE:
6373  		dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6374  			    type);
6375  		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6376  		break;
6377  	case HREQ_LCB_RESET:
6378  		/* Put the LCB, RX FPE and TX FPE into reset */
6379  		write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET);
6380  		/* Make sure the write completed */
6381  		(void)read_csr(dd, DCC_CFG_RESET);
6382  		/* Hold the reset long enough to take effect */
6383  		udelay(1);
6384  		/* Take the LCB, RX FPE and TX FPE out of reset */
6385  		write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
6386  		hreq_response(dd, HREQ_SUCCESS, 0);
6387  
6388  		break;
6389  	case HREQ_CONFIG_DONE:
6390  		hreq_response(dd, HREQ_SUCCESS, 0);
6391  		break;
6392  
6393  	case HREQ_INTERFACE_TEST:
6394  		hreq_response(dd, HREQ_SUCCESS, data);
6395  		break;
6396  	default:
6397  		dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6398  		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6399  		break;
6400  	}
6401  }
6402  
6403  /*
 * Set up allocation unit value.
6405   */
void set_up_vau(struct hfi1_devdata *dd, u8 vau)
6407  {
6408  	u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6409  
6410  	/* do not modify other values in the register */
6411  	reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
6412  	reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
6413  	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6414  }
6415  
6416  /*
6417   * Set up initial VL15 credits of the remote.  Assumes the rest of
6418   * the CM credit registers are zero from a previous global or credit reset.
6419   * Shared limit for VL15 will always be 0.
6420   */
void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
6422  {
6423  	u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6424  
6425  	/* set initial values for total and shared credit limit */
6426  	reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
6427  		 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
6428  
6429  	/*
6430  	 * Set total limit to be equal to VL15 credits.
6431  	 * Leave shared limit at 0.
6432  	 */
6433  	reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
6434  	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6435  
6436  	write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6437  		  << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6438  }
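
/*
 * Worked example (editor's note): with vl15buf = 64, the code above sets
 * the global total credit limit to 64, leaves the global shared limit at
 * 0, and gives VL15 a dedicated limit of 64 credit units - i.e. all of
 * the remote's initial buffering is dedicated to VL15.
 */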
6439  
6440  /*
6441   * Zero all credit details from the previous connection and
6442   * reset the CM manager's internal counters.
6443   */
void reset_link_credits(struct hfi1_devdata *dd)
6445  {
6446  	int i;
6447  
6448  	/* remove all previous VL credit limits */
6449  	for (i = 0; i < TXE_NUM_DATA_VL; i++)
6450  		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6451  	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6452  	write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
6453  	/* reset the CM block */
6454  	pio_send_control(dd, PSC_CM_RESET);
6455  	/* reset cached value */
6456  	dd->vl15buf_cached = 0;
6457  }
6458  
6459  /* convert a vCU to a CU */
static u32 vcu_to_cu(u8 vcu)
6461  {
6462  	return 1 << vcu;
6463  }
6464  
6465  /* convert a CU to a vCU */
static u8 cu_to_vcu(u32 cu)
6467  {
6468  	return ilog2(cu);
6469  }
6470  
6471  /* convert a vAU to an AU */
static u32 vau_to_au(u8 vau)
6473  {
6474  	return 8 * (1 << vau);
6475  }
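
/*
 * Worked example (editor's note): the virtual encodings above are
 * logarithmic.  A vAU of 3 means an allocation unit of 8 * (1 << 3) = 64
 * bytes, and a credit unit of 4 encodes as vCU = ilog2(4) = 2 (and maps
 * back as 1 << 2 = 4).
 */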
6476  
static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6478  {
6479  	ppd->sm_trap_qp = 0x0;
6480  	ppd->sa_qp = 0x1;
6481  }
6482  
6483  /*
6484   * Graceful LCB shutdown.  This leaves the LCB FIFOs in reset.
6485   */
static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6487  {
6488  	u64 reg;
6489  
6490  	/* clear lcb run: LCB_CFG_RUN.EN = 0 */
6491  	write_csr(dd, DC_LCB_CFG_RUN, 0);
6492  	/* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6493  	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6494  		  1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6495  	/* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6496  	dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6497  	reg = read_csr(dd, DCC_CFG_RESET);
6498  	write_csr(dd, DCC_CFG_RESET, reg |
6499  		  DCC_CFG_RESET_RESET_LCB | DCC_CFG_RESET_RESET_RX_FPE);
6500  	(void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6501  	if (!abort) {
6502  		udelay(1);    /* must hold for the longer of 16cclks or 20ns */
6503  		write_csr(dd, DCC_CFG_RESET, reg);
6504  		write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6505  	}
6506  }
6507  
6508  /*
6509   * This routine should be called after the link has been transitioned to
6510   * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6511   * reset).
6512   *
6513   * The expectation is that the caller of this routine would have taken
6514   * care of properly transitioning the link into the correct state.
6515   * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6516   *       before calling this function.
6517   */
static void _dc_shutdown(struct hfi1_devdata *dd)
6519  {
6520  	lockdep_assert_held(&dd->dc8051_lock);
6521  
6522  	if (dd->dc_shutdown)
6523  		return;
6524  
6525  	dd->dc_shutdown = 1;
6526  	/* Shutdown the LCB */
6527  	lcb_shutdown(dd, 1);
6528  	/*
6529  	 * Going to OFFLINE would have caused the 8051 to put the
6530  	 * SerDes into reset already. Just need to shut down the 8051
6531  	 * itself.
6532  	 */
6533  	write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6534  }
6535  
6536  static void dc_shutdown(struct hfi1_devdata *dd)
6537  {
6538  	mutex_lock(&dd->dc8051_lock);
6539  	_dc_shutdown(dd);
6540  	mutex_unlock(&dd->dc8051_lock);
6541  }
6542  
6543  /*
6544   * Calling this after the DC has been brought out of reset should not
6545   * do any damage.
6546   * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6547   *       before calling this function.
6548   */
6549  static void _dc_start(struct hfi1_devdata *dd)
6550  {
6551  	lockdep_assert_held(&dd->dc8051_lock);
6552  
6553  	if (!dd->dc_shutdown)
6554  		return;
6555  
6556  	/* Take the 8051 out of reset */
6557  	write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6558  	/* Wait until 8051 is ready */
6559  	if (wait_fm_ready(dd, TIMEOUT_8051_START))
6560  		dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6561  			   __func__);
6562  
6563  	/* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6564  	write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
6565  	/* lcb_shutdown() with abort=1 does not restore these */
6566  	write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6567  	dd->dc_shutdown = 0;
6568  }
6569  
6570  static void dc_start(struct hfi1_devdata *dd)
6571  {
6572  	mutex_lock(&dd->dc8051_lock);
6573  	_dc_start(dd);
6574  	mutex_unlock(&dd->dc8051_lock);
6575  }
6576  
6577  /*
6578   * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6579   */
6580  static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6581  {
6582  	u64 rx_radr, tx_radr;
6583  	u32 version;
6584  
6585  	if (dd->icode != ICODE_FPGA_EMULATION)
6586  		return;
6587  
6588  	/*
6589  	 * These LCB defaults on emulator _s are good, nothing to do here:
6590  	 *	LCB_CFG_TX_FIFOS_RADR
6591  	 *	LCB_CFG_RX_FIFOS_RADR
6592  	 *	LCB_CFG_LN_DCLK
6593  	 *	LCB_CFG_IGNORE_LOST_RCLK
6594  	 */
6595  	if (is_emulator_s(dd))
6596  		return;
6597  	/* else this is _p */
6598  
6599  	version = emulator_rev(dd);
6600  	if (!is_ax(dd))
6601  		version = 0x2d;	/* all B0 use 0x2d or higher settings */
6602  
6603  	if (version <= 0x12) {
6604  		/* release 0x12 and below */
6605  
6606  		/*
6607  		 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6608  		 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6609  		 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6610  		 */
6611  		rx_radr =
6612  		      0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6613  		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6614  		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6615  		/*
6616  		 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6617  		 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6618  		 */
6619  		tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6620  	} else if (version <= 0x18) {
6621  		/* release 0x13 up to 0x18 */
6622  		/* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6623  		rx_radr =
6624  		      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6625  		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6626  		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6627  		tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6628  	} else if (version == 0x19) {
6629  		/* release 0x19 */
6630  		/* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6631  		rx_radr =
6632  		      0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6633  		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6634  		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6635  		tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6636  	} else if (version == 0x1a) {
6637  		/* release 0x1a */
6638  		/* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6639  		rx_radr =
6640  		      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6641  		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6642  		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6643  		tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6644  		write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6645  	} else {
6646  		/* release 0x1b and higher */
6647  		/* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6648  		rx_radr =
6649  		      0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6650  		    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6651  		    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6652  		tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6653  	}
6654  
6655  	write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6656  	/* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6657  	write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6658  		  DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6659  	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6660  }
6661  
6662  /*
6663   * Handle a SMA idle message
6664   *
6665   * This is a work-queue function outside of the interrupt.
6666   */
6667  void handle_sma_message(struct work_struct *work)
6668  {
6669  	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6670  							sma_message_work);
6671  	struct hfi1_devdata *dd = ppd->dd;
6672  	u64 msg;
6673  	int ret;
6674  
6675  	/*
6676  	 * msg is bytes 1-4 of the 40-bit idle message - the command code
6677  	 * is stripped off
6678  	 */
6679  	ret = read_idle_sma(dd, &msg);
6680  	if (ret)
6681  		return;
6682  	dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6683  	/*
6684  	 * React to the SMA message.  Byte[1] (0 for us) is the command.
6685  	 */
6686  	switch (msg & 0xff) {
6687  	case SMA_IDLE_ARM:
6688  		/*
6689  		 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6690  		 * State Transitions
6691  		 *
6692  		 * Only expected in INIT or ARMED, discard otherwise.
6693  		 */
6694  		if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6695  			ppd->neighbor_normal = 1;
6696  		break;
6697  	case SMA_IDLE_ACTIVE:
6698  		/*
6699  		 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6700  		 * State Transitions
6701  		 *
6702  		 * Can activate the node.  Discard otherwise.
6703  		 */
6704  		if (ppd->host_link_state == HLS_UP_ARMED &&
6705  		    ppd->is_active_optimize_enabled) {
6706  			ppd->neighbor_normal = 1;
6707  			ret = set_link_state(ppd, HLS_UP_ACTIVE);
6708  			if (ret)
6709  				dd_dev_err(
6710  					dd,
6711  					"%s: received Active SMA idle message, couldn't set link to Active\n",
6712  					__func__);
6713  		}
6714  		break;
6715  	default:
6716  		dd_dev_err(dd,
6717  			   "%s: received unexpected SMA idle message 0x%llx\n",
6718  			   __func__, msg);
6719  		break;
6720  	}
6721  }
6722  
6723  static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6724  {
6725  	u64 rcvctrl;
6726  	unsigned long flags;
6727  
6728  	spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6729  	rcvctrl = read_csr(dd, RCV_CTRL);
6730  	rcvctrl |= add;
6731  	rcvctrl &= ~clear;
6732  	write_csr(dd, RCV_CTRL, rcvctrl);
6733  	spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6734  }
6735  
6736  static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6737  {
6738  	adjust_rcvctrl(dd, add, 0);
6739  }
6740  
6741  static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6742  {
6743  	adjust_rcvctrl(dd, 0, clear);
6744  }
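
/*
 * The helpers above set or clear individual bits in RCV_CTRL under
 * rcvctrl_lock, e.g. add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK)
 * enables the port without disturbing the other control bits.
 */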
6745  
6746  /*
6747   * Called from all interrupt handlers to start handling an SPC freeze.
6748   */
6749  void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6750  {
6751  	struct hfi1_devdata *dd = ppd->dd;
6752  	struct send_context *sc;
6753  	int i;
6754  	int sc_flags;
6755  
6756  	if (flags & FREEZE_SELF)
6757  		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6758  
6759  	/* enter frozen mode */
6760  	dd->flags |= HFI1_FROZEN;
6761  
6762  	/* notify all SDMA engines that they are going into a freeze */
6763  	sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6764  
6765  	sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
6766  					      SCF_LINK_DOWN : 0);
6767  	/* do halt pre-handling on all enabled send contexts */
6768  	for (i = 0; i < dd->num_send_contexts; i++) {
6769  		sc = dd->send_contexts[i].sc;
6770  		if (sc && (sc->flags & SCF_ENABLED))
6771  			sc_stop(sc, sc_flags);
6772  	}
6773  
6774  	/* Send context are frozen. Notify user space */
6775  	hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6776  
6777  	if (flags & FREEZE_ABORT) {
6778  		dd_dev_err(dd,
6779  			   "Aborted freeze recovery. Please REBOOT system\n");
6780  		return;
6781  	}
6782  	/* queue non-interrupt handler */
6783  	queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6784  }
6785  
6786  /*
6787   * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6788   * depending on the "freeze" parameter.
6789   *
6790   * No need to return an error if it times out, our only option
6791   * is to proceed anyway.
6792   */
6793  static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6794  {
6795  	unsigned long timeout;
6796  	u64 reg;
6797  
6798  	timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6799  	while (1) {
6800  		reg = read_csr(dd, CCE_STATUS);
6801  		if (freeze) {
6802  			/* waiting until all indicators are set */
6803  			if ((reg & ALL_FROZE) == ALL_FROZE)
6804  				return;	/* all done */
6805  		} else {
6806  			/* waiting until all indicators are clear */
6807  			if ((reg & ALL_FROZE) == 0)
6808  				return; /* all done */
6809  		}
6810  
6811  		if (time_after(jiffies, timeout)) {
6812  			dd_dev_err(dd,
6813  				   "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6814  				   freeze ? "" : "un", reg & ALL_FROZE,
6815  				   freeze ? ALL_FROZE : 0ull);
6816  			return;
6817  		}
6818  		usleep_range(80, 120);
6819  	}
6820  }
6821  
6822  /*
6823   * Do all freeze handling for the RXE block.
6824   */
6825  static void rxe_freeze(struct hfi1_devdata *dd)
6826  {
6827  	int i;
6828  	struct hfi1_ctxtdata *rcd;
6829  
6830  	/* disable port */
6831  	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6832  
6833  	/* disable all receive contexts */
6834  	for (i = 0; i < dd->num_rcv_contexts; i++) {
6835  		rcd = hfi1_rcd_get_by_index(dd, i);
6836  		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
6837  		hfi1_rcd_put(rcd);
6838  	}
6839  }
6840  
6841  /*
6842   * Unfreeze handling for the RXE block - kernel contexts only.
6843   * This will also enable the port.  User contexts will do unfreeze
6844   * handling on a per-context basis as they call into the driver.
6845   *
6846   */
6847  static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6848  {
6849  	u32 rcvmask;
6850  	u16 i;
6851  	struct hfi1_ctxtdata *rcd;
6852  
6853  	/* enable all kernel contexts */
6854  	for (i = 0; i < dd->num_rcv_contexts; i++) {
6855  		rcd = hfi1_rcd_get_by_index(dd, i);
6856  
6857  		/* Ensure all non-user contexts (including vnic) are enabled */
6858  		if (!rcd ||
6859  		    (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) {
6860  			hfi1_rcd_put(rcd);
6861  			continue;
6862  		}
6863  		rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6864  		/* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6865  		rcvmask |= rcd->rcvhdrtail_kvaddr ?
6866  			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6867  		hfi1_rcvctrl(dd, rcvmask, rcd);
6868  		hfi1_rcd_put(rcd);
6869  	}
6870  
6871  	/* enable port */
6872  	add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6873  }
6874  
6875  /*
6876   * Non-interrupt SPC freeze handling.
6877   *
6878   * This is a work-queue function outside of the triggering interrupt.
6879   */
6880  void handle_freeze(struct work_struct *work)
6881  {
6882  	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6883  								freeze_work);
6884  	struct hfi1_devdata *dd = ppd->dd;
6885  
6886  	/* wait for freeze indicators on all affected blocks */
6887  	wait_for_freeze_status(dd, 1);
6888  
6889  	/* SPC is now frozen */
6890  
6891  	/* do send PIO freeze steps */
6892  	pio_freeze(dd);
6893  
6894  	/* do send DMA freeze steps */
6895  	sdma_freeze(dd);
6896  
6897  	/* do send egress freeze steps - nothing to do */
6898  
6899  	/* do receive freeze steps */
6900  	rxe_freeze(dd);
6901  
6902  	/*
6903  	 * Unfreeze the hardware - clear the freeze, wait for each
6904  	 * block's frozen bit to clear, then clear the frozen flag.
6905  	 */
6906  	write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6907  	wait_for_freeze_status(dd, 0);
6908  
6909  	if (is_ax(dd)) {
6910  		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6911  		wait_for_freeze_status(dd, 1);
6912  		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6913  		wait_for_freeze_status(dd, 0);
6914  	}
6915  
6916  	/* do send PIO unfreeze steps for kernel contexts */
6917  	pio_kernel_unfreeze(dd);
6918  
6919  	/* do send DMA unfreeze steps */
6920  	sdma_unfreeze(dd);
6921  
6922  	/* do send egress unfreeze steps - nothing to do */
6923  
6924  	/* do receive unfreeze steps for kernel contexts */
6925  	rxe_kernel_unfreeze(dd);
6926  
6927  	/*
6928  	 * The unfreeze procedure touches global device registers when
6929  	 * it disables and re-enables RXE. Mark the device unfrozen
6930  	 * after all that is done so other parts of the driver waiting
6931  	 * for the device to unfreeze don't do things out of order.
6932  	 *
6933  	 * The above implies that the meaning of HFI1_FROZEN flag is
6934  	 * "Device has gone into freeze mode and freeze mode handling
6935  	 * is still in progress."
6936  	 *
6937  	 * The flag will be removed when freeze mode processing has
6938  	 * completed.
6939  	 */
6940  	dd->flags &= ~HFI1_FROZEN;
6941  	wake_up(&dd->event_queue);
6942  
6943  	/* no longer frozen */
6944  }
6945  
6946  /**
6947   * update_xmit_counters - update PortXmitWait/PortVlXmitWait
6948   * counters.
6949   * @ppd: info of physical Hfi port
6950   * @link_width: new link width after link up or downgrade
6951   *
6952   * Update the PortXmitWait and PortVlXmitWait counters after
6953   * a link up or downgrade event to reflect a link width change.
6954   */
6955  static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width)
6956  {
6957  	int i;
6958  	u16 tx_width;
6959  	u16 link_speed;
6960  
6961  	tx_width = tx_link_width(link_width);
6962  	link_speed = get_link_speed(ppd->link_speed_active);
6963  
6964  	/*
6965  	 * There are C_VL_COUNT PortVLXmitWait counters.
6966  	 * Add 1 to C_VL_COUNT to include the PortXmitWait counter.
6967  	 */
6968  	for (i = 0; i < C_VL_COUNT + 1; i++)
6969  		get_xmit_wait_counters(ppd, tx_width, link_speed, i);
6970  }
6971  
6972  /*
6973   * Handle a link up interrupt from the 8051.
6974   *
6975   * This is a work-queue function outside of the interrupt.
6976   */
6977  void handle_link_up(struct work_struct *work)
6978  {
6979  	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6980  						  link_up_work);
6981  	struct hfi1_devdata *dd = ppd->dd;
6982  
6983  	set_link_state(ppd, HLS_UP_INIT);
6984  
6985  	/* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6986  	read_ltp_rtt(dd);
6987  	/*
6988  	 * OPA specifies that certain counters are cleared on a transition
6989  	 * to link up, so do that.
6990  	 */
6991  	clear_linkup_counters(dd);
6992  	/*
6993  	 * And (re)set link up default values.
6994  	 */
6995  	set_linkup_defaults(ppd);
6996  
6997  	/*
6998  	 * Set VL15 credits. Use cached value from verify cap interrupt.
6999  	 * In case of quick linkup or simulator, vl15 value will be set by
7000  	 * handle_linkup_change. VerifyCap interrupt handler will not be
7001  	 * called in those scenarios.
7002  	 */
7003  	if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
7004  		set_up_vl15(dd, dd->vl15buf_cached);
7005  
7006  	/* enforce link speed enabled */
7007  	if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
7008  		/* oops - current speed is not enabled, bounce */
7009  		dd_dev_err(dd,
7010  			   "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
7011  			   ppd->link_speed_active, ppd->link_speed_enabled);
7012  		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
7013  				     OPA_LINKDOWN_REASON_SPEED_POLICY);
7014  		set_link_state(ppd, HLS_DN_OFFLINE);
7015  		start_link(ppd);
7016  	}
7017  }
7018  
7019  /*
7020   * Several pieces of LNI information were cached for SMA in ppd.
7021   * Reset these on link down
7022   */
7023  static void reset_neighbor_info(struct hfi1_pportdata *ppd)
7024  {
7025  	ppd->neighbor_guid = 0;
7026  	ppd->neighbor_port_number = 0;
7027  	ppd->neighbor_type = 0;
7028  	ppd->neighbor_fm_security = 0;
7029  }
7030  
7031  static const char * const link_down_reason_strs[] = {
7032  	[OPA_LINKDOWN_REASON_NONE] = "None",
7033  	[OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
7034  	[OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
7035  	[OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
7036  	[OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
7037  	[OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
7038  	[OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
7039  	[OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
7040  	[OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
7041  	[OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
7042  	[OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
7043  	[OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
7044  	[OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
7045  	[OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
7046  	[OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
7047  	[OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
7048  	[OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
7049  	[OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
7050  	[OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
7051  	[OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
7052  	[OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
7053  	[OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
7054  	[OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
7055  	[OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
7056  	[OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
7057  	[OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
7058  	[OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
7059  	[OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
7060  	[OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
7061  	[OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
7062  	[OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
7063  	[OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
7064  	[OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
7065  					"Excessive buffer overrun",
7066  	[OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
7067  	[OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
7068  	[OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
7069  	[OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
7070  	[OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
7071  	[OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
7072  	[OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
7073  	[OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
7074  					"Local media not installed",
7075  	[OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
7076  	[OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
7077  	[OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
7078  					"End to end not installed",
7079  	[OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
7080  	[OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
7081  	[OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
7082  	[OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
7083  	[OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
7084  	[OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
7085  };
7086  
7087  /* return the neighbor link down reason string */
7088  static const char *link_down_reason_str(u8 reason)
7089  {
7090  	const char *str = NULL;
7091  
7092  	if (reason < ARRAY_SIZE(link_down_reason_strs))
7093  		str = link_down_reason_strs[reason];
7094  	if (!str)
7095  		str = "(invalid)";
7096  
7097  	return str;
7098  }
7099  
7100  /*
7101   * Handle a link down interrupt from the 8051.
7102   *
7103   * This is a work-queue function outside of the interrupt.
7104   */
7105  void handle_link_down(struct work_struct *work)
7106  {
7107  	u8 lcl_reason, neigh_reason = 0;
7108  	u8 link_down_reason;
7109  	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7110  						  link_down_work);
7111  	int was_up;
7112  	static const char ldr_str[] = "Link down reason: ";
7113  
7114  	if ((ppd->host_link_state &
7115  	     (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
7116  	     ppd->port_type == PORT_TYPE_FIXED)
7117  		ppd->offline_disabled_reason =
7118  			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
7119  
7120  	/* Go offline first, then deal with reading/writing through 8051 */
7121  	was_up = !!(ppd->host_link_state & HLS_UP);
7122  	set_link_state(ppd, HLS_DN_OFFLINE);
7123  	xchg(&ppd->is_link_down_queued, 0);
7124  
7125  	if (was_up) {
7126  		lcl_reason = 0;
7127  		/* link down reason is only valid if the link was up */
7128  		read_link_down_reason(ppd->dd, &link_down_reason);
7129  		switch (link_down_reason) {
7130  		case LDR_LINK_TRANSFER_ACTIVE_LOW:
7131  			/* the link went down, no idle message reason */
7132  			dd_dev_info(ppd->dd, "%sUnexpected link down\n",
7133  				    ldr_str);
7134  			break;
7135  		case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
7136  			/*
7137  			 * The neighbor reason is only valid if an idle message
7138  			 * was received for it.
7139  			 */
7140  			read_planned_down_reason_code(ppd->dd, &neigh_reason);
7141  			dd_dev_info(ppd->dd,
7142  				    "%sNeighbor link down message %d, %s\n",
7143  				    ldr_str, neigh_reason,
7144  				    link_down_reason_str(neigh_reason));
7145  			break;
7146  		case LDR_RECEIVED_HOST_OFFLINE_REQ:
7147  			dd_dev_info(ppd->dd,
7148  				    "%sHost requested link to go offline\n",
7149  				    ldr_str);
7150  			break;
7151  		default:
7152  			dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
7153  				    ldr_str, link_down_reason);
7154  			break;
7155  		}
7156  
7157  		/*
7158  		 * If no reason, assume peer-initiated but missed
7159  		 * LinkGoingDown idle flits.
7160  		 */
7161  		if (neigh_reason == 0)
7162  			lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
7163  	} else {
7164  		/* went down while polling or going up */
7165  		lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
7166  	}
7167  
7168  	set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
7169  
7170  	/* inform the SMA when the link transitions from up to down */
7171  	if (was_up && ppd->local_link_down_reason.sma == 0 &&
7172  	    ppd->neigh_link_down_reason.sma == 0) {
7173  		ppd->local_link_down_reason.sma =
7174  					ppd->local_link_down_reason.latest;
7175  		ppd->neigh_link_down_reason.sma =
7176  					ppd->neigh_link_down_reason.latest;
7177  	}
7178  
7179  	reset_neighbor_info(ppd);
7180  
7181  	/* disable the port */
7182  	clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
7183  
7184  	/*
7185  	 * If there is no cable attached, turn the DC off. Otherwise,
7186  	 * start the link bring up.
7187  	 */
7188  	if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
7189  		dc_shutdown(ppd->dd);
7190  	else
7191  		start_link(ppd);
7192  }
7193  
7194  void handle_link_bounce(struct work_struct *work)
7195  {
7196  	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7197  							link_bounce_work);
7198  
7199  	/*
7200  	 * Only do something if the link is currently up.
7201  	 */
7202  	if (ppd->host_link_state & HLS_UP) {
7203  		set_link_state(ppd, HLS_DN_OFFLINE);
7204  		start_link(ppd);
7205  	} else {
7206  		dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
7207  			    __func__, link_state_name(ppd->host_link_state));
7208  	}
7209  }
7210  
7211  /*
7212   * Mask conversion: Capability exchange to Port LTP.  The capability
7213   * exchange has an implicit 16b CRC that is mandatory.
7214   */
7215  static int cap_to_port_ltp(int cap)
7216  {
7217  	int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7218  
7219  	if (cap & CAP_CRC_14B)
7220  		port_ltp |= PORT_LTP_CRC_MODE_14;
7221  	if (cap & CAP_CRC_48B)
7222  		port_ltp |= PORT_LTP_CRC_MODE_48;
7223  	if (cap & CAP_CRC_12B_16B_PER_LANE)
7224  		port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7225  
7226  	return port_ltp;
7227  }
7228  
7229  /*
7230   * Convert an OPA Port LTP mask to capability mask
7231   */
7232  int port_ltp_to_cap(int port_ltp)
7233  {
7234  	int cap_mask = 0;
7235  
7236  	if (port_ltp & PORT_LTP_CRC_MODE_14)
7237  		cap_mask |= CAP_CRC_14B;
7238  	if (port_ltp & PORT_LTP_CRC_MODE_48)
7239  		cap_mask |= CAP_CRC_48B;
7240  	if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7241  		cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7242  
7243  	return cap_mask;
7244  }
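
/*
 * Note: port_ltp_to_cap() above is the inverse of cap_to_port_ltp(),
 * except that the mandatory PORT_LTP_CRC_MODE_16 has no capability bit
 * and is dropped on the way back.
 */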
7245  
7246  /*
7247   * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7248   */
7249  static int lcb_to_port_ltp(int lcb_crc)
7250  {
7251  	int port_ltp = 0;
7252  
7253  	if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7254  		port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7255  	else if (lcb_crc == LCB_CRC_48B)
7256  		port_ltp = PORT_LTP_CRC_MODE_48;
7257  	else if (lcb_crc == LCB_CRC_14B)
7258  		port_ltp = PORT_LTP_CRC_MODE_14;
7259  	else
7260  		port_ltp = PORT_LTP_CRC_MODE_16;
7261  
7262  	return port_ltp;
7263  }
7264  
7265  static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7266  {
7267  	if (ppd->pkeys[2] != 0) {
7268  		ppd->pkeys[2] = 0;
7269  		(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7270  		hfi1_event_pkey_change(ppd->dd, ppd->port);
7271  	}
7272  }
7273  
7274  /*
7275   * Convert the given link width to the OPA link width bitmask.
7276   */
7277  static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7278  {
7279  	switch (width) {
7280  	case 0:
7281  		/*
7282  		 * Simulator and quick linkup do not set the width.
7283  		 * Just set it to 4x without complaint.
7284  		 */
7285  		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7286  			return OPA_LINK_WIDTH_4X;
7287  		return 0; /* no lanes up */
7288  	case 1: return OPA_LINK_WIDTH_1X;
7289  	case 2: return OPA_LINK_WIDTH_2X;
7290  	case 3: return OPA_LINK_WIDTH_3X;
7291  	default:
7292  		dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7293  			    __func__, width);
7294  		/* fall through */
7295  	case 4: return OPA_LINK_WIDTH_4X;
7296  	}
7297  }
7298  
7299  /*
7300   * Do a population count on the bottom nibble.
7301   */
7302  static const u8 bit_counts[16] = {
7303  	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7304  };
7305  
7306  static inline u8 nibble_to_count(u8 nibble)
7307  {
7308  	return bit_counts[nibble & 0xf];
7309  }
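
/* e.g. an enable_lane nibble of 0xb (lanes 0, 1, and 3) counts as 3 lanes */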
7310  
7311  /*
7312   * Read the active lane information from the 8051 registers and return
7313   * their widths.
7314   *
7315   * Active lane information is found in these 8051 registers:
7316   *	enable_lane_tx
7317   *	enable_lane_rx
7318   */
7319  static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7320  			    u16 *rx_width)
7321  {
7322  	u16 tx, rx;
7323  	u8 enable_lane_rx;
7324  	u8 enable_lane_tx;
7325  	u8 tx_polarity_inversion;
7326  	u8 rx_polarity_inversion;
7327  	u8 max_rate;
7328  
7329  	/* read the active lanes */
7330  	read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7331  			 &rx_polarity_inversion, &max_rate);
7332  	read_local_lni(dd, &enable_lane_rx);
7333  
7334  	/* convert to counts */
7335  	tx = nibble_to_count(enable_lane_tx);
7336  	rx = nibble_to_count(enable_lane_rx);
7337  
7338  	/*
7339  	 * Set link_speed_active here, overriding what was set in
7340  	 * handle_verify_cap().  The ASIC 8051 firmware does not correctly
7341  	 * set the max_rate field in handle_verify_cap until v0.19.
7342  	 */
7343  	if ((dd->icode == ICODE_RTL_SILICON) &&
7344  	    (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
7345  		/* max_rate: 0 = 12.5G, 1 = 25G */
7346  		switch (max_rate) {
7347  		case 0:
7348  			dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7349  			break;
7350  		default:
7351  			dd_dev_err(dd,
7352  				   "%s: unexpected max rate %d, using 25Gb\n",
7353  				   __func__, (int)max_rate);
7354  			/* fall through */
7355  		case 1:
7356  			dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7357  			break;
7358  		}
7359  	}
7360  
7361  	dd_dev_info(dd,
7362  		    "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7363  		    enable_lane_tx, tx, enable_lane_rx, rx);
7364  	*tx_width = link_width_to_bits(dd, tx);
7365  	*rx_width = link_width_to_bits(dd, rx);
7366  }
7367  
7368  /*
7369   * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7370   * Valid after the end of VerifyCap and during LinkUp.  Does not change
7371   * after link up.  I.e. look elsewhere for downgrade information.
7372   *
7373   * Bits are:
7374   *	+ bits [7:4] contain the number of active transmitters
7375   *	+ bits [3:0] contain the number of active receivers
7376   * These are numbers 1 through 4 and can be different values if the
7377   * link is asymmetric.
7378   *
7379   * verify_cap_local_fm_link_width[0] retains its original value.
7380   */
7381  static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7382  			      u16 *rx_width)
7383  {
7384  	u16 widths, tx, rx;
7385  	u8 misc_bits, local_flags;
7386  	u16 active_tx, active_rx;
7387  
7388  	read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths);
7389  	tx = widths >> 12;
7390  	rx = (widths >> 8) & 0xf;
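	/* e.g. widths == 0x4400 decodes to 4 active tx and 4 active rx lanes */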
7391  
7392  	*tx_width = link_width_to_bits(dd, tx);
7393  	*rx_width = link_width_to_bits(dd, rx);
7394  
7395  	/* print the active widths */
7396  	get_link_widths(dd, &active_tx, &active_rx);
7397  }
7398  
7399  /*
7400   * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7401   * hardware information when the link first comes up.
7402   *
7403   * The link width is not available until after VerifyCap.AllFramesReceived
7404   * (the trigger for handle_verify_cap), so this is outside that routine
7405   * and should be called when the 8051 signals linkup.
7406   */
7407  void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7408  {
7409  	u16 tx_width, rx_width;
7410  
7411  	/* get end-of-LNI link widths */
7412  	get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7413  
7414  	/* use tx_width as the link is supposed to be symmetric on link up */
7415  	ppd->link_width_active = tx_width;
7416  	/* link width downgrade active (LWD.A) starts out matching LW.A */
7417  	ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7418  	ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7419  	/* per OPA spec, on link up LWD.E resets to LWD.S */
7420  	ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7421  	/* cache the active egress rate (units [10^6 bits/sec]) */
7422  	ppd->current_egress_rate = active_egress_rate(ppd);
7423  }
7424  
7425  /*
7426   * Handle a verify capabilities interrupt from the 8051.
7427   *
7428   * This is a work-queue function outside of the interrupt.
7429   */
7430  void handle_verify_cap(struct work_struct *work)
7431  {
7432  	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7433  								link_vc_work);
7434  	struct hfi1_devdata *dd = ppd->dd;
7435  	u64 reg;
7436  	u8 power_management;
7437  	u8 continuous;
7438  	u8 vcu;
7439  	u8 vau;
7440  	u8 z;
7441  	u16 vl15buf;
7442  	u16 link_widths;
7443  	u16 crc_mask;
7444  	u16 crc_val;
7445  	u16 device_id;
7446  	u16 active_tx, active_rx;
7447  	u8 partner_supported_crc;
7448  	u8 remote_tx_rate;
7449  	u8 device_rev;
7450  
7451  	set_link_state(ppd, HLS_VERIFY_CAP);
7452  
7453  	lcb_shutdown(dd, 0);
7454  	adjust_lcb_for_fpga_serdes(dd);
7455  
7456  	read_vc_remote_phy(dd, &power_management, &continuous);
7457  	read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7458  			      &partner_supported_crc);
7459  	read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7460  	read_remote_device_id(dd, &device_id, &device_rev);
7461  
7462  	/* print the active widths */
7463  	get_link_widths(dd, &active_tx, &active_rx);
7464  	dd_dev_info(dd,
7465  		    "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7466  		    (int)power_management, (int)continuous);
7467  	dd_dev_info(dd,
7468  		    "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7469  		    (int)vau, (int)z, (int)vcu, (int)vl15buf,
7470  		    (int)partner_supported_crc);
7471  	dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7472  		    (u32)remote_tx_rate, (u32)link_widths);
7473  	dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7474  		    (u32)device_id, (u32)device_rev);
7475  	/*
7476  	 * The peer vAU value just read is the peer receiver value.  HFI does
7477  	 * not support a transmit vAU of 0 (AU == 8).  We advertised that
7478  	 * with Z=1 in the fabric capabilities sent to the peer.  The peer
7479  	 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7480  	 * receive to vAU of 1 (AU == 16).  Do the same here.  We do not care
7481  	 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7482  	 * subject to the Z value exception.
7483  	 */
7484  	if (vau == 0)
7485  		vau = 1;
7486  	set_up_vau(dd, vau);
7487  
7488  	/*
7489  	 * Set VL15 credits to 0 in global credit register. Cache remote VL15
7490  	 * credits value and wait for the link-up interrupt to set it.
7491  	 */
7492  	set_up_vl15(dd, 0);
7493  	dd->vl15buf_cached = vl15buf;
7494  
7495  	/* set up the LCB CRC mode */
7496  	crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7497  
7498  	/* order is important: use the lowest bit in common */
7499  	if (crc_mask & CAP_CRC_14B)
7500  		crc_val = LCB_CRC_14B;
7501  	else if (crc_mask & CAP_CRC_48B)
7502  		crc_val = LCB_CRC_48B;
7503  	else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7504  		crc_val = LCB_CRC_12B_16B_PER_LANE;
7505  	else
7506  		crc_val = LCB_CRC_16B;
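	/*
	 * Example: with 14B and 48B enabled locally but only 48B supported
	 * by the peer, crc_mask contains just CAP_CRC_48B and crc_val
	 * resolves to LCB_CRC_48B.
	 */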
7507  
7508  	dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7509  	write_csr(dd, DC_LCB_CFG_CRC_MODE,
7510  		  (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7511  
7512  	/* set (14b only) or clear sideband credit */
7513  	reg = read_csr(dd, SEND_CM_CTRL);
7514  	if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7515  		write_csr(dd, SEND_CM_CTRL,
7516  			  reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7517  	} else {
7518  		write_csr(dd, SEND_CM_CTRL,
7519  			  reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7520  	}
7521  
7522  	ppd->link_speed_active = 0;	/* invalid value */
7523  	if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
7524  		/* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7525  		switch (remote_tx_rate) {
7526  		case 0:
7527  			ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7528  			break;
7529  		case 1:
7530  			ppd->link_speed_active = OPA_LINK_SPEED_25G;
7531  			break;
7532  		}
7533  	} else {
7534  		/* actual rate is highest bit of the ANDed rates */
7535  		u8 rate = remote_tx_rate & ppd->local_tx_rate;
7536  
7537  		if (rate & 2)
7538  			ppd->link_speed_active = OPA_LINK_SPEED_25G;
7539  		else if (rate & 1)
7540  			ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7541  	}
7542  	if (ppd->link_speed_active == 0) {
7543  		dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7544  			   __func__, (int)remote_tx_rate);
7545  		ppd->link_speed_active = OPA_LINK_SPEED_25G;
7546  	}
7547  
7548  	/*
7549  	 * Cache the values of the supported, enabled, and active
7550  	 * LTP CRC modes to return in 'portinfo' queries. But the bit
7551  	 * flags that are returned in the portinfo query differ from
7552  	 * what's in the link_crc_mask, crc_sizes, and crc_val
7553  	 * variables. Convert these here.
7554  	 */
7555  	ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7556  		/* supported crc modes */
7557  	ppd->port_ltp_crc_mode |=
7558  		cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7559  		/* enabled crc modes */
7560  	ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7561  		/* active crc mode */
7562  
7563  	/* set up the remote credit return table */
7564  	assign_remote_cm_au_table(dd, vcu);
7565  
7566  	/*
7567  	 * The LCB is reset on entry to handle_verify_cap(), so this must
7568  	 * be applied on every link up.
7569  	 *
7570  	 * Adjust LCB error kill enable to kill the link if
7571  	 * these RBUF errors are seen:
7572  	 *	REPLAY_BUF_MBE_SMASK
7573  	 *	FLIT_INPUT_BUF_MBE_SMASK
7574  	 */
7575  	if (is_ax(dd)) {			/* fixed in B0 */
7576  		reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7577  		reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7578  			| DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7579  		write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7580  	}
7581  
7582  	/* pull LCB fifos out of reset - all fifo clocks must be stable */
7583  	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7584  
7585  	/* give 8051 access to the LCB CSRs */
7586  	write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7587  	set_8051_lcb_access(dd);
7588  
7589  	/* tell the 8051 to go to LinkUp */
7590  	set_link_state(ppd, HLS_GOING_UP);
7591  }
7592  
7593  /**
7594   * apply_link_downgrade_policy - Apply the link width downgrade enabled
7595   * policy against the current active link widths.
7596   * @ppd: info of physical Hfi port
7597   * @refresh_widths: True indicates link downgrade event
7598   * @return: True indicates a successful link downgrade. False indicates
7599   *	    link downgrade event failed and the link will bounce back to
7600   *	    default link width.
7601   *
7602   * Called when the enabled policy changes or the active link widths
7603   * change.
7604   * Refresh_widths indicates that a link downgrade occurred. The
7605   * link_downgraded variable is set by refresh_widths and
7606   * determines the success/failure of the policy application.
7607   */
7608  bool apply_link_downgrade_policy(struct hfi1_pportdata *ppd,
7609  				 bool refresh_widths)
7610  {
7611  	int do_bounce = 0;
7612  	int tries;
7613  	u16 lwde;
7614  	u16 tx, rx;
7615  	bool link_downgraded = refresh_widths;
7616  
7617  	/* use the hls lock to avoid a race with actual link up */
7618  	tries = 0;
7619  retry:
7620  	mutex_lock(&ppd->hls_lock);
7621  	/* only apply if the link is up */
7622  	if (ppd->host_link_state & HLS_DOWN) {
7623  		/* still going up..wait and retry */
7624  		if (ppd->host_link_state & HLS_GOING_UP) {
7625  			if (++tries < 1000) {
7626  				mutex_unlock(&ppd->hls_lock);
7627  				usleep_range(100, 120); /* arbitrary */
7628  				goto retry;
7629  			}
7630  			dd_dev_err(ppd->dd,
7631  				   "%s: giving up waiting for link state change\n",
7632  				   __func__);
7633  		}
7634  		goto done;
7635  	}
7636  
7637  	lwde = ppd->link_width_downgrade_enabled;
7638  
7639  	if (refresh_widths) {
7640  		get_link_widths(ppd->dd, &tx, &rx);
7641  		ppd->link_width_downgrade_tx_active = tx;
7642  		ppd->link_width_downgrade_rx_active = rx;
7643  	}
7644  
7645  	if (ppd->link_width_downgrade_tx_active == 0 ||
7646  	    ppd->link_width_downgrade_rx_active == 0) {
7647  		/* the 8051 reported a dead link as a downgrade */
7648  		dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7649  		link_downgraded = false;
7650  	} else if (lwde == 0) {
7651  		/* downgrade is disabled */
7652  
7653  		/* bounce if not at starting active width */
7654  		if ((ppd->link_width_active !=
7655  		     ppd->link_width_downgrade_tx_active) ||
7656  		    (ppd->link_width_active !=
7657  		     ppd->link_width_downgrade_rx_active)) {
7658  			dd_dev_err(ppd->dd,
7659  				   "Link downgrade is disabled and link has downgraded, downing link\n");
7660  			dd_dev_err(ppd->dd,
7661  				   "  original 0x%x, tx active 0x%x, rx active 0x%x\n",
7662  				   ppd->link_width_active,
7663  				   ppd->link_width_downgrade_tx_active,
7664  				   ppd->link_width_downgrade_rx_active);
7665  			do_bounce = 1;
7666  			link_downgraded = false;
7667  		}
7668  	} else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7669  		   (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7670  		/* Tx or Rx is outside the enabled policy */
7671  		dd_dev_err(ppd->dd,
7672  			   "Link is outside of downgrade allowed, downing link\n");
7673  		dd_dev_err(ppd->dd,
7674  			   "  enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7675  			   lwde, ppd->link_width_downgrade_tx_active,
7676  			   ppd->link_width_downgrade_rx_active);
7677  		do_bounce = 1;
7678  		link_downgraded = false;
7679  	}
7680  
7681  done:
7682  	mutex_unlock(&ppd->hls_lock);
7683  
7684  	if (do_bounce) {
7685  		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7686  				     OPA_LINKDOWN_REASON_WIDTH_POLICY);
7687  		set_link_state(ppd, HLS_DN_OFFLINE);
7688  		start_link(ppd);
7689  	}
7690  
7691  	return link_downgraded;
7692  }
7693  
7694  /*
7695   * Handle a link downgrade interrupt from the 8051.
7696   *
7697   * This is a work-queue function outside of the interrupt.
7698   */
7699  void handle_link_downgrade(struct work_struct *work)
7700  {
7701  	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7702  							link_downgrade_work);
7703  
7704  	dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7705  	if (apply_link_downgrade_policy(ppd, true))
7706  		update_xmit_counters(ppd, ppd->link_width_downgrade_tx_active);
7707  }
7708  
7709  static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7710  {
7711  	return flag_string(buf, buf_len, flags, dcc_err_flags,
7712  		ARRAY_SIZE(dcc_err_flags));
7713  }
7714  
7715  static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7716  {
7717  	return flag_string(buf, buf_len, flags, lcb_err_flags,
7718  		ARRAY_SIZE(lcb_err_flags));
7719  }
7720  
7721  static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7722  {
7723  	return flag_string(buf, buf_len, flags, dc8051_err_flags,
7724  		ARRAY_SIZE(dc8051_err_flags));
7725  }
7726  
7727  static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7728  {
7729  	return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7730  		ARRAY_SIZE(dc8051_info_err_flags));
7731  }
7732  
7733  static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7734  {
7735  	return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7736  		ARRAY_SIZE(dc8051_info_host_msg_flags));
7737  }
7738  
7739  static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7740  {
7741  	struct hfi1_pportdata *ppd = dd->pport;
7742  	u64 info, err, host_msg;
7743  	int queue_link_down = 0;
7744  	char buf[96];
7745  
7746  	/* look at the flags */
7747  	if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7748  		/* 8051 information set by firmware */
7749  		/* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7750  		info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7751  		err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7752  			& DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7753  		host_msg = (info >>
7754  			DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7755  			& DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7756  
7757  		/*
7758  		 * Handle error flags.
7759  		 */
7760  		if (err & FAILED_LNI) {
7761  			/*
7762  			 * LNI error indications are cleared by the 8051
7763  			 * only when starting polling.  Only pay attention
7764  			 * to them when in the states that occur during
7765  			 * LNI.
7766  			 */
7767  			if (ppd->host_link_state
7768  			    & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7769  				queue_link_down = 1;
7770  				dd_dev_info(dd, "Link error: %s\n",
7771  					    dc8051_info_err_string(buf,
7772  								   sizeof(buf),
7773  								   err &
7774  								   FAILED_LNI));
7775  			}
7776  			err &= ~(u64)FAILED_LNI;
7777  		}
7778  		/* unknown frames can happen during LNI, just count */
7779  		if (err & UNKNOWN_FRAME) {
7780  			ppd->unknown_frame_count++;
7781  			err &= ~(u64)UNKNOWN_FRAME;
7782  		}
7783  		if (err) {
7784  			/* report remaining errors, but do not do anything */
7785  			dd_dev_err(dd, "8051 info error: %s\n",
7786  				   dc8051_info_err_string(buf, sizeof(buf),
7787  							  err));
7788  		}
7789  
7790  		/*
7791  		 * Handle host message flags.
7792  		 */
7793  		if (host_msg & HOST_REQ_DONE) {
7794  			/*
7795  			 * Presently, the driver does a busy wait for
7796  			 * host requests to complete.  This is only an
7797  			 * informational message.
7798  			 * NOTE: The 8051 clears the host message
7799  			 * information *on the next 8051 command*.
7800  			 * Therefore, when linkup is achieved,
7801  			 * this flag will still be set.
7802  			 */
7803  			host_msg &= ~(u64)HOST_REQ_DONE;
7804  		}
7805  		if (host_msg & BC_SMA_MSG) {
7806  			queue_work(ppd->link_wq, &ppd->sma_message_work);
7807  			host_msg &= ~(u64)BC_SMA_MSG;
7808  		}
7809  		if (host_msg & LINKUP_ACHIEVED) {
7810  			dd_dev_info(dd, "8051: Link up\n");
7811  			queue_work(ppd->link_wq, &ppd->link_up_work);
7812  			host_msg &= ~(u64)LINKUP_ACHIEVED;
7813  		}
7814  		if (host_msg & EXT_DEVICE_CFG_REQ) {
7815  			handle_8051_request(ppd);
7816  			host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7817  		}
7818  		if (host_msg & VERIFY_CAP_FRAME) {
7819  			queue_work(ppd->link_wq, &ppd->link_vc_work);
7820  			host_msg &= ~(u64)VERIFY_CAP_FRAME;
7821  		}
7822  		if (host_msg & LINK_GOING_DOWN) {
7823  			const char *extra = "";
7824  			/* no downgrade action needed if going down */
7825  			if (host_msg & LINK_WIDTH_DOWNGRADED) {
7826  				host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7827  				extra = " (ignoring downgrade)";
7828  			}
7829  			dd_dev_info(dd, "8051: Link down%s\n", extra);
7830  			queue_link_down = 1;
7831  			host_msg &= ~(u64)LINK_GOING_DOWN;
7832  		}
7833  		if (host_msg & LINK_WIDTH_DOWNGRADED) {
7834  			queue_work(ppd->link_wq, &ppd->link_downgrade_work);
7835  			host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7836  		}
7837  		if (host_msg) {
7838  			/* report remaining messages, but do not do anything */
7839  			dd_dev_info(dd, "8051 info host message: %s\n",
7840  				    dc8051_info_host_msg_string(buf,
7841  								sizeof(buf),
7842  								host_msg));
7843  		}
7844  
7845  		reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7846  	}
7847  	if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7848  		/*
7849  		 * Lost the 8051 heartbeat.  If this happens, we
7850  		 * receive constant interrupts about it.  Disable
7851  		 * the interrupt after the first.
7852  		 */
7853  		dd_dev_err(dd, "Lost 8051 heartbeat\n");
7854  		write_csr(dd, DC_DC8051_ERR_EN,
7855  			  read_csr(dd, DC_DC8051_ERR_EN) &
7856  			  ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7857  
7858  		reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7859  	}
7860  	if (reg) {
7861  		/* report the error, but do not do anything */
7862  		dd_dev_err(dd, "8051 error: %s\n",
7863  			   dc8051_err_string(buf, sizeof(buf), reg));
7864  	}
7865  
7866  	if (queue_link_down) {
7867  		/*
7868  		 * If the link is already going down or disabled, do not
7869  		 * queue a link down request. If one is already queued,
7870  		 * do not queue another.
7871  		 */
7872  		if ((ppd->host_link_state &
7873  		    (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7874  		    ppd->link_enabled == 0) {
7875  			dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
7876  				    __func__, ppd->host_link_state,
7877  				    ppd->link_enabled);
7878  		} else {
7879  			if (xchg(&ppd->is_link_down_queued, 1) == 1)
7880  				dd_dev_info(dd,
7881  					    "%s: link down request already queued\n",
7882  					    __func__);
7883  			else
7884  				queue_work(ppd->link_wq, &ppd->link_down_work);
7885  		}
7886  	}
7887  }
7888  
7889  static const char * const fm_config_txt[] = {
7890  [0] =
7891  	"BadHeadDist: Distance violation between two head flits",
7892  [1] =
7893  	"BadTailDist: Distance violation between two tail flits",
7894  [2] =
7895  	"BadCtrlDist: Distance violation between two credit control flits",
7896  [3] =
7897  	"BadCrdAck: Credits return for unsupported VL",
7898  [4] =
7899  	"UnsupportedVLMarker: Received VL Marker",
7900  [5] =
7901  	"BadPreempt: Exceeded the preemption nesting level",
7902  [6] =
7903  	"BadControlFlit: Received unsupported control flit",
7904  /* no 7 */
7905  [8] =
7906  	"UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7907  };
7908  
7909  static const char * const port_rcv_txt[] = {
7910  [1] =
7911  	"BadPktLen: Illegal PktLen",
7912  [2] =
7913  	"PktLenTooLong: Packet longer than PktLen",
7914  [3] =
7915  	"PktLenTooShort: Packet shorter than PktLen",
7916  [4] =
7917  	"BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7918  [5] =
7919  	"BadDLID: Illegal DLID (0, doesn't match HFI)",
7920  [6] =
7921  	"BadL2: Illegal L2 opcode",
7922  [7] =
7923  	"BadSC: Unsupported SC",
7924  [9] =
7925  	"BadRC: Illegal RC",
7926  [11] =
7927  	"PreemptError: Preempting with same VL",
7928  [12] =
7929  	"PreemptVL15: Preempting a VL15 packet",
7930  };
7931  
7932  #define OPA_LDR_FMCONFIG_OFFSET 16
7933  #define OPA_LDR_PORTRCV_OFFSET 0
7934  static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7935  {
7936  	u64 info, hdr0, hdr1;
7937  	const char *extra;
7938  	char buf[96];
7939  	struct hfi1_pportdata *ppd = dd->pport;
7940  	u8 lcl_reason = 0;
7941  	int do_bounce = 0;
7942  
7943  	if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7944  		if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7945  			info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7946  			dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7947  			/* set status bit */
7948  			dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7949  		}
7950  		reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7951  	}
7952  
7953  	if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7954  		struct hfi1_pportdata *ppd = dd->pport;
7955  		/* this counter saturates at (2^32) - 1 */
7956  		if (ppd->link_downed < (u32)UINT_MAX)
7957  			ppd->link_downed++;
7958  		reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7959  	}
7960  
7961  	if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7962  		u8 reason_valid = 1;
7963  
7964  		info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7965  		if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7966  			dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7967  			/* set status bit */
7968  			dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7969  		}
7970  		switch (info) {
7971  		case 0:
7972  		case 1:
7973  		case 2:
7974  		case 3:
7975  		case 4:
7976  		case 5:
7977  		case 6:
7978  			extra = fm_config_txt[info];
7979  			break;
7980  		case 8:
7981  			extra = fm_config_txt[info];
7982  			if (ppd->port_error_action &
7983  			    OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7984  				do_bounce = 1;
7985  				/*
7986  				 * lcl_reason cannot be derived from info
7987  				 * for this error
7988  				 */
7989  				lcl_reason =
7990  				  OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7991  			}
7992  			break;
7993  		default:
7994  			reason_valid = 0;
7995  			snprintf(buf, sizeof(buf), "reserved%lld", info);
7996  			extra = buf;
7997  			break;
7998  		}
7999  
8000  		if (reason_valid && !do_bounce) {
8001  			do_bounce = ppd->port_error_action &
8002  					(1 << (OPA_LDR_FMCONFIG_OFFSET + info));
8003  			lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
8004  		}
8005  
8006  		/* just report this */
8007  		dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
8008  					extra);
8009  		reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
8010  	}
8011  
8012  	if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
8013  		u8 reason_valid = 1;
8014  
8015  		info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
8016  		hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
8017  		hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
8018  		if (!(dd->err_info_rcvport.status_and_code &
8019  		      OPA_EI_STATUS_SMASK)) {
8020  			dd->err_info_rcvport.status_and_code =
8021  				info & OPA_EI_CODE_SMASK;
8022  			/* set status bit */
8023  			dd->err_info_rcvport.status_and_code |=
8024  				OPA_EI_STATUS_SMASK;
8025  			/*
8026  			 * save first 2 flits in the packet that caused
8027  			 * the error
8028  			 */
8029  			dd->err_info_rcvport.packet_flit1 = hdr0;
8030  			dd->err_info_rcvport.packet_flit2 = hdr1;
8031  		}
8032  		switch (info) {
8033  		case 1:
8034  		case 2:
8035  		case 3:
8036  		case 4:
8037  		case 5:
8038  		case 6:
8039  		case 7:
8040  		case 9:
8041  		case 11:
8042  		case 12:
8043  			extra = port_rcv_txt[info];
8044  			break;
8045  		default:
8046  			reason_valid = 0;
8047  			snprintf(buf, sizeof(buf), "reserved%lld", info);
8048  			extra = buf;
8049  			break;
8050  		}
8051  
8052  		if (reason_valid && !do_bounce) {
8053  			do_bounce = ppd->port_error_action &
8054  					(1 << (OPA_LDR_PORTRCV_OFFSET + info));
8055  			lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
8056  		}
8057  
8058  		/* just report this */
8059  		dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
8060  					"               hdr0 0x%llx, hdr1 0x%llx\n",
8061  					extra, hdr0, hdr1);
8062  
8063  		reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
8064  	}
8065  
8066  	if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
8067  		/* informative only */
8068  		dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
8069  		reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
8070  	}
8071  	if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
8072  		/* informative only */
8073  		dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
8074  		reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
8075  	}
8076  
8077  	if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
8078  		reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
8079  
8080  	/* report any remaining errors */
8081  	if (reg)
8082  		dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
8083  					dcc_err_string(buf, sizeof(buf), reg));
8084  
8085  	if (lcl_reason == 0)
8086  		lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
8087  
8088  	if (do_bounce) {
8089  		dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
8090  					__func__);
8091  		set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
8092  		queue_work(ppd->link_wq, &ppd->link_bounce_work);
8093  	}
8094  }
8095  
8096  static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
8097  {
8098  	char buf[96];
8099  
8100  	dd_dev_info(dd, "LCB Error: %s\n",
8101  		    lcb_err_string(buf, sizeof(buf), reg));
8102  }
8103  
8104  /*
8105   * CCE block DC interrupt.  Source is < 8.
8106   */
8107  static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
8108  {
8109  	const struct err_reg_info *eri = &dc_errs[source];
8110  
8111  	if (eri->handler) {
8112  		interrupt_clear_down(dd, 0, eri);
8113  	} else if (source == 3 /* dc_lbm_int */) {
8114  		/*
8115  		 * This indicates that a parity error has occurred on the
8116  		 * address/control lines presented to the LBM.  The error
8117  		 * is a single pulse, there is no associated error flag,
8118  		 * and it is non-maskable.  This is because, if a parity
8119  		 * error occurs on the request, the request is dropped.
8120  		 * This should never occur, but it is nice to know if it
8121  		 * ever does.
8122  		 */
8123  		dd_dev_err(dd, "Parity error in DC LBM block\n");
8124  	} else {
8125  		dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
8126  	}
8127  }
8128  
8129  /*
8130   * TX block send credit interrupt.  Source is < 160.
8131   */
8132  static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
8133  {
8134  	sc_group_release_update(dd, source);
8135  }
8136  
8137  /*
8138   * TX block SDMA interrupt.  Source is < 48.
8139   *
8140   * SDMA interrupts are grouped by type:
8141   *
8142   *	 0 -  N-1 = SDma
8143   *	 N - 2N-1 = SDmaProgress
8144   *	2N - 3N-1 = SDmaIdle
8145   */
8146  static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
8147  {
8148  	/* what interrupt */
8149  	unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
8150  	/* which engine */
8151  	unsigned int which = source % TXE_NUM_SDMA_ENGINES;
8152  
8153  #ifdef CONFIG_SDMA_VERBOSITY
8154  	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
8155  		   slashstrip(__FILE__), __LINE__, __func__);
8156  	sdma_dumpstate(&dd->per_sdma[which]);
8157  #endif
8158  
8159  	if (likely(what < 3 && which < dd->num_sdma)) {
8160  		sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
8161  	} else {
8162  		/* should not happen */
8163  		dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
8164  	}
8165  }
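/*
 * Worked example of the decode above: the 48 sources form three groups
 * of TXE_NUM_SDMA_ENGINES (16), so source 17 gives what = 17 / 16 = 1
 * (SDmaProgress) and which = 17 % 16 = 1, and bit 17 is handed to
 * per_sdma[1].
 */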
8166  
8167  /**
8168   * is_rcv_avail_int() - User receive context available IRQ handler
8169   * @dd: valid dd
8170   * @source: logical IRQ source (offset from IS_RCVAVAIL_START)
8171   *
8172   * RX block receive available interrupt.  Source is < 160.
8173   *
8174   * This is the general interrupt handler for user (PSM) receive contexts,
8175   * and can only be used for non-threaded IRQs.
8176   */
8177  static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8178  {
8179  	struct hfi1_ctxtdata *rcd;
8180  	char *err_detail;
8181  
8182  	if (likely(source < dd->num_rcv_contexts)) {
8183  		rcd = hfi1_rcd_get_by_index(dd, source);
8184  		if (rcd) {
8185  			handle_user_interrupt(rcd);
8186  			hfi1_rcd_put(rcd);
8187  			return;	/* OK */
8188  		}
8189  		/* received an interrupt, but no rcd */
8190  		err_detail = "dataless";
8191  	} else {
8192  		/* received an interrupt, but are not using that context */
8193  		err_detail = "out of range";
8194  	}
8195  	dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
8196  		   err_detail, source);
8197  }
8198  
8199  /**
8200   * is_rcv_urgent_int() - User receive context urgent IRQ handler
8201   * @dd: valid dd
8202   * @source: logical IRQ source (offset from IS_RCVURGENT_START)
8203   *
8204   * RX block receive urgent interrupt.  Source is < 160.
8205   *
8206   * NOTE: kernel receive contexts specifically do NOT enable this IRQ.
8207   */
8208  static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8209  {
8210  	struct hfi1_ctxtdata *rcd;
8211  	char *err_detail;
8212  
8213  	if (likely(source < dd->num_rcv_contexts)) {
8214  		rcd = hfi1_rcd_get_by_index(dd, source);
8215  		if (rcd) {
8216  			handle_user_interrupt(rcd);
8217  			hfi1_rcd_put(rcd);
8218  			return;	/* OK */
8219  		}
8220  		/* received an interrupt, but no rcd */
8221  		err_detail = "dataless";
8222  	} else {
8223  		/* received an interrupt, but are not using that context */
8224  		err_detail = "out of range";
8225  	}
8226  	dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
8227  		   err_detail, source);
8228  }
8229  
8230  /*
8231   * Reserved range interrupt.  Should not be called in normal operation.
8232   */
8233  static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8234  {
8235  	char name[64];
8236  
8237  	dd_dev_err(dd, "unexpected %s interrupt\n",
8238  		   is_reserved_name(name, sizeof(name), source));
8239  }
8240  
8241  static const struct is_table is_table[] = {
8242  /*
8243   * start		 end
8244   *				name func		interrupt func
8245   */
8246  { IS_GENERAL_ERR_START,  IS_GENERAL_ERR_END,
8247  				is_misc_err_name,	is_misc_err_int },
8248  { IS_SDMAENG_ERR_START,  IS_SDMAENG_ERR_END,
8249  				is_sdma_eng_err_name,	is_sdma_eng_err_int },
8250  { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8251  				is_sendctxt_err_name,	is_sendctxt_err_int },
8252  { IS_SDMA_START,	     IS_SDMA_IDLE_END,
8253  				is_sdma_eng_name,	is_sdma_eng_int },
8254  { IS_VARIOUS_START,	     IS_VARIOUS_END,
8255  				is_various_name,	is_various_int },
8256  { IS_DC_START,	     IS_DC_END,
8257  				is_dc_name,		is_dc_int },
8258  { IS_RCVAVAIL_START,     IS_RCVAVAIL_END,
8259  				is_rcv_avail_name,	is_rcv_avail_int },
8260  { IS_RCVURGENT_START,    IS_RCVURGENT_END,
8261  				is_rcv_urgent_name,	is_rcv_urgent_int },
8262  { IS_SENDCREDIT_START,   IS_SENDCREDIT_END,
8263  				is_send_credit_name,	is_send_credit_int},
8264  { IS_RESERVED_START,     IS_RESERVED_END,
8265  				is_reserved_name,	is_reserved_int},
8266  };
8267  
8268  /*
8269   * Interrupt source interrupt - called when the given source has an interrupt.
8270   * Source is a bit index into an array of 64-bit integers.
8271   */
8272  static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8273  {
8274  	const struct is_table *entry;
8275  
8276  	/* avoids a double compare by walking the table in-order */
8277  	for (entry = &is_table[0]; entry->is_name; entry++) {
8278  		if (source <= entry->end) {
8279  			trace_hfi1_interrupt(dd, entry, source);
8280  			entry->is_int(dd, source - entry->start);
8281  			return;
8282  		}
8283  	}
8284  	/* fell off the end */
8285  	dd_dev_err(dd, "invalid interrupt source %u\n", source);
8286  }
8287  
8288  /**
8289   * general_interrupt() - General interrupt handler
8290   * @irq: MSIx IRQ vector
8291   * @data: hfi1 devdata
8292   *
8293   * This is able to correctly handle all non-threaded interrupts.  Receive
8294   * context DATA IRQs are threaded and are not supported by this handler.
8295   *
8296   */
8297  irqreturn_t general_interrupt(int irq, void *data)
8298  {
8299  	struct hfi1_devdata *dd = data;
8300  	u64 regs[CCE_NUM_INT_CSRS];
8301  	u32 bit;
8302  	int i;
8303  	irqreturn_t handled = IRQ_NONE;
8304  
8305  	this_cpu_inc(*dd->int_counter);
8306  
8307  	/* phase 1: scan and clear all handled interrupts */
8308  	for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8309  		if (dd->gi_mask[i] == 0) {
8310  			regs[i] = 0;	/* used later */
8311  			continue;
8312  		}
8313  		regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8314  				dd->gi_mask[i];
8315  		/* only clear if anything is set */
8316  		if (regs[i])
8317  			write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8318  	}
8319  
8320  	/* phase 2: call the appropriate handler */
8321  	for_each_set_bit(bit, (unsigned long *)&regs[0],
8322  			 CCE_NUM_INT_CSRS * 64) {
8323  		is_interrupt(dd, bit);
8324  		handled = IRQ_HANDLED;
8325  	}
8326  
8327  	return handled;
8328  }
8329  
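/*
 * Per-engine SDMA IRQ handler.  Reads and clears only the interrupt
 * status bits belonging to this engine (sde->imask), then passes the
 * status to the engine for processing.
 */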
8330  irqreturn_t sdma_interrupt(int irq, void *data)
8331  {
8332  	struct sdma_engine *sde = data;
8333  	struct hfi1_devdata *dd = sde->dd;
8334  	u64 status;
8335  
8336  #ifdef CONFIG_SDMA_VERBOSITY
8337  	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8338  		   slashstrip(__FILE__), __LINE__, __func__);
8339  	sdma_dumpstate(sde);
8340  #endif
8341  
8342  	this_cpu_inc(*dd->int_counter);
8343  
8344  	/* This read_csr is really bad in the hot path */
8345  	status = read_csr(dd,
8346  			  CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8347  			  & sde->imask;
8348  	if (likely(status)) {
8349  		/* clear the interrupt(s) */
8350  		write_csr(dd,
8351  			  CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8352  			  status);
8353  
8354  		/* handle the interrupt(s) */
8355  		sdma_engine_interrupt(sde, status);
8356  	} else {
8357  		dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
8358  					sde->this_idx);
8359  	}
8360  	return IRQ_HANDLED;
8361  }
8362  
8363  /*
8364   * Clear the receive interrupt.  Use a read of the interrupt clear CSR
8365   * to ensure that the write completed.  This does NOT guarantee that
8366   * queued DMA writes to memory from the chip are pushed.
8367   */
8368  static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8369  {
8370  	struct hfi1_devdata *dd = rcd->dd;
8371  	u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8372  
8373  	write_csr(dd, addr, rcd->imask);
8374  	/* force the above write on the chip and get a value back */
8375  	(void)read_csr(dd, addr);
8376  }
8377  
8378  /* force the receive interrupt */
8379  void force_recv_intr(struct hfi1_ctxtdata *rcd)
8380  {
8381  	write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8382  }
8383  
8384  /*
8385   * Return non-zero if a packet is present.
8386   *
8387   * This routine is called when rechecking for packets after the RcvAvail
8388   * interrupt has been cleared down.  First, do a quick check of memory for
8389   * a packet present.  If not found, use an expensive CSR read of the context
8390   * tail to determine the actual tail.  The CSR read is necessary because there
8391   * is no method to push pending DMAs to memory other than an interrupt and we
8392   * are trying to determine if we need to force an interrupt.
8393   */
8394  static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8395  {
8396  	u32 tail;
8397  	int present;
8398  
8399  	if (!rcd->rcvhdrtail_kvaddr)
8400  		present = (rcd->seq_cnt ==
8401  				rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8402  	else /* is RDMA rtail */
8403  		present = (rcd->head != get_rcvhdrtail(rcd));
8404  
8405  	if (present)
8406  		return 1;
8407  
8408  	/* fall back to a CSR read, correct independent of DMA_RTAIL */
8409  	tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8410  	return rcd->head != tail;
8411  }
8412  
8413  /*
8414   * Receive packet IRQ handler.  This routine expects to be on its own IRQ.
8415   * This routine will try to handle packets immediately (latency), but if
8416   * it finds too many, it will invoke the thread handler (bandwidth).  The
8417   * chip receive interrupt is *not* cleared down until this or the thread (if
8418   * invoked) is finished.  The intent is to avoid extra interrupts while we
8419   * are processing packets anyway.
8420   */
8421  irqreturn_t receive_context_interrupt(int irq, void *data)
8422  {
8423  	struct hfi1_ctxtdata *rcd = data;
8424  	struct hfi1_devdata *dd = rcd->dd;
8425  	int disposition;
8426  	int present;
8427  
8428  	trace_hfi1_receive_interrupt(dd, rcd);
8429  	this_cpu_inc(*dd->int_counter);
8430  	aspm_ctx_disable(rcd);
8431  
8432  	/* receive interrupt remains blocked while processing packets */
8433  	disposition = rcd->do_interrupt(rcd, 0);
8434  
8435  	/*
8436  	 * Too many packets were seen while processing packets in this
8437  	 * IRQ handler.  Invoke the handler thread.  The receive interrupt
8438  	 * remains blocked.
8439  	 */
8440  	if (disposition == RCV_PKT_LIMIT)
8441  		return IRQ_WAKE_THREAD;
8442  
8443  	/*
8444  	 * The packet processor detected no more packets.  Clear the receive
8445  	 * interrupt and recheck for a packet that may have arrived
8446  	 * after the previous check and interrupt clear.  If a packet arrived,
8447  	 * force another interrupt.
8448  	 */
8449  	clear_recv_intr(rcd);
8450  	present = check_packet_present(rcd);
8451  	if (present)
8452  		force_recv_intr(rcd);
8453  
8454  	return IRQ_HANDLED;
8455  }
8456  
8457  /*
8458   * Receive packet thread handler.  This expects to be invoked with the
8459   * receive interrupt still blocked.
8460   */
8461  irqreturn_t receive_context_thread(int irq, void *data)
8462  {
8463  	struct hfi1_ctxtdata *rcd = data;
8464  	int present;
8465  
8466  	/* receive interrupt is still blocked from the IRQ handler */
8467  	(void)rcd->do_interrupt(rcd, 1);
8468  
8469  	/*
8470  	 * The packet processor will only return if it detected no more
8471  	 * packets.  Hold IRQs here so we can safely clear the interrupt and
8472  	 * recheck for a packet that may have arrived after the previous
8473  	 * check and the interrupt clear.  If a packet arrived, force another
8474  	 * interrupt.
8475  	 */
8476  	local_irq_disable();
8477  	clear_recv_intr(rcd);
8478  	present = check_packet_present(rcd);
8479  	if (present)
8480  		force_recv_intr(rcd);
8481  	local_irq_enable();
8482  
8483  	return IRQ_HANDLED;
8484  }
8485  
8486  /* ========================================================================= */
8487  
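/* Return the current physical link state as reported by the 8051 */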
8488  u32 read_physical_state(struct hfi1_devdata *dd)
8489  {
8490  	u64 reg;
8491  
8492  	reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8493  	return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8494  				& DC_DC8051_STS_CUR_STATE_PORT_MASK;
8495  }
8496  
8497  u32 read_logical_state(struct hfi1_devdata *dd)
8498  {
8499  	u64 reg;
8500  
8501  	reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8502  	return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8503  				& DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8504  }
8505  
8506  static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8507  {
8508  	u64 reg;
8509  
8510  	reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8511  	/* clear current state, set new state */
8512  	reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8513  	reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8514  	write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8515  }
8516  
8517  /*
8518   * Use the 8051 to read a LCB CSR.
8519   */
8520  static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8521  {
8522  	u32 regno;
8523  	int ret;
8524  
8525  	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8526  		if (acquire_lcb_access(dd, 0) == 0) {
8527  			*data = read_csr(dd, addr);
8528  			release_lcb_access(dd, 0);
8529  			return 0;
8530  		}
8531  		return -EBUSY;
8532  	}
8533  
8534  	/* register is an index of LCB registers: (offset - base) / 8 */
8535  	regno = (addr - DC_LCB_CFG_RUN) >> 3;
8536  	ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8537  	if (ret != HCMD_SUCCESS)
8538  		return -EBUSY;
8539  	return 0;
8540  }
8541  
8542  /*
8543   * Provide a cache for some of the LCB registers in case the LCB is
8544   * unavailable.
8545   * (The LCB is unavailable in certain link states, for example.)
8546   */
8547  struct lcb_datum {
8548  	u32 off;
8549  	u64 val;
8550  };
8551  
8552  static struct lcb_datum lcb_cache[] = {
8553  	{ DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
8554  	{ DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
8555  	{ DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
8556  };
8557  
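/*
 * Refresh the LCB cache from the hardware.  A cached value is only
 * overwritten when the read succeeds, so the last good value is kept
 * if the LCB is currently unavailable.
 */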
8558  static void update_lcb_cache(struct hfi1_devdata *dd)
8559  {
8560  	int i;
8561  	int ret;
8562  	u64 val;
8563  
8564  	for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8565  		ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8566  
8567  		/* Update if we get good data */
8568  		if (likely(ret != -EBUSY))
8569  			lcb_cache[i].val = val;
8570  	}
8571  }
8572  
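/*
 * Look up a cached LCB value.  Return 0 and fill *val on a hit,
 * -1 if the offset is not one of the cached registers.
 */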
8573  static int read_lcb_cache(u32 off, u64 *val)
8574  {
8575  	int i;
8576  
8577  	for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8578  		if (lcb_cache[i].off == off) {
8579  			*val = lcb_cache[i].val;
8580  			return 0;
8581  		}
8582  	}
8583  
8584  	pr_warn("%s bad offset 0x%x\n", __func__, off);
8585  	return -1;
8586  }
8587  
8588  /*
8589   * Read an LCB CSR.  Access may not be in host control, so check.
8590   * Return 0 on success, -EBUSY on failure.
8591   */
8592  int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8593  {
8594  	struct hfi1_pportdata *ppd = dd->pport;
8595  
8596  	/* if up, go through the 8051 for the value */
8597  	if (ppd->host_link_state & HLS_UP)
8598  		return read_lcb_via_8051(dd, addr, data);
8599  	/* if going up or down, check the cache, otherwise, no access */
8600  	if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
8601  		if (read_lcb_cache(addr, data))
8602  			return -EBUSY;
8603  		return 0;
8604  	}
8605  
8606  	/* otherwise, host has access */
8607  	*data = read_csr(dd, addr);
8608  	return 0;
8609  }
8610  
8611  /*
8612   * Use the 8051 to write a LCB CSR.
8613   */
8614  static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8615  {
8616  	u32 regno;
8617  	int ret;
8618  
8619  	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8620  	    (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
8621  		if (acquire_lcb_access(dd, 0) == 0) {
8622  			write_csr(dd, addr, data);
8623  			release_lcb_access(dd, 0);
8624  			return 0;
8625  		}
8626  		return -EBUSY;
8627  	}
8628  
8629  	/* register is an index of LCB registers: (offset - base) / 8 */
8630  	regno = (addr - DC_LCB_CFG_RUN) >> 3;
8631  	ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8632  	if (ret != HCMD_SUCCESS)
8633  		return -EBUSY;
8634  	return 0;
8635  }
8636  
8637  /*
8638   * Write an LCB CSR.  Access may not be in host control, so check.
8639   * Return 0 on success, -EBUSY on failure.
8640   */
8641  int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8642  {
8643  	struct hfi1_pportdata *ppd = dd->pport;
8644  
8645  	/* if up, go through the 8051 for the value */
8646  	if (ppd->host_link_state & HLS_UP)
8647  		return write_lcb_via_8051(dd, addr, data);
8648  	/* if going up or down, no access */
8649  	if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8650  		return -EBUSY;
8651  	/* otherwise, host has access */
8652  	write_csr(dd, addr, data);
8653  	return 0;
8654  }
8655  
8656  /*
8657   * Returns:
8658   *	< 0 = Linux error, not able to get access
8659   *	> 0 = 8051 command RETURN_CODE
8660   */
8661  static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
8662  			   u64 *out_data)
8663  {
8664  	u64 reg, completed;
8665  	int return_code;
8666  	unsigned long timeout;
8667  
8668  	hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8669  
8670  	mutex_lock(&dd->dc8051_lock);
8671  
8672  	/* We can't send any commands to the 8051 if it's in reset */
8673  	if (dd->dc_shutdown) {
8674  		return_code = -ENODEV;
8675  		goto fail;
8676  	}
8677  
8678  	/*
8679  	 * If an 8051 host command timed out previously, then the 8051 is
8680  	 * stuck.
8681  	 *
8682  	 * On first timeout, attempt to reset and restart the entire DC
8683  	 * block (including 8051). (Is this too big of a hammer?)
8684  	 *
8685  	 * If the 8051 times out a second time, the reset did not bring it
8686  	 * back to healthy life. In that case, fail any subsequent commands.
8687  	 */
8688  	if (dd->dc8051_timed_out) {
8689  		if (dd->dc8051_timed_out > 1) {
8690  			dd_dev_err(dd,
8691  				   "Previous 8051 host command timed out, skipping command %u\n",
8692  				   type);
8693  			return_code = -ENXIO;
8694  			goto fail;
8695  		}
8696  		_dc_shutdown(dd);
8697  		_dc_start(dd);
8698  	}
8699  
8700  	/*
8701  	 * If there is no timeout, then the 8051 command interface is
8702  	 * waiting for a command.
8703  	 */
8704  
8705  	/*
8706  	 * When writing an LCB CSR, out_data contains the full value to
8707  	 * be written, while in_data contains the relative LCB
8708  	 * address in 7:0.  Do the work here, rather than the caller,
8709  	 * of distributing the write data to where it needs to go:
8710  	 *
8711  	 * Write data
8712  	 *   39:00 -> in_data[47:8]
8713  	 *   47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8714  	 *   63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8715  	 */
8716  	if (type == HCMD_WRITE_LCB_CSR) {
8717  		in_data |= ((*out_data) & 0xffffffffffull) << 8;
8718  		/* must preserve COMPLETED - it is tied to hardware */
8719  		reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
8720  		reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
8721  		reg |= ((((*out_data) >> 40) & 0xff) <<
8722  				DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8723  		      | ((((*out_data) >> 48) & 0xffff) <<
8724  				DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8725  		write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8726  	}
8727  
8728  	/*
8729  	 * Do two writes: the first to stabilize the type and req_data, the
8730  	 * second to activate.
8731  	 */
8732  	reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8733  			<< DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8734  		| (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8735  			<< DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8736  	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8737  	reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8738  	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8739  
8740  	/* wait for completion, alternate: interrupt */
8741  	timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8742  	while (1) {
8743  		reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8744  		completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8745  		if (completed)
8746  			break;
8747  		if (time_after(jiffies, timeout)) {
8748  			dd->dc8051_timed_out++;
8749  			dd_dev_err(dd, "8051 host command %u timeout\n", type);
8750  			if (out_data)
8751  				*out_data = 0;
8752  			return_code = -ETIMEDOUT;
8753  			goto fail;
8754  		}
8755  		udelay(2);
8756  	}
8757  
8758  	if (out_data) {
8759  		*out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8760  				& DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8761  		if (type == HCMD_READ_LCB_CSR) {
8762  			/* top 16 bits are in a different register */
8763  			*out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8764  				& DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8765  				<< (48
8766  				    - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8767  		}
8768  	}
8769  	return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8770  				& DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8771  	dd->dc8051_timed_out = 0;
8772  	/*
8773  	 * Clear command for next user.
8774  	 */
8775  	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8776  
8777  fail:
8778  	mutex_unlock(&dd->dc8051_lock);
8779  	return return_code;
8780  }
8781  
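/*
 * Ask the 8051 firmware to move the physical link to @state.
 * Returns the 8051 RETURN_CODE or a negative errno (see
 * do_8051_command()).
 */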
8782  static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8783  {
8784  	return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8785  }
8786  
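/*
 * Write a 32-bit 8051 configuration field selected by field and lane
 * IDs.  Returns the 8051 RETURN_CODE; failures are also logged here.
 */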
8787  int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8788  		     u8 lane_id, u32 config_data)
8789  {
8790  	u64 data;
8791  	int ret;
8792  
8793  	data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8794  		| (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8795  		| (u64)config_data << LOAD_DATA_DATA_SHIFT;
8796  	ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8797  	if (ret != HCMD_SUCCESS) {
8798  		dd_dev_err(dd,
8799  			   "load 8051 config: field id %d, lane %d, err %d\n",
8800  			   (int)field_id, (int)lane_id, ret);
8801  	}
8802  	return ret;
8803  }
8804  
8805  /*
8806   * Read the 8051 firmware "registers".  Use the RAM directly.  Always
8807   * set the result, even on error.
8808   * Return 0 on success, -errno on failure
8809   */
8810  int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8811  		     u32 *result)
8812  {
8813  	u64 big_data;
8814  	u32 addr;
8815  	int ret;
8816  
8817  	/* address start depends on the lane_id */
8818  	if (lane_id < 4)
8819  		addr = (4 * NUM_GENERAL_FIELDS)
8820  			+ (lane_id * 4 * NUM_LANE_FIELDS);
8821  	else
8822  		addr = 0;
8823  	addr += field_id * 4;
8824  
8825  	/* read is in 8-byte chunks, hardware will truncate the address down */
8826  	ret = read_8051_data(dd, addr, 8, &big_data);
8827  
8828  	if (ret == 0) {
8829  		/* extract the 4 bytes we want */
8830  		if (addr & 0x4)
8831  			*result = (u32)(big_data >> 32);
8832  		else
8833  			*result = (u32)big_data;
8834  	} else {
8835  		*result = 0;
8836  		dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8837  			   __func__, lane_id, field_id);
8838  	}
8839  
8840  	return ret;
8841  }
8842  
8843  static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8844  			      u8 continuous)
8845  {
8846  	u32 frame;
8847  
8848  	frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8849  		| power_management << POWER_MANAGEMENT_SHIFT;
8850  	return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8851  				GENERAL_CONFIG, frame);
8852  }
8853  
8854  static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8855  				 u16 vl15buf, u8 crc_sizes)
8856  {
8857  	u32 frame;
8858  
8859  	frame = (u32)vau << VAU_SHIFT
8860  		| (u32)z << Z_SHIFT
8861  		| (u32)vcu << VCU_SHIFT
8862  		| (u32)vl15buf << VL15BUF_SHIFT
8863  		| (u32)crc_sizes << CRC_SIZES_SHIFT;
8864  	return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8865  				GENERAL_CONFIG, frame);
8866  }
8867  
8868  static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
8869  				    u8 *flag_bits, u16 *link_widths)
8870  {
8871  	u32 frame;
8872  
8873  	read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8874  			 &frame);
8875  	*misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8876  	*flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8877  	*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8878  }
8879  
8880  static int write_vc_local_link_mode(struct hfi1_devdata *dd,
8881  				    u8 misc_bits,
8882  				    u8 flag_bits,
8883  				    u16 link_widths)
8884  {
8885  	u32 frame;
8886  
8887  	frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8888  		| (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8889  		| (u32)link_widths << LINK_WIDTH_SHIFT;
8890  	return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8891  		     frame);
8892  }
8893  
8894  static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8895  				 u8 device_rev)
8896  {
8897  	u32 frame;
8898  
8899  	frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8900  		| ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8901  	return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8902  }
8903  
8904  static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8905  				  u8 *device_rev)
8906  {
8907  	u32 frame;
8908  
8909  	read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8910  	*device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8911  	*device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8912  			& REMOTE_DEVICE_REV_MASK;
8913  }
8914  
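/*
 * Tell the 8051 which host interface version the driver speaks by
 * read-modify-writing the version field in RESERVED_REGISTERS.
 */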
8915  int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
8916  {
8917  	u32 frame;
8918  	u32 mask;
8919  
8920  	mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
8921  	read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
8922  	/* Clear, then set field */
8923  	frame &= ~mask;
8924  	frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
8925  	return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
8926  				frame);
8927  }
8928  
8929  void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
8930  		      u8 *ver_patch)
8931  {
8932  	u32 frame;
8933  
8934  	read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8935  	*ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
8936  		STS_FM_VERSION_MAJOR_MASK;
8937  	*ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
8938  		STS_FM_VERSION_MINOR_MASK;
8939  
8940  	read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
8941  	*ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
8942  		STS_FM_VERSION_PATCH_MASK;
8943  }
8944  
8945  static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8946  			       u8 *continuous)
8947  {
8948  	u32 frame;
8949  
8950  	read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8951  	*power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8952  					& POWER_MANAGEMENT_MASK;
8953  	*continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8954  					& CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8955  }
8956  
8957  static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8958  				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8959  {
8960  	u32 frame;
8961  
8962  	read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8963  	*vau = (frame >> VAU_SHIFT) & VAU_MASK;
8964  	*z = (frame >> Z_SHIFT) & Z_MASK;
8965  	*vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8966  	*vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8967  	*crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8968  }
8969  
8970  static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8971  				      u8 *remote_tx_rate,
8972  				      u16 *link_widths)
8973  {
8974  	u32 frame;
8975  
8976  	read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8977  			 &frame);
8978  	*remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8979  				& REMOTE_TX_RATE_MASK;
8980  	*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8981  }
8982  
8983  static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8984  {
8985  	u32 frame;
8986  
8987  	read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8988  	*enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8989  }
8990  
8991  static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8992  {
8993  	read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8994  }
8995  
8996  static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8997  {
8998  	read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8999  }
9000  
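/*
 * Read the current link quality indicator from the 8051.  Reports 0
 * when the link is not up or the firmware read fails.
 */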
9001  void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
9002  {
9003  	u32 frame;
9004  	int ret;
9005  
9006  	*link_quality = 0;
9007  	if (dd->pport->host_link_state & HLS_UP) {
9008  		ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
9009  				       &frame);
9010  		if (ret == 0)
9011  			*link_quality = (frame >> LINK_QUALITY_SHIFT)
9012  						& LINK_QUALITY_MASK;
9013  	}
9014  }
9015  
9016  static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
9017  {
9018  	u32 frame;
9019  
9020  	read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
9021  	*pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
9022  }
9023  
9024  static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
9025  {
9026  	u32 frame;
9027  
9028  	read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
9029  	*ldr = (frame & 0xff);
9030  }
9031  
9032  static int read_tx_settings(struct hfi1_devdata *dd,
9033  			    u8 *enable_lane_tx,
9034  			    u8 *tx_polarity_inversion,
9035  			    u8 *rx_polarity_inversion,
9036  			    u8 *max_rate)
9037  {
9038  	u32 frame;
9039  	int ret;
9040  
9041  	ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
9042  	*enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
9043  				& ENABLE_LANE_TX_MASK;
9044  	*tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
9045  				& TX_POLARITY_INVERSION_MASK;
9046  	*rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
9047  				& RX_POLARITY_INVERSION_MASK;
9048  	*max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
9049  	return ret;
9050  }
9051  
9052  static int write_tx_settings(struct hfi1_devdata *dd,
9053  			     u8 enable_lane_tx,
9054  			     u8 tx_polarity_inversion,
9055  			     u8 rx_polarity_inversion,
9056  			     u8 max_rate)
9057  {
9058  	u32 frame;
9059  
9060  	/* no need to mask, all variable sizes match field widths */
9061  	frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
9062  		| tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
9063  		| rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
9064  		| max_rate << MAX_RATE_SHIFT;
9065  	return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
9066  }
9067  
9068  /*
9069   * Read an idle LCB message.
9070   *
9071   * Returns 0 on success, -EINVAL on error
9072   */
9073  static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
9074  {
9075  	int ret;
9076  
9077  	ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
9078  	if (ret != HCMD_SUCCESS) {
9079  		dd_dev_err(dd, "read idle message: type %d, err %d\n",
9080  			   (u32)type, ret);
9081  		return -EINVAL;
9082  	}
9083  	dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
9084  	/* return only the payload as we already know the type */
9085  	*data_out >>= IDLE_PAYLOAD_SHIFT;
9086  	return 0;
9087  }
9088  
9089  /*
9090   * Read an idle SMA message.  To be done in response to a notification from
9091   * the 8051.
9092   *
9093   * Returns 0 on success, -EINVAL on error
9094   */
9095  static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
9096  {
9097  	return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
9098  				 data);
9099  }
9100  
9101  /*
9102   * Send an idle LCB message.
9103   *
9104   * Returns 0 on success, -EINVAL on error
9105   */
9106  static int send_idle_message(struct hfi1_devdata *dd, u64 data)
9107  {
9108  	int ret;
9109  
9110  	dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
9111  	ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
9112  	if (ret != HCMD_SUCCESS) {
9113  		dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
9114  			   data, ret);
9115  		return -EINVAL;
9116  	}
9117  	return 0;
9118  }
9119  
9120  /*
9121   * Send an idle SMA message.
9122   *
9123   * Returns 0 on success, -EINVAL on error
9124   */
9125  int send_idle_sma(struct hfi1_devdata *dd, u64 message)
9126  {
9127  	u64 data;
9128  
9129  	data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
9130  		((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
9131  	return send_idle_message(dd, data);
9132  }
9133  
9134  /*
9135   * Initialize the LCB then do a quick link up.  This may or may not be
9136   * in loopback.
9137   *
9138   * return 0 on success, -errno on error
9139   */
9140  static int do_quick_linkup(struct hfi1_devdata *dd)
9141  {
9142  	int ret;
9143  
9144  	lcb_shutdown(dd, 0);
9145  
9146  	if (loopback) {
9147  		/* LCB_CFG_LOOPBACK.VAL = 2 */
9148  		/* LCB_CFG_LANE_WIDTH.VAL = 0 */
9149  		write_csr(dd, DC_LCB_CFG_LOOPBACK,
9150  			  IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
9151  		write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
9152  	}
9153  
9154  	/* start the LCBs */
9155  	/* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
9156  	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
9157  
9158  	/* simulator only loopback steps */
9159  	if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9160  		/* LCB_CFG_RUN.EN = 1 */
9161  		write_csr(dd, DC_LCB_CFG_RUN,
9162  			  1ull << DC_LCB_CFG_RUN_EN_SHIFT);
9163  
9164  		ret = wait_link_transfer_active(dd, 10);
9165  		if (ret)
9166  			return ret;
9167  
9168  		write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
9169  			  1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
9170  	}
9171  
9172  	if (!loopback) {
9173  		/*
9174  		 * When doing quick linkup and not in loopback, both
9175  		 * sides must be done with LCB set-up before either
9176  		 * starts the quick linkup.  Put a delay here so that
9177  		 * both sides can be started and have a chance to be
9178  		 * done with LCB set up before resuming.
9179  		 */
9180  		dd_dev_err(dd,
9181  			   "Pausing for peer to be finished with LCB set up\n");
9182  		msleep(5000);
9183  		dd_dev_err(dd, "Continuing with quick linkup\n");
9184  	}
9185  
9186  	write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
9187  	set_8051_lcb_access(dd);
9188  
9189  	/*
9190  	 * State "quick" LinkUp request sets the physical link state to
9191  	 * LinkUp without a verify capability sequence.
9192  	 * This state is in simulator v37 and later.
9193  	 */
9194  	ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
9195  	if (ret != HCMD_SUCCESS) {
9196  		dd_dev_err(dd,
9197  			   "%s: set physical link state to quick LinkUp failed with return %d\n",
9198  			   __func__, ret);
9199  
9200  		set_host_lcb_access(dd);
9201  		write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9202  
9203  		if (ret >= 0)
9204  			ret = -EINVAL;
9205  		return ret;
9206  	}
9207  
9208  	return 0; /* success */
9209  }
9210  
9211  /*
9212   * Do all special steps to set up loopback.
9213   */
9214  static int init_loopback(struct hfi1_devdata *dd)
9215  {
9216  	dd_dev_info(dd, "Entering loopback mode\n");
9217  
9218  	/* all loopbacks should disable self GUID check */
9219  	write_csr(dd, DC_DC8051_CFG_MODE,
9220  		  (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
9221  
9222  	/*
9223  	 * The simulator has only one loopback option - LCB.  Switch
9224  	 * to that option, which includes quick link up.
9225  	 *
9226  	 * Accept all valid loopback values.
9227  	 */
9228  	if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9229  	    (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9230  	     loopback == LOOPBACK_CABLE)) {
9231  		loopback = LOOPBACK_LCB;
9232  		quick_linkup = 1;
9233  		return 0;
9234  	}
9235  
9236  	/*
9237  	 * SerDes loopback init sequence is handled in set_local_link_attributes
9238  	 */
9239  	if (loopback == LOOPBACK_SERDES)
9240  		return 0;
9241  
9242  	/* LCB loopback - handled at poll time */
9243  	if (loopback == LOOPBACK_LCB) {
9244  		quick_linkup = 1; /* LCB is always quick linkup */
9245  
9246  		/* not supported in emulation due to emulation RTL changes */
9247  		if (dd->icode == ICODE_FPGA_EMULATION) {
9248  			dd_dev_err(dd,
9249  				   "LCB loopback not supported in emulation\n");
9250  			return -EINVAL;
9251  		}
9252  		return 0;
9253  	}
9254  
9255  	/* external cable loopback requires no extra steps */
9256  	if (loopback == LOOPBACK_CABLE)
9257  		return 0;
9258  
9259  	dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9260  	return -EINVAL;
9261  }
9262  
9263  /*
9264   * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9265   * used in the Verify Capability link width attribute.
9266   */
9267  static u16 opa_to_vc_link_widths(u16 opa_widths)
9268  {
9269  	int i;
9270  	u16 result = 0;
9271  
9272  	static const struct link_bits {
9273  		u16 from;
9274  		u16 to;
9275  	} opa_link_xlate[] = {
9276  		{ OPA_LINK_WIDTH_1X, 1 << (1 - 1)  },
9277  		{ OPA_LINK_WIDTH_2X, 1 << (2 - 1)  },
9278  		{ OPA_LINK_WIDTH_3X, 1 << (3 - 1)  },
9279  		{ OPA_LINK_WIDTH_4X, 1 << (4 - 1)  },
9280  	};
9281  
9282  	for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9283  		if (opa_widths & opa_link_xlate[i].from)
9284  			result |= opa_link_xlate[i].to;
9285  	}
9286  	return result;
9287  }
9288  
9289  /*
9290   * Set link attributes before moving to polling.
9291   */
9292  static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9293  {
9294  	struct hfi1_devdata *dd = ppd->dd;
9295  	u8 enable_lane_tx;
9296  	u8 tx_polarity_inversion;
9297  	u8 rx_polarity_inversion;
9298  	int ret;
9299  	u32 misc_bits = 0;
9300  	/* reset our fabric serdes to clear any lingering problems */
9301  	fabric_serdes_reset(dd);
9302  
9303  	/* set the local tx rate - need to read-modify-write */
9304  	ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
9305  			       &rx_polarity_inversion, &ppd->local_tx_rate);
9306  	if (ret)
9307  		goto set_local_link_attributes_fail;
9308  
9309  	if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
9310  		/* set the tx rate to the fastest enabled */
9311  		if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9312  			ppd->local_tx_rate = 1;
9313  		else
9314  			ppd->local_tx_rate = 0;
9315  	} else {
9316  		/* set the tx rate to all enabled */
9317  		ppd->local_tx_rate = 0;
9318  		if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9319  			ppd->local_tx_rate |= 2;
9320  		if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9321  			ppd->local_tx_rate |= 1;
9322  	}
9323  
9324  	enable_lane_tx = 0xF; /* enable all four lanes */
9325  	ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9326  				rx_polarity_inversion, ppd->local_tx_rate);
9327  	if (ret != HCMD_SUCCESS)
9328  		goto set_local_link_attributes_fail;
9329  
9330  	ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
9331  	if (ret != HCMD_SUCCESS) {
9332  		dd_dev_err(dd,
9333  			   "Failed to set host interface version, return 0x%x\n",
9334  			   ret);
9335  		goto set_local_link_attributes_fail;
9336  	}
9337  
9338  	/*
9339  	 * DC supports continuous updates.
9340  	 */
9341  	ret = write_vc_local_phy(dd,
9342  				 0 /* no power management */,
9343  				 1 /* continuous updates */);
9344  	if (ret != HCMD_SUCCESS)
9345  		goto set_local_link_attributes_fail;
9346  
9347  	/* z=1 in the next call: AU of 0 is not supported by the hardware */
9348  	ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9349  				    ppd->port_crc_mode_enabled);
9350  	if (ret != HCMD_SUCCESS)
9351  		goto set_local_link_attributes_fail;
9352  
9353  	/*
9354  	 * SerDes loopback init sequence requires
9355  	 * setting bit 0 of MISC_CONFIG_BITS
9356  	 */
9357  	if (loopback == LOOPBACK_SERDES)
9358  		misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;
9359  
9360  	/*
9361  	 * An external device configuration request is used to reset the LCB
9362  	 * to retry obtaining operational lanes when the first attempt is
9363  	 * unsuccessful.
9364  	 */
9365  	if (dd->dc8051_ver >= dc8051_ver(1, 25, 0))
9366  		misc_bits |= 1 << EXT_CFG_LCB_RESET_SUPPORTED_SHIFT;
9367  
9368  	ret = write_vc_local_link_mode(dd, misc_bits, 0,
9369  				       opa_to_vc_link_widths(
9370  						ppd->link_width_enabled));
9371  	if (ret != HCMD_SUCCESS)
9372  		goto set_local_link_attributes_fail;
9373  
9374  	/* let peer know who we are */
9375  	ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9376  	if (ret == HCMD_SUCCESS)
9377  		return 0;
9378  
9379  set_local_link_attributes_fail:
9380  	dd_dev_err(dd,
9381  		   "Failed to set local link attributes, return 0x%x\n",
9382  		   ret);
9383  	return ret;
9384  }
9385  
9386  /*
9387   * Call this to start the link.
9388   * Do not do anything if the link is disabled.
9389   * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
9390   */
9391  int start_link(struct hfi1_pportdata *ppd)
9392  {
9393  	/*
9394  	 * Tune the SerDes to a ballpark setting for optimal signal and bit
9395  	 * error rate.  Needs to be done before starting the link.
9396  	 */
9397  	tune_serdes(ppd);
9398  
9399  	if (!ppd->driver_link_ready) {
9400  		dd_dev_info(ppd->dd,
9401  			    "%s: stopping link start because driver is not ready\n",
9402  			    __func__);
9403  		return 0;
9404  	}
9405  
9406  	/*
9407  	 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9408  	 * pkey table can be configured properly if the HFI unit is connected
9409  	 * to a switch port with MgmtAllowed=NO
9410  	 */
9411  	clear_full_mgmt_pkey(ppd);
9412  
9413  	return set_link_state(ppd, HLS_DN_POLL);
9414  }
9415  
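/*
 * Wait for a freshly powered or reset QSFP module to finish
 * initializing: sleep through the 500 ms inrush window, then poll the
 * IntN input for up to 2 seconds.
 */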
9416  static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9417  {
9418  	struct hfi1_devdata *dd = ppd->dd;
9419  	u64 mask;
9420  	unsigned long timeout;
9421  
9422  	/*
9423  	 * Some QSFP cables have a quirk that asserts the IntN line as a side
9424  	 * effect of power up on plug-in. We ignore this false positive
9425  	 * interrupt until the module has finished powering up by waiting for
9426  	 * a minimum timeout of the module inrush initialization time of
9427  	 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9428  	 * module have stabilized.
9429  	 */
9430  	msleep(500);
9431  
9432  	/*
9433  	 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
9434  	 */
9435  	timeout = jiffies + msecs_to_jiffies(2000);
9436  	while (1) {
9437  		mask = read_csr(dd, dd->hfi1_id ?
9438  				ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9439  		if (!(mask & QSFP_HFI0_INT_N))
9440  			break;
9441  		if (time_after(jiffies, timeout)) {
9442  			dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9443  				    __func__);
9444  			break;
9445  		}
9446  		udelay(2);
9447  	}
9448  }
9449  
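/*
 * Enable or disable the QSFP IntN interrupt source for this port.
 * When enabling, any latched IntN status is cleared first so a stale
 * assertion does not fire immediately.
 */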
9450  static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9451  {
9452  	struct hfi1_devdata *dd = ppd->dd;
9453  	u64 mask;
9454  
9455  	mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9456  	if (enable) {
9457  		/*
9458  		 * Clear the status register to avoid an immediate interrupt
9459  		 * when we re-enable the IntN pin
9460  		 */
9461  		write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9462  			  QSFP_HFI0_INT_N);
9463  		mask |= (u64)QSFP_HFI0_INT_N;
9464  	} else {
9465  		mask &= ~(u64)QSFP_HFI0_INT_N;
9466  	}
9467  	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9468  }
9469  
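/*
 * Hard-reset the QSFP module: drop and re-assert its reset line, wait
 * for the module to re-initialize, re-arm the IntN interrupt, and
 * leave the AOC transmitters disabled until set-up is complete.
 */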
9470  int reset_qsfp(struct hfi1_pportdata *ppd)
9471  {
9472  	struct hfi1_devdata *dd = ppd->dd;
9473  	u64 mask, qsfp_mask;
9474  
9475  	/* Disable INT_N from triggering QSFP interrupts */
9476  	set_qsfp_int_n(ppd, 0);
9477  
9478  	/* Reset the QSFP */
9479  	mask = (u64)QSFP_HFI0_RESET_N;
9480  
9481  	qsfp_mask = read_csr(dd,
9482  			     dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9483  	qsfp_mask &= ~mask;
9484  	write_csr(dd,
9485  		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9486  
9487  	udelay(10);
9488  
9489  	qsfp_mask |= mask;
9490  	write_csr(dd,
9491  		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9492  
9493  	wait_for_qsfp_init(ppd);
9494  
9495  	/*
9496  	 * Allow INT_N to trigger the QSFP interrupt to watch
9497  	 * for alarms and warnings
9498  	 */
9499  	set_qsfp_int_n(ppd, 1);
9500  
9501  	/*
9502  	 * After the reset, AOC transmitters are enabled by default. They need
9503  	 * to be turned off to complete the QSFP setup before they can be
9504  	 * enabled again.
9505  	 */
9506  	return set_qsfp_tx(ppd, 0);
9507  }
9508  
9509  static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9510  					u8 *qsfp_interrupt_status)
9511  {
9512  	struct hfi1_devdata *dd = ppd->dd;
9513  
9514  	if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9515  	    (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9516  		dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
9517  			   __func__);
9518  
9519  	if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9520  	    (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9521  		dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
9522  			   __func__);
9523  
9524  	/*
9525  	 * The remaining alarms/warnings don't matter if the link is down.
9526  	 */
9527  	if (ppd->host_link_state & HLS_DOWN)
9528  		return 0;
9529  
9530  	if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9531  	    (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9532  		dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
9533  			   __func__);
9534  
9535  	if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9536  	    (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9537  		dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
9538  			   __func__);
9539  
9540  	/* Byte 2 is vendor specific */
9541  
9542  	if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9543  	    (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9544  		dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
9545  			   __func__);
9546  
9547  	if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9548  	    (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9549  		dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
9550  			   __func__);
9551  
9552  	if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9553  	    (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9554  		dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
9555  			   __func__);
9556  
9557  	if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9558  	    (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9559  		dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
9560  			   __func__);
9561  
9562  	if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9563  	    (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9564  		dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
9565  			   __func__);
9566  
9567  	if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9568  	    (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9569  		dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
9570  			   __func__);
9571  
9572  	if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9573  	    (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9574  		dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
9575  			   __func__);
9576  
9577  	if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9578  	    (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9579  		dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
9580  			   __func__);
9581  
9582  	if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9583  	    (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9584  		dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
9585  			   __func__);
9586  
9587  	if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9588  	    (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9589  		dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
9590  			   __func__);
9591  
9592  	if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9593  	    (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9594  		dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
9595  			   __func__);
9596  
9597  	if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9598  	    (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9599  		dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
9600  			   __func__);
9601  
9602  	/* Bytes 9-10 and 11-12 are reserved */
9603  	/* Bytes 13-15 are vendor specific */
9604  
9605  	return 0;
9606  }
9607  
9608  /* This routine will only be scheduled if the QSFP module present signal is asserted */
9609  void qsfp_event(struct work_struct *work)
9610  {
9611  	struct qsfp_data *qd;
9612  	struct hfi1_pportdata *ppd;
9613  	struct hfi1_devdata *dd;
9614  
9615  	qd = container_of(work, struct qsfp_data, qsfp_work);
9616  	ppd = qd->ppd;
9617  	dd = ppd->dd;
9618  
9619  	/* Sanity check */
9620  	if (!qsfp_mod_present(ppd))
9621  		return;
9622  
9623  	if (ppd->host_link_state == HLS_DN_DISABLE) {
9624  		dd_dev_info(ppd->dd,
9625  			    "%s: stopping link start because link is disabled\n",
9626  			    __func__);
9627  		return;
9628  	}
9629  
9630  	/*
9631  	 * Turn DC back on after cable has been re-inserted. Up until
9632  	 * now, the DC has been in reset to save power.
9633  	 */
9634  	dc_start(dd);
9635  
9636  	if (qd->cache_refresh_required) {
9637  		set_qsfp_int_n(ppd, 0);
9638  
9639  		wait_for_qsfp_init(ppd);
9640  
9641  		/*
9642  		 * Allow INT_N to trigger the QSFP interrupt to watch
9643  		 * for alarms and warnings
9644  		 */
9645  		set_qsfp_int_n(ppd, 1);
9646  
9647  		start_link(ppd);
9648  	}
9649  
9650  	if (qd->check_interrupt_flags) {
9651  		u8 qsfp_interrupt_status[16] = {0,};
9652  
9653  		if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9654  				  &qsfp_interrupt_status[0], 16) != 16) {
9655  			dd_dev_info(dd,
9656  				    "%s: Failed to read status of QSFP module\n",
9657  				    __func__);
9658  		} else {
9659  			unsigned long flags;
9660  
9661  			handle_qsfp_error_conditions(
9662  					ppd, qsfp_interrupt_status);
9663  			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9664  			ppd->qsfp_info.check_interrupt_flags = 0;
9665  			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9666  					       flags);
9667  		}
9668  	}
9669  }
9670  
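/*
 * One-time set-up of the QSFP interrupt hardware for this port: clear
 * any latched status, program the mask and active-low invert settings,
 * and enable the per-port QSFP IRQ source.
 */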
9671  void init_qsfp_int(struct hfi1_devdata *dd)
9672  {
9673  	struct hfi1_pportdata *ppd = dd->pport;
9674  	u64 qsfp_mask;
9675  
9676  	qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9677  	/* Clear current status to avoid spurious interrupts */
9678  	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9679  		  qsfp_mask);
9680  	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9681  		  qsfp_mask);
9682  
9683  	set_qsfp_int_n(ppd, 0);
9684  
9685  	/* Handle active low nature of INT_N and MODPRST_N pins */
9686  	if (qsfp_mod_present(ppd))
9687  		qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9688  	write_csr(dd,
9689  		  dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9690  		  qsfp_mask);
9691  
9692  	/* Enable the appropriate QSFP IRQ source */
9693  	if (!dd->hfi1_id)
9694  		set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true);
9695  	else
9696  		set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true);
9697  }
9698  
9699  /*
9700   * Do a one-time initialize of the LCB block.
9701   */
9702  static void init_lcb(struct hfi1_devdata *dd)
9703  {
9704  	/* simulator does not correctly handle LCB cclk loopback, skip */
9705  	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9706  		return;
9707  
9708  	/* the DC has been reset earlier in the driver load */
9709  
9710  	/* set LCB for cclk loopback on the port */
9711  	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9712  	write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9713  	write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9714  	write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9715  	write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9716  	write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9717  	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9718  }
9719  
9720  /*
9721   * Perform a test read on the QSFP.  Return 0 on success, -ERRNO
9722   * on error.
9723   */
9724  static int test_qsfp_read(struct hfi1_pportdata *ppd)
9725  {
9726  	int ret;
9727  	u8 status;
9728  
9729  	/*
9730  	 * Report success if this is not a QSFP, or if it is a QSFP but the
9731  	 * cable is not present.
9732  	 */
9733  	if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
9734  		return 0;
9735  
9736  	/* read byte 2, the status byte */
9737  	ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9738  	if (ret < 0)
9739  		return ret;
9740  	if (ret != 1)
9741  		return -EIO;
9742  
9743  	return 0; /* success */
9744  }
9745  
9746  /*
9747   * Values for QSFP retry.
9748   *
9749   * Give up after 10s (20 x 500ms).  The overall timeout was empirically
9750   * arrived at from experience on a large cluster.
9751   */
9752  #define MAX_QSFP_RETRIES 20
9753  #define QSFP_RETRY_WAIT 500 /* msec */
9754  
9755  /*
9756   * Try a QSFP read.  If it fails, schedule a retry for later.
9757   * Called on first link activation after driver load.
9758   */
9759  static void try_start_link(struct hfi1_pportdata *ppd)
9760  {
9761  	if (test_qsfp_read(ppd)) {
9762  		/* read failed */
9763  		if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9764  			dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9765  			return;
9766  		}
9767  		dd_dev_info(ppd->dd,
9768  			    "QSFP not responding, waiting and retrying %d\n",
9769  			    (int)ppd->qsfp_retry_count);
9770  		ppd->qsfp_retry_count++;
9771  		queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
9772  				   msecs_to_jiffies(QSFP_RETRY_WAIT));
9773  		return;
9774  	}
9775  	ppd->qsfp_retry_count = 0;
9776  
9777  	start_link(ppd);
9778  }
9779  
9780  /*
9781   * Workqueue function to start the link after a delay.
9782   */
9783  void handle_start_link(struct work_struct *work)
9784  {
9785  	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9786  						  start_link_work.work);
9787  	try_start_link(ppd);
9788  }
9789  
9790  int bringup_serdes(struct hfi1_pportdata *ppd)
9791  {
9792  	struct hfi1_devdata *dd = ppd->dd;
9793  	u64 guid;
9794  	int ret;
9795  
9796  	if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9797  		add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9798  
9799  	guid = ppd->guids[HFI1_PORT_GUID_INDEX];
9800  	if (!guid) {
9801  		if (dd->base_guid)
9802  			guid = dd->base_guid + ppd->port - 1;
9803  		ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
9804  	}
9805  
9806  	/* Set linkinit_reason on power up per OPA spec */
9807  	ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9808  
9809  	/* one-time init of the LCB */
9810  	init_lcb(dd);
9811  
9812  	if (loopback) {
9813  		ret = init_loopback(dd);
9814  		if (ret < 0)
9815  			return ret;
9816  	}
9817  
9818  	get_port_type(ppd);
9819  	if (ppd->port_type == PORT_TYPE_QSFP) {
9820  		set_qsfp_int_n(ppd, 0);
9821  		wait_for_qsfp_init(ppd);
9822  		set_qsfp_int_n(ppd, 1);
9823  	}
9824  
9825  	try_start_link(ppd);
9826  	return 0;
9827  }
9828  
9829  void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9830  {
9831  	struct hfi1_devdata *dd = ppd->dd;
9832  
9833  	/*
9834  	 * Shut down the link and keep it down.  First clear the flag that
9835  	 * says the driver wants to allow the link to be up (driver_link_ready).
9836  	 * Then make sure the link is not automatically restarted
9837  	 * (link_enabled).  Cancel any pending restart.  And finally
9838  	 * go offline.
9839  	 */
9840  	ppd->driver_link_ready = 0;
9841  	ppd->link_enabled = 0;
9842  
9843  	ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9844  	flush_delayed_work(&ppd->start_link_work);
9845  	cancel_delayed_work_sync(&ppd->start_link_work);
9846  
9847  	ppd->offline_disabled_reason =
9848  			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_REBOOT);
9849  	set_link_down_reason(ppd, OPA_LINKDOWN_REASON_REBOOT, 0,
9850  			     OPA_LINKDOWN_REASON_REBOOT);
9851  	set_link_state(ppd, HLS_DN_OFFLINE);
9852  
9853  	/* disable the port */
9854  	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9855  	cancel_work_sync(&ppd->freeze_work);
9856  }
9857  
9858  static inline int init_cpu_counters(struct hfi1_devdata *dd)
9859  {
9860  	struct hfi1_pportdata *ppd;
9861  	int i;
9862  
9863  	ppd = (struct hfi1_pportdata *)(dd + 1);
9864  	for (i = 0; i < dd->num_pports; i++, ppd++) {
9865  		ppd->ibport_data.rvp.rc_acks = NULL;
9866  		ppd->ibport_data.rvp.rc_qacks = NULL;
9867  		ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9868  		ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9869  		ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9870  		if (!ppd->ibport_data.rvp.rc_acks ||
9871  		    !ppd->ibport_data.rvp.rc_delayed_comp ||
9872  		    !ppd->ibport_data.rvp.rc_qacks)
9873  			return -ENOMEM;
9874  	}
9875  
9876  	return 0;
9877  }
9878  
9879  /*
9880   * index is the index into the receive array
9881   */
9882  void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9883  		  u32 type, unsigned long pa, u16 order)
9884  {
9885  	u64 reg;
9886  
9887  	if (!(dd->flags & HFI1_PRESENT))
9888  		goto done;
9889  
9890  	if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
9891  		pa = 0;
9892  		order = 0;
9893  	} else if (type > PT_INVALID) {
9894  		dd_dev_err(dd,
9895  			   "unexpected receive array type %u for index %u, not handled\n",
9896  			   type, index);
9897  		goto done;
9898  	}
9899  	trace_hfi1_put_tid(dd, index, type, pa, order);
9900  
9901  #define RT_ADDR_SHIFT 12	/* 4KB kernel address boundary */
9902  	reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9903  		| (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9904  		| ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9905  					<< RCV_ARRAY_RT_ADDR_SHIFT;
9906  	trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
9907  	writeq(reg, dd->rcvarray_wc + (index * 8));
9908  
9909  	if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
9910  		/*
9911  		 * Eager entries are written and flushed
9912  		 *
9913  		 * Expected entries are flushed every 4 writes
9914  		 */
9915  		flush_wc();
9916  done:
9917  	return;
9918  }
9919  
9920  void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9921  {
9922  	struct hfi1_devdata *dd = rcd->dd;
9923  	u32 i;
9924  
9925  	/* this could be optimized */
9926  	for (i = rcd->eager_base; i < rcd->eager_base +
9927  		     rcd->egrbufs.alloced; i++)
9928  		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9929  
9930  	for (i = rcd->expected_base;
9931  			i < rcd->expected_base + rcd->expected_count; i++)
9932  		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9933  }
9934  
9935  static const char * const ib_cfg_name_strings[] = {
9936  	"HFI1_IB_CFG_LIDLMC",
9937  	"HFI1_IB_CFG_LWID_DG_ENB",
9938  	"HFI1_IB_CFG_LWID_ENB",
9939  	"HFI1_IB_CFG_LWID",
9940  	"HFI1_IB_CFG_SPD_ENB",
9941  	"HFI1_IB_CFG_SPD",
9942  	"HFI1_IB_CFG_RXPOL_ENB",
9943  	"HFI1_IB_CFG_LREV_ENB",
9944  	"HFI1_IB_CFG_LINKLATENCY",
9945  	"HFI1_IB_CFG_HRTBT",
9946  	"HFI1_IB_CFG_OP_VLS",
9947  	"HFI1_IB_CFG_VL_HIGH_CAP",
9948  	"HFI1_IB_CFG_VL_LOW_CAP",
9949  	"HFI1_IB_CFG_OVERRUN_THRESH",
9950  	"HFI1_IB_CFG_PHYERR_THRESH",
9951  	"HFI1_IB_CFG_LINKDEFAULT",
9952  	"HFI1_IB_CFG_PKEYS",
9953  	"HFI1_IB_CFG_MTU",
9954  	"HFI1_IB_CFG_LSTATE",
9955  	"HFI1_IB_CFG_VL_HIGH_LIMIT",
9956  	"HFI1_IB_CFG_PMA_TICKS",
9957  	"HFI1_IB_CFG_PORT"
9958  };
9959  
9960  static const char *ib_cfg_name(int which)
9961  {
9962  	if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9963  		return "invalid";
9964  	return ib_cfg_name_strings[which];
9965  }
9966  
9967  int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9968  {
9969  	struct hfi1_devdata *dd = ppd->dd;
9970  	int val = 0;
9971  
9972  	switch (which) {
9973  	case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9974  		val = ppd->link_width_enabled;
9975  		break;
9976  	case HFI1_IB_CFG_LWID: /* currently active Link-width */
9977  		val = ppd->link_width_active;
9978  		break;
9979  	case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9980  		val = ppd->link_speed_enabled;
9981  		break;
9982  	case HFI1_IB_CFG_SPD: /* current Link speed */
9983  		val = ppd->link_speed_active;
9984  		break;
9985  
9986  	case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9987  	case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9988  	case HFI1_IB_CFG_LINKLATENCY:
9989  		goto unimplemented;
9990  
9991  	case HFI1_IB_CFG_OP_VLS:
9992  		val = ppd->actual_vls_operational;
9993  		break;
9994  	case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9995  		val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9996  		break;
9997  	case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9998  		val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9999  		break;
10000  	case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10001  		val = ppd->overrun_threshold;
10002  		break;
10003  	case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10004  		val = ppd->phy_error_threshold;
10005  		break;
10006  	case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10007  		val = HLS_DEFAULT;
10008  		break;
10009  
10010  	case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
10011  	case HFI1_IB_CFG_PMA_TICKS:
10012  	default:
10013  unimplemented:
10014  		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10015  			dd_dev_info(
10016  				dd,
10017  				"%s: which %s: not implemented\n",
10018  				__func__,
10019  				ib_cfg_name(which));
10020  		break;
10021  	}
10022  
10023  	return val;
10024  }
10025  
10026  /*
10027   * The largest MAD packet size.
10028   */
10029  #define MAX_MAD_PACKET 2048
10030  
10031  /*
10032   * Return the maximum header bytes that can go on the _wire_
10033   * for this device. This count includes the ICRC which is
10034   * not part of the packet held in memory but it is appended
10035   * by the HW.
10036   * This is dependent on the device's receive header entry size.
10037   * HFI allows this to be set per-receive context, but the
10038   * driver presently enforces a global value.
10039   */
10040  u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
10041  {
10042  	/*
10043  	 * The maximum non-payload (MTU) bytes in LRH.PktLen are
10044  	 * the Receive Header Entry Size minus the PBC (or RHF) size
10045  	 * plus one DW for the ICRC appended by HW.
10046  	 *
10047  	 * dd->rcd[0].rcvhdrqentsize is in DW.
10048  	 * We use rcd[0] as all contexts will have the same value. Also,
10049  	 * the first kernel context would have been allocated by now so
10050  	 * we are guaranteed a valid value.
10051  	 */
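	/*
	 * Worked example (value is illustrative only, not a claimed
	 * default): with rcvhdrqentsize = 32 DW this returns
	 * (32 - 2 + 1) << 2 = 124 bytes of wire header, ICRC included.
	 */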
10052  	return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
10053  }
10054  
10055  /*
10056   * Set Send Length
10057   * @ppd - per port data
10058   *
10059   * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
10060   * registers compare against LRH.PktLen, so use the max bytes included
10061   * in the LRH.
10062   *
10063   * This routine changes all VL values except VL15, which it maintains at
10064   * the same value.
10065   */
10066  static void set_send_length(struct hfi1_pportdata *ppd)
10067  {
10068  	struct hfi1_devdata *dd = ppd->dd;
10069  	u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
10070  	u32 maxvlmtu = dd->vld[15].mtu;
10071  	u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
10072  			      & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
10073  		SEND_LEN_CHECK1_LEN_VL15_SHIFT;
10074  	int i, j;
10075  	u32 thres;
10076  
10077  	for (i = 0; i < ppd->vls_supported; i++) {
10078  		if (dd->vld[i].mtu > maxvlmtu)
10079  			maxvlmtu = dd->vld[i].mtu;
10080  		if (i <= 3)
10081  			len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
10082  				 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
10083  				((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
10084  		else
10085  			len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
10086  				 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
10087  				((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
10088  	}
10089  	write_csr(dd, SEND_LEN_CHECK0, len1);
10090  	write_csr(dd, SEND_LEN_CHECK1, len2);
10091  	/* adjust kernel credit return thresholds based on new MTUs */
10092  	/* all kernel receive contexts have the same hdrqentsize */
10093  	for (i = 0; i < ppd->vls_supported; i++) {
10094  		thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
10095  			    sc_mtu_to_threshold(dd->vld[i].sc,
10096  						dd->vld[i].mtu,
10097  						dd->rcd[0]->rcvhdrqentsize));
10098  		for (j = 0; j < INIT_SC_PER_VL; j++)
10099  			sc_set_cr_threshold(
10100  					pio_select_send_context_vl(dd, j, i),
10101  					    thres);
10102  	}
10103  	thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
10104  		    sc_mtu_to_threshold(dd->vld[15].sc,
10105  					dd->vld[15].mtu,
10106  					dd->rcd[0]->rcvhdrqentsize));
10107  	sc_set_cr_threshold(dd->vld[15].sc, thres);
10108  
10109  	/* Adjust maximum MTU for the port in DC */
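	/*
	 * Encoding example: an 8192-byte maximum MTU gives
	 * ilog2(8192 >> 8) + 1 = ilog2(32) + 1 = 6, while the
	 * non-power-of-two 10240 MTU uses the dedicated 10K cap value.
	 */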
10110  	dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
10111  		(ilog2(maxvlmtu >> 8) + 1);
10112  	len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
10113  	len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
10114  	len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
10115  		DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
10116  	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
10117  }
10118  
10119  static void set_lidlmc(struct hfi1_pportdata *ppd)
10120  {
10121  	int i;
10122  	u64 sreg = 0;
10123  	struct hfi1_devdata *dd = ppd->dd;
10124  	u32 mask = ~((1U << ppd->lmc) - 1);
10125  	u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
10126  	u32 lid;
10127  
10128  	/*
10129  	 * Program 0 into the CSR if the port lid is extended. This prevents
10130  	 * 9B packets from being sent out for large lids.
10131  	 */
10132  	lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
10133  	c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
10134  		| DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
10135  	c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
10136  			<< DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
10137  	      ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
10138  			<< DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
10139  	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
10140  
10141  	/*
10142  	 * Iterate over all the send contexts and set their SLID check
10143  	 */
10144  	sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
10145  			SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
10146  	       (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
10147  			SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
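	/*
	 * Example: with lmc = 2 and lid = 0x100, mask = 0xfffffffc, so the
	 * SLID check accepts source LIDs 0x100 through 0x103.
	 */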
10148  
10149  	for (i = 0; i < chip_send_contexts(dd); i++) {
10150  		hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
10151  			  i, (u32)sreg);
10152  		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
10153  	}
10154  
10155  	/* Now we have to do the same thing for the sdma engines */
10156  	sdma_update_lmc(dd, mask, lid);
10157  }
10158  
10159  static const char *state_completed_string(u32 completed)
10160  {
10161  	static const char * const state_completed[] = {
10162  		"EstablishComm",
10163  		"OptimizeEQ",
10164  		"VerifyCap"
10165  	};
10166  
10167  	if (completed < ARRAY_SIZE(state_completed))
10168  		return state_completed[completed];
10169  
10170  	return "unknown";
10171  }
10172  
10173  static const char all_lanes_dead_timeout_expired[] =
10174  	"All lanes were inactive – was the interconnect media removed?";
10175  static const char tx_out_of_policy[] =
10176  	"Passing lanes on local port do not meet the local link width policy";
10177  static const char no_state_complete[] =
10178  	"State timeout occurred before link partner completed the state";
10179  static const char * const state_complete_reasons[] = {
10180  	[0x00] = "Reason unknown",
10181  	[0x01] = "Link was halted by driver, refer to LinkDownReason",
10182  	[0x02] = "Link partner reported failure",
10183  	[0x10] = "Unable to achieve frame sync on any lane",
10184  	[0x11] =
10185  	  "Unable to find a common bit rate with the link partner",
10186  	[0x12] =
10187  	  "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
10188  	[0x13] =
10189  	  "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
10190  	[0x14] = no_state_complete,
10191  	[0x15] =
10192  	  "State timeout occurred before link partner identified equalization presets",
10193  	[0x16] =
10194  	  "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
10195  	[0x17] = tx_out_of_policy,
10196  	[0x20] = all_lanes_dead_timeout_expired,
10197  	[0x21] =
10198  	  "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
10199  	[0x22] = no_state_complete,
10200  	[0x23] =
10201  	  "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
10202  	[0x24] = tx_out_of_policy,
10203  	[0x30] = all_lanes_dead_timeout_expired,
10204  	[0x31] =
10205  	  "State timeout occurred waiting for host to process received frames",
10206  	[0x32] = no_state_complete,
10207  	[0x33] =
10208  	  "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10209  	[0x34] = tx_out_of_policy,
10210  	[0x35] = "Negotiated link width is mutually exclusive",
10211  	[0x36] =
10212  	  "Timed out before receiving verifycap frames in VerifyCap.Exchange",
10213  	[0x37] = "Unable to resolve secure data exchange",
10214  };
10215  
10216  static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10217  						     u32 code)
10218  {
10219  	const char *str = NULL;
10220  
10221  	if (code < ARRAY_SIZE(state_complete_reasons))
10222  		str = state_complete_reasons[code];
10223  
10224  	if (str)
10225  		return str;
10226  	return "Reserved";
10227  }
10228  
10229  /* describe the given last state complete frame */
10230  static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10231  				  const char *prefix)
10232  {
10233  	struct hfi1_devdata *dd = ppd->dd;
10234  	u32 success;
10235  	u32 state;
10236  	u32 reason;
10237  	u32 lanes;
10238  
10239  	/*
10240  	 * Decode frame:
10241  	 *  [ 0: 0] - success
10242  	 *  [ 3: 1] - state
10243  	 *  [ 7: 4] - next state timeout
10244  	 *  [15: 8] - reason code
10245  	 *  [31:16] - lanes
10246  	 */
10247  	success = frame & 0x1;
10248  	state = (frame >> 1) & 0x7;
10249  	reason = (frame >> 8) & 0xff;
10250  	lanes = (frame >> 16) & 0xffff;
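	/*
	 * Example: frame 0x000f1400 decodes as success = 0, state = 0
	 * (EstablishComm), reason = 0x14 (state timeout), and a passing
	 * lane mask of 0x000f.
	 */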
10251  
10252  	dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10253  		   prefix, frame);
10254  	dd_dev_err(dd, "    last reported state: %s (0x%x)\n",
10255  		   state_completed_string(state), state);
10256  	dd_dev_err(dd, "    state successfully completed: %s\n",
10257  		   success ? "yes" : "no");
10258  	dd_dev_err(dd, "    fail reason 0x%x: %s\n",
10259  		   reason, state_complete_reason_code_string(ppd, reason));
10260  	dd_dev_err(dd, "    passing lane mask: 0x%x", lanes);
10261  }
10262  
10263  /*
10264   * Read the last state complete frames and explain them.  This routine
10265   * expects to be called if the link went down during link negotiation
10266   * and initialization (LNI).  That is, anywhere between polling and link up.
10267   */
10268  static void check_lni_states(struct hfi1_pportdata *ppd)
10269  {
10270  	u32 last_local_state;
10271  	u32 last_remote_state;
10272  
10273  	read_last_local_state(ppd->dd, &last_local_state);
10274  	read_last_remote_state(ppd->dd, &last_remote_state);
10275  
10276  	/*
10277  	 * Don't report anything if there is nothing to report.  A value of
10278  	 * 0 means the link was taken down while polling and there was no
10279  	 * training in-process.
10280  	 */
10281  	if (last_local_state == 0 && last_remote_state == 0)
10282  		return;
10283  
10284  	decode_state_complete(ppd, last_local_state, "transmitted");
10285  	decode_state_complete(ppd, last_remote_state, "received");
10286  }
10287  
10288  /* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
10289  static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
10290  {
10291  	u64 reg;
10292  	unsigned long timeout;
10293  
10294  	/* watch LCB_STS_LINK_TRANSFER_ACTIVE */
10295  	timeout = jiffies + msecs_to_jiffies(wait_ms);
10296  	while (1) {
10297  		reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
10298  		if (reg)
10299  			break;
10300  		if (time_after(jiffies, timeout)) {
10301  			dd_dev_err(dd,
10302  				   "timeout waiting for LINK_TRANSFER_ACTIVE\n");
10303  			return -ETIMEDOUT;
10304  		}
10305  		udelay(2);
10306  	}
10307  	return 0;
10308  }
10309  
10310  /* called when the logical link state is not down as it should be */
10311  static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
10312  {
10313  	struct hfi1_devdata *dd = ppd->dd;
10314  
10315  	/*
10316  	 * Bring link up in LCB loopback
10317  	 */
10318  	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10319  	write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
10320  		  DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
10321  
10322  	write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
10323  	write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
10324  	write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
10325  	write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
10326  
10327  	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
10328  	(void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
10329  	udelay(3);
10330  	write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
10331  	write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
10332  
10333  	wait_link_transfer_active(dd, 100);
10334  
10335  	/*
10336  	 * Bring the link down again.
10337  	 */
10338  	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10339  	write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
10340  	write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
10341  
10342  	dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
10343  }
10344  
10345  /*
10346   * Helper for set_link_state().  Do not call except from that routine.
10347   * Expects ppd->hls_mutex to be held.
10348   *
10349   * @rem_reason value to be sent to the neighbor
10350   *
10351   * LinkDownReasons only set if transition succeeds.
10352   */
10353  static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10354  {
10355  	struct hfi1_devdata *dd = ppd->dd;
10356  	u32 previous_state;
10357  	int offline_state_ret;
10358  	int ret;
10359  
10360  	update_lcb_cache(dd);
10361  
10362  	previous_state = ppd->host_link_state;
10363  	ppd->host_link_state = HLS_GOING_OFFLINE;
10364  
10365  	/* start offline transition */
10366  	ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
10367  
10368  	if (ret != HCMD_SUCCESS) {
10369  		dd_dev_err(dd,
10370  			   "Failed to transition to Offline link state, return %d\n",
10371  			   ret);
10372  		return -EINVAL;
10373  	}
10374  	if (ppd->offline_disabled_reason ==
10375  			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
10376  		ppd->offline_disabled_reason =
10377  		HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
10378  
10379  	offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
10380  	if (offline_state_ret < 0)
10381  		return offline_state_ret;
10382  
10383  	/* Disabling AOC transmitters */
10384  	if (ppd->port_type == PORT_TYPE_QSFP &&
10385  	    ppd->qsfp_info.limiting_active &&
10386  	    qsfp_mod_present(ppd)) {
10387  		int ret;
10388  
10389  		ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10390  		if (ret == 0) {
10391  			set_qsfp_tx(ppd, 0);
10392  			release_chip_resource(dd, qsfp_resource(dd));
10393  		} else {
10394  			/* not fatal, but should warn */
10395  			dd_dev_err(dd,
10396  				   "Unable to acquire lock to turn off QSFP TX\n");
10397  		}
10398  	}
10399  
10400  	/*
10401  	 * Wait for the offline.Quiet transition if it hasn't happened yet. It
10402  	 * can take a while for the link to go down.
10403  	 */
10404  	if (offline_state_ret != PLS_OFFLINE_QUIET) {
10405  		ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
10406  		if (ret < 0)
10407  			return ret;
10408  	}
10409  
10410  	/*
10411  	 * Now in charge of LCB - must be after the physical state is
10412  	 * offline.quiet and before host_link_state is changed.
10413  	 */
10414  	set_host_lcb_access(dd);
10415  	write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10416  
10417  	/* make sure the logical state is also down */
10418  	ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10419  	if (ret)
10420  		force_logical_link_state_down(ppd);
10421  
10422  	ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10423  	update_statusp(ppd, IB_PORT_DOWN);
10424  
10425  	/*
10426  	 * The LNI has a mandatory wait time after the physical state
10427  	 * moves to Offline.Quiet.  The wait time may be different
10428  	 * depending on how the link went down.  The 8051 firmware
10429  	 * will observe the needed wait time and only move to ready
10430  	 * when that is completed.  The largest of the quiet timeouts
10431  	 * is 6s, so wait that long and then at least 0.5s more for
10432  	 * other transitions, and another 0.5s for a buffer.
10433  	 */
10434  	ret = wait_fm_ready(dd, 7000);
10435  	if (ret) {
10436  		dd_dev_err(dd,
10437  			   "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
10438  		/* state is really offline, so make it so */
10439  		ppd->host_link_state = HLS_DN_OFFLINE;
10440  		return ret;
10441  	}
10442  
10443  	/*
10444  	 * The state is now offline and the 8051 is ready to accept host
10445  	 * requests.
10446  	 *	- change our state
10447  	 *	- notify others if we were previously in a linkup state
10448  	 */
10449  	ppd->host_link_state = HLS_DN_OFFLINE;
10450  	if (previous_state & HLS_UP) {
10451  		/* went down while link was up */
10452  		handle_linkup_change(dd, 0);
10453  	} else if (previous_state
10454  			& (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10455  		/* went down while attempting link up */
10456  		check_lni_states(ppd);
10457  
10458  		/* The QSFP doesn't need to be reset on LNI failure */
10459  		ppd->qsfp_info.reset_needed = 0;
10460  	}
10461  
10462  	/* the active link width (downgrade) is 0 on link down */
10463  	ppd->link_width_active = 0;
10464  	ppd->link_width_downgrade_tx_active = 0;
10465  	ppd->link_width_downgrade_rx_active = 0;
10466  	ppd->current_egress_rate = 0;
10467  	return 0;
10468  }
10469  
10470  /* return the link state name */
10471  static const char *link_state_name(u32 state)
10472  {
10473  	const char *name;
10474  	int n = ilog2(state);
10475  	static const char * const names[] = {
10476  		[__HLS_UP_INIT_BP]	 = "INIT",
10477  		[__HLS_UP_ARMED_BP]	 = "ARMED",
10478  		[__HLS_UP_ACTIVE_BP]	 = "ACTIVE",
10479  		[__HLS_DN_DOWNDEF_BP]	 = "DOWNDEF",
10480  		[__HLS_DN_POLL_BP]	 = "POLL",
10481  		[__HLS_DN_DISABLE_BP]	 = "DISABLE",
10482  		[__HLS_DN_OFFLINE_BP]	 = "OFFLINE",
10483  		[__HLS_VERIFY_CAP_BP]	 = "VERIFY_CAP",
10484  		[__HLS_GOING_UP_BP]	 = "GOING_UP",
10485  		[__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10486  		[__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10487  	};
10488  
10489  	name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10490  	return name ? name : "unknown";
10491  }
10492  
10493  /* return the link state reason name */
10494  static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10495  {
10496  	if (state == HLS_UP_INIT) {
10497  		switch (ppd->linkinit_reason) {
10498  		case OPA_LINKINIT_REASON_LINKUP:
10499  			return "(LINKUP)";
10500  		case OPA_LINKINIT_REASON_FLAPPING:
10501  			return "(FLAPPING)";
10502  		case OPA_LINKINIT_OUTSIDE_POLICY:
10503  			return "(OUTSIDE_POLICY)";
10504  		case OPA_LINKINIT_QUARANTINED:
10505  			return "(QUARANTINED)";
10506  		case OPA_LINKINIT_INSUFIC_CAPABILITY:
10507  			return "(INSUFIC_CAPABILITY)";
10508  		default:
10509  			break;
10510  		}
10511  	}
10512  	return "";
10513  }
10514  
10515  /*
10516   * driver_pstate - convert the driver's notion of a port's
10517   * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10518   * Return -1 (converted to a u32) to indicate error.
10519   */
10520  u32 driver_pstate(struct hfi1_pportdata *ppd)
10521  {
10522  	switch (ppd->host_link_state) {
10523  	case HLS_UP_INIT:
10524  	case HLS_UP_ARMED:
10525  	case HLS_UP_ACTIVE:
10526  		return IB_PORTPHYSSTATE_LINKUP;
10527  	case HLS_DN_POLL:
10528  		return IB_PORTPHYSSTATE_POLLING;
10529  	case HLS_DN_DISABLE:
10530  		return IB_PORTPHYSSTATE_DISABLED;
10531  	case HLS_DN_OFFLINE:
10532  		return OPA_PORTPHYSSTATE_OFFLINE;
10533  	case HLS_VERIFY_CAP:
10534  		return IB_PORTPHYSSTATE_TRAINING;
10535  	case HLS_GOING_UP:
10536  		return IB_PORTPHYSSTATE_TRAINING;
10537  	case HLS_GOING_OFFLINE:
10538  		return OPA_PORTPHYSSTATE_OFFLINE;
10539  	case HLS_LINK_COOLDOWN:
10540  		return OPA_PORTPHYSSTATE_OFFLINE;
10541  	case HLS_DN_DOWNDEF:
10542  	default:
10543  		dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10544  			   ppd->host_link_state);
10545  		return -1;
10546  	}
10547  }
10548  
10549  /*
10550   * driver_lstate - convert the driver's notion of a port's
10551   * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10552   * (converted to a u32) to indicate error.
10553   */
10554  u32 driver_lstate(struct hfi1_pportdata *ppd)
10555  {
10556  	if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10557  		return IB_PORT_DOWN;
10558  
10559  	switch (ppd->host_link_state & HLS_UP) {
10560  	case HLS_UP_INIT:
10561  		return IB_PORT_INIT;
10562  	case HLS_UP_ARMED:
10563  		return IB_PORT_ARMED;
10564  	case HLS_UP_ACTIVE:
10565  		return IB_PORT_ACTIVE;
10566  	default:
10567  		dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10568  			   ppd->host_link_state);
10569  		return -1;
10570  	}
10571  }
10572  
10573  void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10574  			  u8 neigh_reason, u8 rem_reason)
10575  {
10576  	if (ppd->local_link_down_reason.latest == 0 &&
10577  	    ppd->neigh_link_down_reason.latest == 0) {
10578  		ppd->local_link_down_reason.latest = lcl_reason;
10579  		ppd->neigh_link_down_reason.latest = neigh_reason;
10580  		ppd->remote_link_down_reason = rem_reason;
10581  	}
10582  }
10583  
10584  /**
10585   * data_vls_operational() - Verify if data VL BCT credits and MTU
10586   *			    are both set.
10587   * @ppd: pointer to hfi1_pportdata structure
10588   *
10589   * Return: true - OK, false - otherwise.
10590   */
10591  static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
10592  {
10593  	int i;
10594  	u64 reg;
10595  
10596  	if (!ppd->actual_vls_operational)
10597  		return false;
10598  
10599  	for (i = 0; i < ppd->vls_supported; i++) {
10600  		reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i));
10601  		if ((reg && !ppd->dd->vld[i].mtu) ||
10602  		    (!reg && ppd->dd->vld[i].mtu))
10603  			return false;
10604  	}
10605  
10606  	return true;
10607  }
10608  
10609  /*
10610   * Change the physical and/or logical link state.
10611   *
10612   * Do not call this routine while inside an interrupt.  It contains
10613   * calls to routines that can take multiple seconds to finish.
10614   *
10615   * Returns 0 on success, -errno on failure.
10616   */
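/*
 * For example, hfi1_quiet_serdes() above uses
 * set_link_state(ppd, HLS_DN_OFFLINE) to take the port offline.
 */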
10617  int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10618  {
10619  	struct hfi1_devdata *dd = ppd->dd;
10620  	struct ib_event event = {.device = NULL};
10621  	int ret1, ret = 0;
10622  	int orig_new_state, poll_bounce;
10623  
10624  	mutex_lock(&ppd->hls_lock);
10625  
10626  	orig_new_state = state;
10627  	if (state == HLS_DN_DOWNDEF)
10628  		state = HLS_DEFAULT;
10629  
10630  	/* interpret poll -> poll as a link bounce */
10631  	poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10632  		      state == HLS_DN_POLL;
10633  
10634  	dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10635  		    link_state_name(ppd->host_link_state),
10636  		    link_state_name(orig_new_state),
10637  		    poll_bounce ? "(bounce) " : "",
10638  		    link_state_reason_name(ppd, state));
10639  
10640  	/*
10641  	 * If we're going to a (HLS_*) link state that implies the logical
10642  	 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10643  	 * reset is_sm_config_started to 0.
10644  	 */
10645  	if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10646  		ppd->is_sm_config_started = 0;
10647  
10648  	/*
10649  	 * Do nothing if the states match.  Let a poll to poll link bounce
10650  	 * go through.
10651  	 */
10652  	if (ppd->host_link_state == state && !poll_bounce)
10653  		goto done;
10654  
10655  	switch (state) {
10656  	case HLS_UP_INIT:
10657  		if (ppd->host_link_state == HLS_DN_POLL &&
10658  		    (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10659  			/*
10660  			 * Quick link up jumps from polling to here.
10661  			 *
10662  			 * Whether in normal or loopback mode, the
10663  			 * simulator jumps from polling to link up.
10664  			 * Accept that here.
10665  			 */
10666  			/* OK */
10667  		} else if (ppd->host_link_state != HLS_GOING_UP) {
10668  			goto unexpected;
10669  		}
10670  
10671  		/*
10672  		 * Wait for Link_Up physical state.
10673  		 * Physical and Logical states should already have been
10674  		 * transitioned to LinkUp and LinkInit respectively.
10675  		 */
10676  		ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
10677  		if (ret) {
10678  			dd_dev_err(dd,
10679  				   "%s: physical state did not change to LINK-UP\n",
10680  				   __func__);
10681  			break;
10682  		}
10683  
10684  		ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10685  		if (ret) {
10686  			dd_dev_err(dd,
10687  				   "%s: logical state did not change to INIT\n",
10688  				   __func__);
10689  			break;
10690  		}
10691  
10692  		/* clear old transient LINKINIT_REASON code */
10693  		if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10694  			ppd->linkinit_reason =
10695  				OPA_LINKINIT_REASON_LINKUP;
10696  
10697  		/* enable the port */
10698  		add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10699  
10700  		handle_linkup_change(dd, 1);
10701  		pio_kernel_linkup(dd);
10702  
10703  		/*
10704  		 * After link up, a new link width will have been set.
10705  		 * Update the xmit counters with regards to the new
10706  		 * link width.
10707  		 */
10708  		update_xmit_counters(ppd, ppd->link_width_active);
10709  
10710  		ppd->host_link_state = HLS_UP_INIT;
10711  		update_statusp(ppd, IB_PORT_INIT);
10712  		break;
10713  	case HLS_UP_ARMED:
10714  		if (ppd->host_link_state != HLS_UP_INIT)
10715  			goto unexpected;
10716  
10717  		if (!data_vls_operational(ppd)) {
10718  			dd_dev_err(dd,
10719  				   "%s: Invalid data VL credits or mtu\n",
10720  				   __func__);
10721  			ret = -EINVAL;
10722  			break;
10723  		}
10724  
10725  		set_logical_state(dd, LSTATE_ARMED);
10726  		ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10727  		if (ret) {
10728  			dd_dev_err(dd,
10729  				   "%s: logical state did not change to ARMED\n",
10730  				   __func__);
10731  			break;
10732  		}
10733  		ppd->host_link_state = HLS_UP_ARMED;
10734  		update_statusp(ppd, IB_PORT_ARMED);
10735  		/*
10736  		 * The simulator does not currently implement SMA messages,
10737  		 * so neighbor_normal is not set.  Set it here when we first
10738  		 * move to Armed.
10739  		 */
10740  		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10741  			ppd->neighbor_normal = 1;
10742  		break;
10743  	case HLS_UP_ACTIVE:
10744  		if (ppd->host_link_state != HLS_UP_ARMED)
10745  			goto unexpected;
10746  
10747  		set_logical_state(dd, LSTATE_ACTIVE);
10748  		ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10749  		if (ret) {
10750  			dd_dev_err(dd,
10751  				   "%s: logical state did not change to ACTIVE\n",
10752  				   __func__);
10753  		} else {
10754  			/* tell all engines to go running */
10755  			sdma_all_running(dd);
10756  			ppd->host_link_state = HLS_UP_ACTIVE;
10757  			update_statusp(ppd, IB_PORT_ACTIVE);
10758  
10759  			/* Signal the IB layer that the port has gone active */
10760  			event.device = &dd->verbs_dev.rdi.ibdev;
10761  			event.element.port_num = ppd->port;
10762  			event.event = IB_EVENT_PORT_ACTIVE;
10763  		}
10764  		break;
10765  	case HLS_DN_POLL:
10766  		if ((ppd->host_link_state == HLS_DN_DISABLE ||
10767  		     ppd->host_link_state == HLS_DN_OFFLINE) &&
10768  		    dd->dc_shutdown)
10769  			dc_start(dd);
10770  		/* Hand LED control to the DC */
10771  		write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10772  
10773  		if (ppd->host_link_state != HLS_DN_OFFLINE) {
10774  			u8 tmp = ppd->link_enabled;
10775  
10776  			ret = goto_offline(ppd, ppd->remote_link_down_reason);
10777  			if (ret) {
10778  				ppd->link_enabled = tmp;
10779  				break;
10780  			}
10781  			ppd->remote_link_down_reason = 0;
10782  
10783  			if (ppd->driver_link_ready)
10784  				ppd->link_enabled = 1;
10785  		}
10786  
10787  		set_all_slowpath(ppd->dd);
10788  		ret = set_local_link_attributes(ppd);
10789  		if (ret)
10790  			break;
10791  
10792  		ppd->port_error_action = 0;
10793  
10794  		if (quick_linkup) {
10795  			/* quick linkup does not go into polling */
10796  			ret = do_quick_linkup(dd);
10797  		} else {
10798  			ret1 = set_physical_link_state(dd, PLS_POLLING);
10799  			if (!ret1)
10800  				ret1 = wait_phys_link_out_of_offline(ppd,
10801  								     3000);
10802  			if (ret1 != HCMD_SUCCESS) {
10803  				dd_dev_err(dd,
10804  					   "Failed to transition to Polling link state, return 0x%x\n",
10805  					   ret1);
10806  				ret = -EINVAL;
10807  			}
10808  		}
10809  
10810  		/*
10811  		 * Change the host link state after requesting DC8051 to
10812  		 * change its physical state so that we can ignore any
10813  		 * interrupt with stale LNI(XX) error, which will not be
10814  		 * cleared until DC8051 transitions to Polling state.
10815  		 */
10816  		ppd->host_link_state = HLS_DN_POLL;
10817  		ppd->offline_disabled_reason =
10818  			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10819  		/*
10820  		 * If an error occurred above, go back to offline.  The
10821  		 * caller may reschedule another attempt.
10822  		 */
10823  		if (ret)
10824  			goto_offline(ppd, 0);
10825  		else
10826  			log_physical_state(ppd, PLS_POLLING);
10827  		break;
10828  	case HLS_DN_DISABLE:
10829  		/* link is disabled */
10830  		ppd->link_enabled = 0;
10831  
10832  		/* allow any state to transition to disabled */
10833  
10834  		/* must transition to offline first */
10835  		if (ppd->host_link_state != HLS_DN_OFFLINE) {
10836  			ret = goto_offline(ppd, ppd->remote_link_down_reason);
10837  			if (ret)
10838  				break;
10839  			ppd->remote_link_down_reason = 0;
10840  		}
10841  
10842  		if (!dd->dc_shutdown) {
10843  			ret1 = set_physical_link_state(dd, PLS_DISABLED);
10844  			if (ret1 != HCMD_SUCCESS) {
10845  				dd_dev_err(dd,
10846  					   "Failed to transition to Disabled link state, return 0x%x\n",
10847  					   ret1);
10848  				ret = -EINVAL;
10849  				break;
10850  			}
10851  			ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
10852  			if (ret) {
10853  				dd_dev_err(dd,
10854  					   "%s: physical state did not change to DISABLED\n",
10855  					   __func__);
10856  				break;
10857  			}
10858  			dc_shutdown(dd);
10859  		}
10860  		ppd->host_link_state = HLS_DN_DISABLE;
10861  		break;
10862  	case HLS_DN_OFFLINE:
10863  		if (ppd->host_link_state == HLS_DN_DISABLE)
10864  			dc_start(dd);
10865  
10866  		/* allow any state to transition to offline */
10867  		ret = goto_offline(ppd, ppd->remote_link_down_reason);
10868  		if (!ret)
10869  			ppd->remote_link_down_reason = 0;
10870  		break;
10871  	case HLS_VERIFY_CAP:
10872  		if (ppd->host_link_state != HLS_DN_POLL)
10873  			goto unexpected;
10874  		ppd->host_link_state = HLS_VERIFY_CAP;
10875  		log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP);
10876  		break;
10877  	case HLS_GOING_UP:
10878  		if (ppd->host_link_state != HLS_VERIFY_CAP)
10879  			goto unexpected;
10880  
10881  		ret1 = set_physical_link_state(dd, PLS_LINKUP);
10882  		if (ret1 != HCMD_SUCCESS) {
10883  			dd_dev_err(dd,
10884  				   "Failed to transition to link up state, return 0x%x\n",
10885  				   ret1);
10886  			ret = -EINVAL;
10887  			break;
10888  		}
10889  		ppd->host_link_state = HLS_GOING_UP;
10890  		break;
10891  
10892  	case HLS_GOING_OFFLINE:		/* transient within goto_offline() */
10893  	case HLS_LINK_COOLDOWN:		/* transient within goto_offline() */
10894  	default:
10895  		dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10896  			    __func__, state);
10897  		ret = -EINVAL;
10898  		break;
10899  	}
10900  
10901  	goto done;
10902  
10903  unexpected:
10904  	dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10905  		   __func__, link_state_name(ppd->host_link_state),
10906  		   link_state_name(state));
10907  	ret = -EINVAL;
10908  
10909  done:
10910  	mutex_unlock(&ppd->hls_lock);
10911  
10912  	if (event.device)
10913  		ib_dispatch_event(&event);
10914  
10915  	return ret;
10916  }
10917  
10918  int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10919  {
10920  	u64 reg;
10921  	int ret = 0;
10922  
10923  	switch (which) {
10924  	case HFI1_IB_CFG_LIDLMC:
10925  		set_lidlmc(ppd);
10926  		break;
10927  	case HFI1_IB_CFG_VL_HIGH_LIMIT:
10928  		/*
10929  		 * The VL Arbitrator high limit is sent in units of 4k
10930  		 * bytes, while HFI stores it in units of 64 bytes.
10931  		 */
10932  		val *= 4096 / 64;
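		/* e.g., an incoming value of 2 (2 * 4 KB) becomes 128 64-byte units */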
10933  		reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10934  			<< SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10935  		write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10936  		break;
10937  	case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10938  		/* HFI only supports POLL as the default link down state */
10939  		if (val != HLS_DN_POLL)
10940  			ret = -EINVAL;
10941  		break;
10942  	case HFI1_IB_CFG_OP_VLS:
10943  		if (ppd->vls_operational != val) {
10944  			ppd->vls_operational = val;
10945  			if (!ppd->port)
10946  				ret = -EINVAL;
10947  		}
10948  		break;
10949  	/*
10950  	 * For link width, link width downgrade, and speed enable, always AND
10951  	 * the setting with what is actually supported.  This has two benefits.
10952  	 * First, enabled can't have unsupported values, no matter what the
10953  	 * SM or FM might want.  Second, the ALL_SUPPORTED wildcards that mean
10954  	 * "fill in with your supported value" have all the bits in the
10955  	 * field set, so simply ANDing with supported has the desired result.
10956  	 */
10957  	case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10958  		ppd->link_width_enabled = val & ppd->link_width_supported;
10959  		break;
10960  	case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10961  		ppd->link_width_downgrade_enabled =
10962  				val & ppd->link_width_downgrade_supported;
10963  		break;
10964  	case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10965  		ppd->link_speed_enabled = val & ppd->link_speed_supported;
10966  		break;
10967  	case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10968  		/*
10969  		 * HFI does not follow IB specs, save this value
10970  		 * so we can report it, if asked.
10971  		 */
10972  		ppd->overrun_threshold = val;
10973  		break;
10974  	case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10975  		/*
10976  		 * HFI does not follow IB specs, save this value
10977  		 * so we can report it, if asked.
10978  		 */
10979  		ppd->phy_error_threshold = val;
10980  		break;
10981  
10982  	case HFI1_IB_CFG_MTU:
10983  		set_send_length(ppd);
10984  		break;
10985  
10986  	case HFI1_IB_CFG_PKEYS:
10987  		if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10988  			set_partition_keys(ppd);
10989  		break;
10990  
10991  	default:
10992  		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10993  			dd_dev_info(ppd->dd,
10994  				    "%s: which %s, val 0x%x: not implemented\n",
10995  				    __func__, ib_cfg_name(which), val);
10996  		break;
10997  	}
10998  	return ret;
10999  }
11000  
11001  /* begin functions related to vl arbitration table caching */
11002  static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
11003  {
11004  	int i;
11005  
11006  	BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11007  			VL_ARB_LOW_PRIO_TABLE_SIZE);
11008  	BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11009  			VL_ARB_HIGH_PRIO_TABLE_SIZE);
11010  
11011  	/*
11012  	 * Note that we always return values directly from the
11013  	 * 'vl_arb_cache' (and do no CSR reads) in response to a
11014  	 * 'Get(VLArbTable)'. This is obviously correct after a
11015  	 * 'Set(VLArbTable)', since the cache will then be up to
11016  	 * date. But it's also correct prior to any 'Set(VLArbTable)'
11017  	 * since then both the cache, and the relevant h/w registers
11018  	 * will be zeroed.
11019  	 */
11020  
11021  	for (i = 0; i < MAX_PRIO_TABLE; i++)
11022  		spin_lock_init(&ppd->vl_arb_cache[i].lock);
11023  }
11024  
11025  /*
11026   * vl_arb_lock_cache
11027   *
11028   * All other vl_arb_* functions should be called only after locking
11029   * the cache.
11030   */
11031  static inline struct vl_arb_cache *
11032  vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
11033  {
11034  	if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
11035  		return NULL;
11036  	spin_lock(&ppd->vl_arb_cache[idx].lock);
11037  	return &ppd->vl_arb_cache[idx];
11038  }
11039  
11040  static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
11041  {
11042  	spin_unlock(&ppd->vl_arb_cache[idx].lock);
11043  }
11044  
11045  static void vl_arb_get_cache(struct vl_arb_cache *cache,
11046  			     struct ib_vl_weight_elem *vl)
11047  {
11048  	memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
11049  }
11050  
11051  static void vl_arb_set_cache(struct vl_arb_cache *cache,
11052  			     struct ib_vl_weight_elem *vl)
11053  {
11054  	memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11055  }
11056  
11057  static int vl_arb_match_cache(struct vl_arb_cache *cache,
11058  			      struct ib_vl_weight_elem *vl)
11059  {
11060  	return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11061  }
11062  
11063  /* end functions related to vl arbitration table caching */
11064  
11065  static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
11066  			  u32 size, struct ib_vl_weight_elem *vl)
11067  {
11068  	struct hfi1_devdata *dd = ppd->dd;
11069  	u64 reg;
11070  	unsigned int i, is_up = 0;
11071  	int drain, ret = 0;
11072  
11073  	mutex_lock(&ppd->hls_lock);
11074  
11075  	if (ppd->host_link_state & HLS_UP)
11076  		is_up = 1;
11077  
11078  	drain = !is_ax(dd) && is_up;
11079  
11080  	if (drain)
11081  		/*
11082  		 * Before adjusting VL arbitration weights, empty per-VL
11083  		 * FIFOs, otherwise a packet whose VL weight is being
11084  		 * set to 0 could get stuck in a FIFO with no chance to
11085  		 * egress.
11086  		 */
11087  		ret = stop_drain_data_vls(dd);
11088  
11089  	if (ret) {
11090  		dd_dev_err(
11091  			dd,
11092  			"%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
11093  			__func__);
11094  		goto err;
11095  	}
11096  
11097  	for (i = 0; i < size; i++, vl++) {
11098  		/*
11099  		 * NOTE: The low priority shift and mask are used here, but
11100  		 * they are the same for both the low and high registers.
11101  		 */
11102  		reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
11103  				<< SEND_LOW_PRIORITY_LIST_VL_SHIFT)
11104  		      | (((u64)vl->weight
11105  				& SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
11106  				<< SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
11107  		write_csr(dd, target + (i * 8), reg);
11108  	}
11109  	pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
11110  
11111  	if (drain)
11112  		open_fill_data_vls(dd); /* reopen all VLs */
11113  
11114  err:
11115  	mutex_unlock(&ppd->hls_lock);
11116  
11117  	return ret;
11118  }
11119  
11120  /*
11121   * Read one credit merge VL register.
11122   */
11123  static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
11124  			   struct vl_limit *vll)
11125  {
11126  	u64 reg = read_csr(dd, csr);
11127  
11128  	vll->dedicated = cpu_to_be16(
11129  		(reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
11130  		& SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
11131  	vll->shared = cpu_to_be16(
11132  		(reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
11133  		& SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
11134  }
11135  
11136  /*
11137   * Read the current credit merge limits.
11138   */
11139  static int get_buffer_control(struct hfi1_devdata *dd,
11140  			      struct buffer_control *bc, u16 *overall_limit)
11141  {
11142  	u64 reg;
11143  	int i;
11144  
11145  	/* not all entries are filled in */
11146  	memset(bc, 0, sizeof(*bc));
11147  
11148  	/* OPA and HFI have a 1-1 mapping */
11149  	for (i = 0; i < TXE_NUM_DATA_VL; i++)
11150  		read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
11151  
11152  	/* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
11153  	read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
11154  
11155  	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11156  	bc->overall_shared_limit = cpu_to_be16(
11157  		(reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
11158  		& SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
11159  	if (overall_limit)
11160  		*overall_limit = (reg
11161  			>> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
11162  			& SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
11163  	return sizeof(struct buffer_control);
11164  }
11165  
11166  static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11167  {
11168  	u64 reg;
11169  	int i;
11170  
11171  	/* each register contains 16 SC->VLnt mappings, 4 bits each */
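	/* e.g., a low byte of 0x21 maps SC0 -> VL 1 and SC1 -> VL 2 */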
11172  	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
11173  	for (i = 0; i < sizeof(u64); i++) {
11174  		u8 byte = *(((u8 *)&reg) + i);
11175  
11176  		dp->vlnt[2 * i] = byte & 0xf;
11177  		dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
11178  	}
11179  
11180  	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
11181  	for (i = 0; i < sizeof(u64); i++) {
11182  		u8 byte = *(((u8 *)&reg) + i);
11183  
11184  		dp->vlnt[16 + (2 * i)] = byte & 0xf;
11185  		dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
11186  	}
11187  	return sizeof(struct sc2vlnt);
11188  }
11189  
11190  static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
11191  			      struct ib_vl_weight_elem *vl)
11192  {
11193  	unsigned int i;
11194  
11195  	for (i = 0; i < nelems; i++, vl++) {
11196  		vl->vl = 0xf;
11197  		vl->weight = 0;
11198  	}
11199  }
11200  
11201  static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11202  {
11203  	write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
11204  		  DC_SC_VL_VAL(15_0,
11205  			       0, dp->vlnt[0] & 0xf,
11206  			       1, dp->vlnt[1] & 0xf,
11207  			       2, dp->vlnt[2] & 0xf,
11208  			       3, dp->vlnt[3] & 0xf,
11209  			       4, dp->vlnt[4] & 0xf,
11210  			       5, dp->vlnt[5] & 0xf,
11211  			       6, dp->vlnt[6] & 0xf,
11212  			       7, dp->vlnt[7] & 0xf,
11213  			       8, dp->vlnt[8] & 0xf,
11214  			       9, dp->vlnt[9] & 0xf,
11215  			       10, dp->vlnt[10] & 0xf,
11216  			       11, dp->vlnt[11] & 0xf,
11217  			       12, dp->vlnt[12] & 0xf,
11218  			       13, dp->vlnt[13] & 0xf,
11219  			       14, dp->vlnt[14] & 0xf,
11220  			       15, dp->vlnt[15] & 0xf));
11221  	write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
11222  		  DC_SC_VL_VAL(31_16,
11223  			       16, dp->vlnt[16] & 0xf,
11224  			       17, dp->vlnt[17] & 0xf,
11225  			       18, dp->vlnt[18] & 0xf,
11226  			       19, dp->vlnt[19] & 0xf,
11227  			       20, dp->vlnt[20] & 0xf,
11228  			       21, dp->vlnt[21] & 0xf,
11229  			       22, dp->vlnt[22] & 0xf,
11230  			       23, dp->vlnt[23] & 0xf,
11231  			       24, dp->vlnt[24] & 0xf,
11232  			       25, dp->vlnt[25] & 0xf,
11233  			       26, dp->vlnt[26] & 0xf,
11234  			       27, dp->vlnt[27] & 0xf,
11235  			       28, dp->vlnt[28] & 0xf,
11236  			       29, dp->vlnt[29] & 0xf,
11237  			       30, dp->vlnt[30] & 0xf,
11238  			       31, dp->vlnt[31] & 0xf));
11239  }
11240  
11241  static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11242  			u16 limit)
11243  {
11244  	if (limit != 0)
11245  		dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
11246  			    what, (int)limit, idx);
11247  }
11248  
11249  /* change only the shared limit portion of SendCmGlobalCredit */
11250  static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11251  {
11252  	u64 reg;
11253  
11254  	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11255  	reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
11256  	reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
11257  	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11258  }
11259  
11260  /* change only the total credit limit portion of SendCmGlobalCredit */
11261  static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
11262  {
11263  	u64 reg;
11264  
11265  	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11266  	reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
11267  	reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
11268  	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11269  }
11270  
11271  /* set the given per-VL shared limit */
11272  static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
11273  {
11274  	u64 reg;
11275  	u32 addr;
11276  
11277  	if (vl < TXE_NUM_DATA_VL)
11278  		addr = SEND_CM_CREDIT_VL + (8 * vl);
11279  	else
11280  		addr = SEND_CM_CREDIT_VL15;
11281  
11282  	reg = read_csr(dd, addr);
11283  	reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11284  	reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11285  	write_csr(dd, addr, reg);
11286  }
11287  
11288  /* set the given per-VL dedicated limit */
11289  static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11290  {
11291  	u64 reg;
11292  	u32 addr;
11293  
11294  	if (vl < TXE_NUM_DATA_VL)
11295  		addr = SEND_CM_CREDIT_VL + (8 * vl);
11296  	else
11297  		addr = SEND_CM_CREDIT_VL15;
11298  
11299  	reg = read_csr(dd, addr);
11300  	reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11301  	reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11302  	write_csr(dd, addr, reg);
11303  }
11304  
11305  /* spin until the given per-VL status mask bits clear */
11306  static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
11307  				     const char *which)
11308  {
11309  	unsigned long timeout;
11310  	u64 reg;
11311  
11312  	timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
11313  	while (1) {
11314  		reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
11315  
11316  		if (reg == 0)
11317  			return;	/* success */
11318  		if (time_after(jiffies, timeout))
11319  			break;		/* timed out */
11320  		udelay(1);
11321  	}
11322  
11323  	dd_dev_err(dd,
11324  		   "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
11325  		   which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
11326  	/*
11327  	 * If this occurs, it is likely there was a credit loss on the link.
11328  	 * The only recovery from that is a link bounce.
11329  	 */
11330  	dd_dev_err(dd,
11331  		   "Continuing anyway.  A credit loss may occur.  Suggest a link bounce\n");
11332  }
11333  
11334  /*
11335   * The number of credits on the VLs may be changed while everything
11336   * is "live", but the following algorithm must be followed due to
11337   * how the hardware is actually implemented.  In particular,
11338   * Return_Credit_Status[] is the only correct status check.
11339   *
11340   * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11341   *     set Global_Shared_Credit_Limit = 0
11342   *     use_all_vl = 1
11343   * mask0 = all VLs that are changing either dedicated or shared limits
11344   * set Shared_Limit[mask0] = 0
11345   * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11346   * if (changing any dedicated limit)
11347   *     mask1 = all VLs that are lowering dedicated limits
11348   *     lower Dedicated_Limit[mask1]
11349   *     spin until Return_Credit_Status[mask1] == 0
11350   *     raise Dedicated_Limits
11351   * raise Shared_Limits
11352   * raise Global_Shared_Credit_Limit
11353   *
11354   * lower = if the new limit is lower, set the limit to the new value
11355   * raise = if the new limit is higher than the current value (may be changed
11356   *	earlier in the algorithm), set the limit to the new value
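 *
 * Illustrative walk-through (hypothetical numbers, not from any spec):
 * suppose only VL0's dedicated limit drops from 100 to 50 credits.  VL0 is
 * the only changing VL, so Shared_Limit[VL0] is zeroed first and
 * Return_Credit_Status[VL0] is polled until it clears; VL0 is lowering its
 * dedicated limit, so Dedicated_Limit[VL0] is then written to 50 and polled
 * again; finally the shared limits are raised back (restoring VL0's old
 * shared value) and, since the new total is lower, the global total credit
 * limit is lowered last.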
11357   */
11358  int set_buffer_control(struct hfi1_pportdata *ppd,
11359  		       struct buffer_control *new_bc)
11360  {
11361  	struct hfi1_devdata *dd = ppd->dd;
11362  	u64 changing_mask, ld_mask, stat_mask;
11363  	int change_count;
11364  	int i, use_all_mask;
11365  	int this_shared_changing;
11366  	int vl_count = 0, ret;
11367  	/*
11368  	 * A0: add the variable any_shared_limit_changing below and in the
11369  	 * algorithm above.  If removing A0 support, it can be removed.
11370  	 */
11371  	int any_shared_limit_changing;
11372  	struct buffer_control cur_bc;
11373  	u8 changing[OPA_MAX_VLS];
11374  	u8 lowering_dedicated[OPA_MAX_VLS];
11375  	u16 cur_total;
11376  	u32 new_total = 0;
11377  	const u64 all_mask =
11378  	SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11379  	 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11380  	 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11381  	 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11382  	 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11383  	 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11384  	 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11385  	 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11386  	 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11387  
11388  #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11389  #define NUM_USABLE_VLS 16	/* look at VL15 and less */
11390  
11391  	/* find the new total credits, do sanity check on unused VLs */
11392  	for (i = 0; i < OPA_MAX_VLS; i++) {
11393  		if (valid_vl(i)) {
11394  			new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11395  			continue;
11396  		}
11397  		nonzero_msg(dd, i, "dedicated",
11398  			    be16_to_cpu(new_bc->vl[i].dedicated));
11399  		nonzero_msg(dd, i, "shared",
11400  			    be16_to_cpu(new_bc->vl[i].shared));
11401  		new_bc->vl[i].dedicated = 0;
11402  		new_bc->vl[i].shared = 0;
11403  	}
11404  	new_total += be16_to_cpu(new_bc->overall_shared_limit);
11405  
11406  	/* fetch the current values */
11407  	get_buffer_control(dd, &cur_bc, &cur_total);
11408  
11409  	/*
11410  	 * Create the masks we will use.
11411  	 */
11412  	memset(changing, 0, sizeof(changing));
11413  	memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
11414  	/*
11415  	 * NOTE: Assumes that the individual VL bits are adjacent and in
11416  	 * increasing order
11417  	 */
11418  	stat_mask =
11419  		SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11420  	changing_mask = 0;
11421  	ld_mask = 0;
11422  	change_count = 0;
11423  	any_shared_limit_changing = 0;
11424  	for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11425  		if (!valid_vl(i))
11426  			continue;
11427  		this_shared_changing = new_bc->vl[i].shared
11428  						!= cur_bc.vl[i].shared;
11429  		if (this_shared_changing)
11430  			any_shared_limit_changing = 1;
11431  		if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11432  		    this_shared_changing) {
11433  			changing[i] = 1;
11434  			changing_mask |= stat_mask;
11435  			change_count++;
11436  		}
11437  		if (be16_to_cpu(new_bc->vl[i].dedicated) <
11438  					be16_to_cpu(cur_bc.vl[i].dedicated)) {
11439  			lowering_dedicated[i] = 1;
11440  			ld_mask |= stat_mask;
11441  		}
11442  	}
11443  
11444  	/* bracket the credit change with a total adjustment */
11445  	if (new_total > cur_total)
11446  		set_global_limit(dd, new_total);
11447  
11448  	/*
11449  	 * Start the credit change algorithm.
11450  	 */
11451  	use_all_mask = 0;
11452  	if ((be16_to_cpu(new_bc->overall_shared_limit) <
11453  	     be16_to_cpu(cur_bc.overall_shared_limit)) ||
11454  	    (is_ax(dd) && any_shared_limit_changing)) {
11455  		set_global_shared(dd, 0);
11456  		cur_bc.overall_shared_limit = 0;
11457  		use_all_mask = 1;
11458  	}
11459  
11460  	for (i = 0; i < NUM_USABLE_VLS; i++) {
11461  		if (!valid_vl(i))
11462  			continue;
11463  
11464  		if (changing[i]) {
11465  			set_vl_shared(dd, i, 0);
11466  			cur_bc.vl[i].shared = 0;
11467  		}
11468  	}
11469  
11470  	wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
11471  				 "shared");
11472  
11473  	if (change_count > 0) {
11474  		for (i = 0; i < NUM_USABLE_VLS; i++) {
11475  			if (!valid_vl(i))
11476  				continue;
11477  
11478  			if (lowering_dedicated[i]) {
11479  				set_vl_dedicated(dd, i,
11480  						 be16_to_cpu(new_bc->
11481  							     vl[i].dedicated));
11482  				cur_bc.vl[i].dedicated =
11483  						new_bc->vl[i].dedicated;
11484  			}
11485  		}
11486  
11487  		wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11488  
11489  		/* now raise all dedicated that are going up */
11490  		for (i = 0; i < NUM_USABLE_VLS; i++) {
11491  			if (!valid_vl(i))
11492  				continue;
11493  
11494  			if (be16_to_cpu(new_bc->vl[i].dedicated) >
11495  					be16_to_cpu(cur_bc.vl[i].dedicated))
11496  				set_vl_dedicated(dd, i,
11497  						 be16_to_cpu(new_bc->
11498  							     vl[i].dedicated));
11499  		}
11500  	}
11501  
11502  	/* next raise all shared that are going up */
11503  	for (i = 0; i < NUM_USABLE_VLS; i++) {
11504  		if (!valid_vl(i))
11505  			continue;
11506  
11507  		if (be16_to_cpu(new_bc->vl[i].shared) >
11508  				be16_to_cpu(cur_bc.vl[i].shared))
11509  			set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11510  	}
11511  
11512  	/* finally raise the global shared */
11513  	if (be16_to_cpu(new_bc->overall_shared_limit) >
11514  	    be16_to_cpu(cur_bc.overall_shared_limit))
11515  		set_global_shared(dd,
11516  				  be16_to_cpu(new_bc->overall_shared_limit));
11517  
11518  	/* bracket the credit change with a total adjustment */
11519  	if (new_total < cur_total)
11520  		set_global_limit(dd, new_total);
11521  
11522  	/*
11523  	 * Determine the actual number of operational VLS using the number of
11524  	 * dedicated and shared credits for each VL.
11525  	 */
11526  	if (change_count > 0) {
11527  		for (i = 0; i < TXE_NUM_DATA_VL; i++)
11528  			if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11529  			    be16_to_cpu(new_bc->vl[i].shared) > 0)
11530  				vl_count++;
11531  		ppd->actual_vls_operational = vl_count;
11532  		ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11533  				    ppd->actual_vls_operational :
11534  				    ppd->vls_operational,
11535  				    NULL);
11536  		if (ret == 0)
11537  			ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11538  					   ppd->actual_vls_operational :
11539  					   ppd->vls_operational, NULL);
11540  		if (ret)
11541  			return ret;
11542  	}
11543  	return 0;
11544  }
11545  
11546  /*
11547   * Read the given fabric manager table. Return the size of the
11548   * table (in bytes) on success, and a negative error code on
11549   * failure.
11550   */
11551  int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11552  
11553  {
11554  	int size;
11555  	struct vl_arb_cache *vlc;
11556  
11557  	switch (which) {
11558  	case FM_TBL_VL_HIGH_ARB:
11559  		size = 256;
11560  		/*
11561  		 * OPA specifies 128 elements (of 2 bytes each), though
11562  		 * HFI supports only 16 elements in h/w.
11563  		 */
11564  		vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11565  		vl_arb_get_cache(vlc, t);
11566  		vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11567  		break;
11568  	case FM_TBL_VL_LOW_ARB:
11569  		size = 256;
11570  		/*
11571  		 * OPA specifies 128 elements (of 2 bytes each), though
11572  		 * HFI supports only 16 elements in h/w.
11573  		 */
11574  		vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11575  		vl_arb_get_cache(vlc, t);
11576  		vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11577  		break;
11578  	case FM_TBL_BUFFER_CONTROL:
11579  		size = get_buffer_control(ppd->dd, t, NULL);
11580  		break;
11581  	case FM_TBL_SC2VLNT:
11582  		size = get_sc2vlnt(ppd->dd, t);
11583  		break;
11584  	case FM_TBL_VL_PREEMPT_ELEMS:
11585  		size = 256;
11586  		/* OPA specifies 128 elements, of 2 bytes each */
11587  		get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11588  		break;
11589  	case FM_TBL_VL_PREEMPT_MATRIX:
11590  		size = 256;
11591  		/*
11592  		 * OPA specifies that this is the same size as the VL
11593  		 * arbitration tables (i.e., 256 bytes).
11594  		 */
11595  		break;
11596  	default:
11597  		return -EINVAL;
11598  	}
11599  	return size;
11600  }
11601  
11602  /*
11603   * Write the given fabric manager table.
11604   */
11605  int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11606  {
11607  	int ret = 0;
11608  	struct vl_arb_cache *vlc;
11609  
11610  	switch (which) {
11611  	case FM_TBL_VL_HIGH_ARB:
11612  		vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11613  		if (vl_arb_match_cache(vlc, t)) {
11614  			vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11615  			break;
11616  		}
11617  		vl_arb_set_cache(vlc, t);
11618  		vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11619  		ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11620  				     VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11621  		break;
11622  	case FM_TBL_VL_LOW_ARB:
11623  		vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11624  		if (vl_arb_match_cache(vlc, t)) {
11625  			vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11626  			break;
11627  		}
11628  		vl_arb_set_cache(vlc, t);
11629  		vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11630  		ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11631  				     VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11632  		break;
11633  	case FM_TBL_BUFFER_CONTROL:
11634  		ret = set_buffer_control(ppd, t);
11635  		break;
11636  	case FM_TBL_SC2VLNT:
11637  		set_sc2vlnt(ppd->dd, t);
11638  		break;
11639  	default:
11640  		ret = -EINVAL;
11641  	}
11642  	return ret;
11643  }
11644  
11645  /*
11646   * Disable all data VLs.
11647   *
11648   * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11649   */
11650  static int disable_data_vls(struct hfi1_devdata *dd)
11651  {
11652  	if (is_ax(dd))
11653  		return 1;
11654  
11655  	pio_send_control(dd, PSC_DATA_VL_DISABLE);
11656  
11657  	return 0;
11658  }
11659  
11660  /*
11661   * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11662   * Just re-enables all data VLs (the "fill" part happens
11663   * automatically - the name was chosen for symmetry with
11664   * stop_drain_data_vls()).
11665   *
11666   * Return 0 if successful, non-zero if the VLs cannot be enabled.
11667   */
11668  int open_fill_data_vls(struct hfi1_devdata *dd)
11669  {
11670  	if (is_ax(dd))
11671  		return 1;
11672  
11673  	pio_send_control(dd, PSC_DATA_VL_ENABLE);
11674  
11675  	return 0;
11676  }
11677  
11678  /*
11679   * drain_data_vls() - assumes that disable_data_vls() has been called,
11680   * wait for occupancy (of per-VL FIFOs) for all contexts, and SDMA
11681   * engines to drop to 0.
11682   */
11683  static void drain_data_vls(struct hfi1_devdata *dd)
11684  {
11685  	sc_wait(dd);
11686  	sdma_wait(dd);
11687  	pause_for_credit_return(dd);
11688  }
11689  
11690  /*
11691   * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11692   *
11693   * Use open_fill_data_vls() to resume using data VLs.  This pair is
11694   * meant to be used like this:
11695   *
11696   * stop_drain_data_vls(dd);
11697   * // do things with per-VL resources
11698   * open_fill_data_vls(dd);
11699   */
11700  int stop_drain_data_vls(struct hfi1_devdata *dd)
11701  {
11702  	int ret;
11703  
11704  	ret = disable_data_vls(dd);
11705  	if (ret == 0)
11706  		drain_data_vls(dd);
11707  
11708  	return ret;
11709  }
11710  
11711  /*
11712   * Convert a nanosecond time to a cclock count.  No matter how slow
11713   * the cclock, a non-zero ns will always have a non-zero result.
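 *
 * Worked example (illustrative only; the real cclock period comes from
 * ASIC_CCLOCK_PS / FPGA_CCLOCK_PS): with a hypothetical 1250 ps cclock,
 * ns_to_cclock(dd, 1000) is 1000 * 1000 / 1250 = 800 cclocks, while
 * ns_to_cclock(dd, 1) computes 1000 / 1250 = 0 and is then clamped to 1,
 * so a non-zero request never becomes a zero count.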
11714   */
11715  u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11716  {
11717  	u32 cclocks;
11718  
11719  	if (dd->icode == ICODE_FPGA_EMULATION)
11720  		cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11721  	else  /* simulation pretends to be ASIC */
11722  		cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11723  	if (ns && !cclocks)	/* if ns nonzero, must be at least 1 */
11724  		cclocks = 1;
11725  	return cclocks;
11726  }
11727  
11728  /*
11729   * Convert a cclock count to nanoseconds. No matter how slow
11730   * the cclock, a non-zero cclocks will always have a non-zero result.
11731   */
11732  u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11733  {
11734  	u32 ns;
11735  
11736  	if (dd->icode == ICODE_FPGA_EMULATION)
11737  		ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11738  	else  /* simulation pretends to be ASIC */
11739  		ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11740  	if (cclocks && !ns)
11741  		ns = 1;
11742  	return ns;
11743  }
11744  
11745  /*
11746   * Dynamically adjust the receive interrupt timeout for a context based on
11747   * incoming packet rate.
11748   *
11749   * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11750   */
11751  static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11752  {
11753  	struct hfi1_devdata *dd = rcd->dd;
11754  	u32 timeout = rcd->rcvavail_timeout;
11755  
11756  	/*
11757  	 * This algorithm doubles or halves the timeout depending on whether
11758  	 * the number of packets received in this interrupt was less than or
11759  	 * greater than or equal to the interrupt count.
11760  	 *
11761  	 * The calculations below do not allow a steady state to be achieved.
11762  	 * Only at the endpoints is it possible to have an unchanging
11763  	 * timeout.
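	 *
	 * Illustrative example (hypothetical values): with rcv_intr_count at
	 * 16 and a current timeout of 64, an interrupt that handled only 5
	 * packets halves the timeout to 32, while one that handled 40 packets
	 * doubles it to 128, capped at dd->rcv_intr_timeout_csr.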
11764  	 */
11765  	if (npkts < rcv_intr_count) {
11766  		/*
11767  		 * Not enough packets arrived before the timeout, adjust
11768  		 * timeout downward.
11769  		 */
11770  		if (timeout < 2) /* already at minimum? */
11771  			return;
11772  		timeout >>= 1;
11773  	} else {
11774  		/*
11775  		 * More than enough packets arrived before the timeout, adjust
11776  		 * timeout upward.
11777  		 */
11778  		if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11779  			return;
11780  		timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11781  	}
11782  
11783  	rcd->rcvavail_timeout = timeout;
11784  	/*
11785  	 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11786  	 * been verified to be in range
11787  	 */
11788  	write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11789  			(u64)timeout <<
11790  			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11791  }
11792  
11793  void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11794  		    u32 intr_adjust, u32 npkts)
11795  {
11796  	struct hfi1_devdata *dd = rcd->dd;
11797  	u64 reg;
11798  	u32 ctxt = rcd->ctxt;
11799  
11800  	/*
11801  	 * Need to write timeout register before updating RcvHdrHead to ensure
11802  	 * that a new value is used when the HW decides to restart counting.
11803  	 */
11804  	if (intr_adjust)
11805  		adjust_rcv_timeout(rcd, npkts);
11806  	if (updegr) {
11807  		reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11808  			<< RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11809  		write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11810  	}
11811  	reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11812  		(((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11813  			<< RCV_HDR_HEAD_HEAD_SHIFT);
11814  	write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11815  }
11816  
11817  u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11818  {
11819  	u32 head, tail;
11820  
11821  	head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11822  		& RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11823  
11824  	if (rcd->rcvhdrtail_kvaddr)
11825  		tail = get_rcvhdrtail(rcd);
11826  	else
11827  		tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11828  
11829  	return head == tail;
11830  }
11831  
11832  /*
11833   * Context Control and Receive Array encoding for buffer size:
11834   *	0x0 invalid
11835   *	0x1   4 KB
11836   *	0x2   8 KB
11837   *	0x3  16 KB
11838   *	0x4  32 KB
11839   *	0x5  64 KB
11840   *	0x6 128 KB
11841   *	0x7 256 KB
11842   *	0x8 512 KB (Receive Array only)
11843   *	0x9   1 MB (Receive Array only)
11844   *	0xa   2 MB (Receive Array only)
11845   *
11846   *	0xB-0xF - reserved (Receive Array only)
11847   *
11848   *
11849   * This routine assumes that the value has already been sanity checked.
11850   */
11851  static u32 encoded_size(u32 size)
11852  {
11853  	switch (size) {
11854  	case   4 * 1024: return 0x1;
11855  	case   8 * 1024: return 0x2;
11856  	case  16 * 1024: return 0x3;
11857  	case  32 * 1024: return 0x4;
11858  	case  64 * 1024: return 0x5;
11859  	case 128 * 1024: return 0x6;
11860  	case 256 * 1024: return 0x7;
11861  	case 512 * 1024: return 0x8;
11862  	case   1 * 1024 * 1024: return 0x9;
11863  	case   2 * 1024 * 1024: return 0xa;
11864  	}
11865  	return 0x1;	/* if invalid, go with the minimum size */
11866  }
11867  
11868  void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
11869  		  struct hfi1_ctxtdata *rcd)
11870  {
11871  	u64 rcvctrl, reg;
11872  	int did_enable = 0;
11873  	u16 ctxt;
11874  
11875  	if (!rcd)
11876  		return;
11877  
11878  	ctxt = rcd->ctxt;
11879  
11880  	hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11881  
11882  	rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11883  	/* if the context is already enabled, don't do the extra steps */
11884  	if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11885  	    !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11886  		/* reset the tail and hdr addresses, and sequence count */
11887  		write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11888  				rcd->rcvhdrq_dma);
11889  		if (rcd->rcvhdrtail_kvaddr)
11890  			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11891  					rcd->rcvhdrqtailaddr_dma);
11892  		rcd->seq_cnt = 1;
11893  
11894  		/* reset the cached receive header queue head value */
11895  		rcd->head = 0;
11896  
11897  		/*
11898  		 * Zero the receive header queue so we don't get false
11899  		 * positives when checking the sequence number.  The
11900  		 * sequence numbers could land exactly on the same spot.
11901  		 * E.g. an rcd restart before the receive header queue wrapped.
11902  		 */
11903  		memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd));
11904  
11905  		/* starting timeout */
11906  		rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11907  
11908  		/* enable the context */
11909  		rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11910  
11911  		/* clean the egr buffer size first */
11912  		rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11913  		rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11914  				& RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11915  					<< RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11916  
11917  		/* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11918  		write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11919  		did_enable = 1;
11920  
11921  		/* zero RcvEgrIndexHead */
11922  		write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11923  
11924  		/* set eager count and base index */
11925  		reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11926  			& RCV_EGR_CTRL_EGR_CNT_MASK)
11927  		       << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11928  			(((rcd->eager_base >> RCV_SHIFT)
11929  			  & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11930  			 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11931  		write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11932  
11933  		/*
11934  		 * Set TID (expected) count and base index.
11935  		 * rcd->expected_count is set to individual RcvArray entries,
11936  		 * not pairs, and the CSR takes a pair-count in groups of
11937  		 * four, so divide by 8.
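		 *
		 * Example (illustrative numbers): with an expected_count of
		 * 2048 RcvArray entries, and assuming RCV_SHIFT is 3 to match
		 * the divide-by-8 above, the value written is 2048 >> 3 = 256
		 * groups of four TID pairs.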
11938  		 */
11939  		reg = (((rcd->expected_count >> RCV_SHIFT)
11940  					& RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11941  				<< RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11942  		      (((rcd->expected_base >> RCV_SHIFT)
11943  					& RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11944  				<< RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11945  		write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11946  		if (ctxt == HFI1_CTRL_CTXT)
11947  			write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11948  	}
11949  	if (op & HFI1_RCVCTRL_CTXT_DIS) {
11950  		write_csr(dd, RCV_VL15, 0);
11951  		/*
11952  		 * When a receive context is being disabled, turn on tail
11953  		 * update with a dummy tail address and then disable the
11954  		 * receive context.
11955  		 */
11956  		if (dd->rcvhdrtail_dummy_dma) {
11957  			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11958  					dd->rcvhdrtail_dummy_dma);
11959  			/* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11960  			rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11961  		}
11962  
11963  		rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11964  	}
11965  	if (op & HFI1_RCVCTRL_INTRAVAIL_ENB) {
11966  		set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
11967  			      IS_RCVAVAIL_START + rcd->ctxt, true);
11968  		rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11969  	}
11970  	if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) {
11971  		set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
11972  			      IS_RCVAVAIL_START + rcd->ctxt, false);
11973  		rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11974  	}
11975  	if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
11976  		rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11977  	if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11978  		/* See comment on RcvCtxtCtrl.TailUpd above */
11979  		if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11980  			rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11981  	}
11982  	if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11983  		rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11984  	if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11985  		rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11986  	if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11987  		/*
11988  		 * In one-packet-per-eager mode, the size comes from
11989  		 * the RcvArray entry.
11990  		 */
11991  		rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11992  		rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11993  	}
11994  	if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11995  		rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11996  	if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11997  		rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11998  	if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11999  		rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
12000  	if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
12001  		rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
12002  	if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
12003  		rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
12004  	if (op & HFI1_RCVCTRL_URGENT_ENB)
12005  		set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
12006  			      IS_RCVURGENT_START + rcd->ctxt, true);
12007  	if (op & HFI1_RCVCTRL_URGENT_DIS)
12008  		set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
12009  			      IS_RCVURGENT_START + rcd->ctxt, false);
12010  
12011  	hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
12012  	write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl);
12013  
12014  	/* work around sticky RcvCtxtStatus.BlockedRHQFull */
12015  	if (did_enable &&
12016  	    (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
12017  		reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
12018  		if (reg != 0) {
12019  			dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
12020  				    ctxt, reg);
12021  			read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
12022  			write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
12023  			write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
12024  			read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
12025  			reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
12026  			dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
12027  				    ctxt, reg, reg == 0 ? "not" : "still");
12028  		}
12029  	}
12030  
12031  	if (did_enable) {
12032  		/*
12033  		 * The interrupt timeout and count must be set after
12034  		 * the context is enabled to take effect.
12035  		 */
12036  		/* set interrupt timeout */
12037  		write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
12038  				(u64)rcd->rcvavail_timeout <<
12039  				RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
12040  
12041  		/* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
12042  		reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
12043  		write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
12044  	}
12045  
12046  	if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
12047  		/*
12048  		 * If the context has been disabled and the Tail Update has
12049  		 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address
12050  		 * so it doesn't contain an address that is invalid.
12051  		 */
12052  		write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
12053  				dd->rcvhdrtail_dummy_dma);
12054  }
12055  
12056  u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
12057  {
12058  	int ret;
12059  	u64 val = 0;
12060  
12061  	if (namep) {
12062  		ret = dd->cntrnameslen;
12063  		*namep = dd->cntrnames;
12064  	} else {
12065  		const struct cntr_entry *entry;
12066  		int i, j;
12067  
12068  		ret = (dd->ndevcntrs) * sizeof(u64);
12069  
12070  		/* Get the start of the block of counters */
12071  		*cntrp = dd->cntrs;
12072  
12073  		/*
12074  		 * Now go and fill in each counter in the block.
12075  		 */
12076  		for (i = 0; i < DEV_CNTR_LAST; i++) {
12077  			entry = &dev_cntrs[i];
12078  			hfi1_cdbg(CNTR, "reading %s", entry->name);
12079  			if (entry->flags & CNTR_DISABLED) {
12080  				/* Nothing */
12081  				hfi1_cdbg(CNTR, "\tDisabled\n");
12082  			} else {
12083  				if (entry->flags & CNTR_VL) {
12084  					hfi1_cdbg(CNTR, "\tPer VL\n");
12085  					for (j = 0; j < C_VL_COUNT; j++) {
12086  						val = entry->rw_cntr(entry,
12087  								  dd, j,
12088  								  CNTR_MODE_R,
12089  								  0);
12090  						hfi1_cdbg(
12091  						   CNTR,
12092  						   "\t\tRead 0x%llx for %d\n",
12093  						   val, j);
12094  						dd->cntrs[entry->offset + j] =
12095  									    val;
12096  					}
12097  				} else if (entry->flags & CNTR_SDMA) {
12098  					hfi1_cdbg(CNTR,
12099  						  "\t Per SDMA Engine\n");
12100  					for (j = 0; j < chip_sdma_engines(dd);
12101  					     j++) {
12102  						val =
12103  						entry->rw_cntr(entry, dd, j,
12104  							       CNTR_MODE_R, 0);
12105  						hfi1_cdbg(CNTR,
12106  							  "\t\tRead 0x%llx for %d\n",
12107  							  val, j);
12108  						dd->cntrs[entry->offset + j] =
12109  									val;
12110  					}
12111  				} else {
12112  					val = entry->rw_cntr(entry, dd,
12113  							CNTR_INVALID_VL,
12114  							CNTR_MODE_R, 0);
12115  					dd->cntrs[entry->offset] = val;
12116  					hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12117  				}
12118  			}
12119  		}
12120  	}
12121  	return ret;
12122  }
12123  
12124  /*
12125   * Used by sysfs to create files for hfi stats to read
12126   */
12127  u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
12128  {
12129  	int ret;
12130  	u64 val = 0;
12131  
12132  	if (namep) {
12133  		ret = ppd->dd->portcntrnameslen;
12134  		*namep = ppd->dd->portcntrnames;
12135  	} else {
12136  		const struct cntr_entry *entry;
12137  		int i, j;
12138  
12139  		ret = ppd->dd->nportcntrs * sizeof(u64);
12140  		*cntrp = ppd->cntrs;
12141  
12142  		for (i = 0; i < PORT_CNTR_LAST; i++) {
12143  			entry = &port_cntrs[i];
12144  			hfi1_cdbg(CNTR, "reading %s", entry->name);
12145  			if (entry->flags & CNTR_DISABLED) {
12146  				/* Nothing */
12147  				hfi1_cdbg(CNTR, "\tDisabled\n");
12148  				continue;
12149  			}
12150  
12151  			if (entry->flags & CNTR_VL) {
12152  				hfi1_cdbg(CNTR, "\tPer VL");
12153  				for (j = 0; j < C_VL_COUNT; j++) {
12154  					val = entry->rw_cntr(entry, ppd, j,
12155  							       CNTR_MODE_R,
12156  							       0);
12157  					hfi1_cdbg(
12158  					   CNTR,
12159  					   "\t\tRead 0x%llx for %d",
12160  					   val, j);
12161  					ppd->cntrs[entry->offset + j] = val;
12162  				}
12163  			} else {
12164  				val = entry->rw_cntr(entry, ppd,
12165  						       CNTR_INVALID_VL,
12166  						       CNTR_MODE_R,
12167  						       0);
12168  				ppd->cntrs[entry->offset] = val;
12169  				hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12170  			}
12171  		}
12172  	}
12173  	return ret;
12174  }
12175  
12176  static void free_cntrs(struct hfi1_devdata *dd)
12177  {
12178  	struct hfi1_pportdata *ppd;
12179  	int i;
12180  
12181  	if (dd->synth_stats_timer.function)
12182  		del_timer_sync(&dd->synth_stats_timer);
12183  	ppd = (struct hfi1_pportdata *)(dd + 1);
12184  	for (i = 0; i < dd->num_pports; i++, ppd++) {
12185  		kfree(ppd->cntrs);
12186  		kfree(ppd->scntrs);
12187  		free_percpu(ppd->ibport_data.rvp.rc_acks);
12188  		free_percpu(ppd->ibport_data.rvp.rc_qacks);
12189  		free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
12190  		ppd->cntrs = NULL;
12191  		ppd->scntrs = NULL;
12192  		ppd->ibport_data.rvp.rc_acks = NULL;
12193  		ppd->ibport_data.rvp.rc_qacks = NULL;
12194  		ppd->ibport_data.rvp.rc_delayed_comp = NULL;
12195  	}
12196  	kfree(dd->portcntrnames);
12197  	dd->portcntrnames = NULL;
12198  	kfree(dd->cntrs);
12199  	dd->cntrs = NULL;
12200  	kfree(dd->scntrs);
12201  	dd->scntrs = NULL;
12202  	kfree(dd->cntrnames);
12203  	dd->cntrnames = NULL;
12204  	if (dd->update_cntr_wq) {
12205  		destroy_workqueue(dd->update_cntr_wq);
12206  		dd->update_cntr_wq = NULL;
12207  	}
12208  }
12209  
12210  static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
12211  			      u64 *psval, void *context, int vl)
12212  {
12213  	u64 val;
12214  	u64 sval = *psval;
12215  
12216  	if (entry->flags & CNTR_DISABLED) {
12217  		dd_dev_err(dd, "Counter %s not enabled", entry->name);
12218  		return 0;
12219  	}
12220  
12221  	hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12222  
12223  	val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
12224  
12225  	/* If it's a synthetic counter, there is more work we need to do */
12226  	if (entry->flags & CNTR_SYNTH) {
12227  		if (sval == CNTR_MAX) {
12228  			/* No need to read already saturated */
12229  			return CNTR_MAX;
12230  		}
12231  
12232  		if (entry->flags & CNTR_32BIT) {
12233  			/* 32bit counters can wrap multiple times */
12234  			u64 upper = sval >> 32;
12235  			u64 lower = (sval << 32) >> 32;
12236  
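			/*
			 * Illustrative example: if the saved 64-bit value is
			 * 0x1fffffff0 (upper = 1, lower = 0xfffffff0) and the
			 * hardware now reads back 0x10, lower > val indicates
			 * a wrap, upper becomes 2, and the synthesized value
			 * below is 0x200000010.
			 */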
12237  			if (lower > val) { /* hw wrapped */
12238  				if (upper == CNTR_32BIT_MAX)
12239  					val = CNTR_MAX;
12240  				else
12241  					upper++;
12242  			}
12243  
12244  			if (val != CNTR_MAX)
12245  				val = (upper << 32) | val;
12246  
12247  		} else {
12248  			/* If we rolled we are saturated */
12249  			if ((val < sval) || (val > CNTR_MAX))
12250  				val = CNTR_MAX;
12251  		}
12252  	}
12253  
12254  	*psval = val;
12255  
12256  	hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12257  
12258  	return val;
12259  }
12260  
12261  static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
12262  			       struct cntr_entry *entry,
12263  			       u64 *psval, void *context, int vl, u64 data)
12264  {
12265  	u64 val;
12266  
12267  	if (entry->flags & CNTR_DISABLED) {
12268  		dd_dev_err(dd, "Counter %s not enabled", entry->name);
12269  		return 0;
12270  	}
12271  
12272  	hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12273  
12274  	if (entry->flags & CNTR_SYNTH) {
12275  		*psval = data;
12276  		if (entry->flags & CNTR_32BIT) {
12277  			val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12278  					     (data << 32) >> 32);
12279  			val = data; /* return the full 64bit value */
12280  		} else {
12281  			val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12282  					     data);
12283  		}
12284  	} else {
12285  		val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
12286  	}
12287  
12288  	*psval = val;
12289  
12290  	hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12291  
12292  	return val;
12293  }
12294  
12295  u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
12296  {
12297  	struct cntr_entry *entry;
12298  	u64 *sval;
12299  
12300  	entry = &dev_cntrs[index];
12301  	sval = dd->scntrs + entry->offset;
12302  
12303  	if (vl != CNTR_INVALID_VL)
12304  		sval += vl;
12305  
12306  	return read_dev_port_cntr(dd, entry, sval, dd, vl);
12307  }
12308  
12309  u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12310  {
12311  	struct cntr_entry *entry;
12312  	u64 *sval;
12313  
12314  	entry = &dev_cntrs[index];
12315  	sval = dd->scntrs + entry->offset;
12316  
12317  	if (vl != CNTR_INVALID_VL)
12318  		sval += vl;
12319  
12320  	return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12321  }
12322  
12323  u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12324  {
12325  	struct cntr_entry *entry;
12326  	u64 *sval;
12327  
12328  	entry = &port_cntrs[index];
12329  	sval = ppd->scntrs + entry->offset;
12330  
12331  	if (vl != CNTR_INVALID_VL)
12332  		sval += vl;
12333  
12334  	if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12335  	    (index <= C_RCV_HDR_OVF_LAST)) {
12336  		/* We do not want to bother for disabled contexts */
12337  		return 0;
12338  	}
12339  
12340  	return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12341  }
12342  
12343  u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12344  {
12345  	struct cntr_entry *entry;
12346  	u64 *sval;
12347  
12348  	entry = &port_cntrs[index];
12349  	sval = ppd->scntrs + entry->offset;
12350  
12351  	if (vl != CNTR_INVALID_VL)
12352  		sval += vl;
12353  
12354  	if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12355  	    (index <= C_RCV_HDR_OVF_LAST)) {
12356  		/* We do not want to bother for disabled contexts */
12357  		return 0;
12358  	}
12359  
12360  	return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12361  }
12362  
12363  static void do_update_synth_timer(struct work_struct *work)
12364  {
12365  	u64 cur_tx;
12366  	u64 cur_rx;
12367  	u64 total_flits;
12368  	u8 update = 0;
12369  	int i, j, vl;
12370  	struct hfi1_pportdata *ppd;
12371  	struct cntr_entry *entry;
12372  	struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
12373  					       update_cntr_work);
12374  
12375  	/*
12376  	 * Rather than keep beating on the CSRs, pick a minimal set that we can
12377  	 * check to watch for potential rollover. We can do this by looking at
12378  	 * the number of flits sent/received. If the total flits exceed 32 bits,
12379  	 * then we have to iterate over all the counters and update them.
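	 *
	 * For example (hypothetical counts): if the previous snapshot was
	 * tx = 2,000,000 and rx = 1,000,000 flits and the current reads are
	 * tx = 1,500,000 and rx = 5,000,000, tx has gone backwards, so a roll
	 * is assumed and every counter is re-read; otherwise the two deltas
	 * are summed and compared against CNTR_32BIT_MAX.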
12380  	 */
12381  	entry = &dev_cntrs[C_DC_RCV_FLITS];
12382  	cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12383  
12384  	entry = &dev_cntrs[C_DC_XMIT_FLITS];
12385  	cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12386  
12387  	hfi1_cdbg(
12388  	    CNTR,
12389  	    "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12390  	    dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12391  
12392  	if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12393  		/*
12394  		 * It may not be strictly necessary to update, but it won't hurt
12395  		 * and it simplifies the logic here.
12396  		 */
12397  		update = 1;
12398  		hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12399  			  dd->unit);
12400  	} else {
12401  		total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12402  		hfi1_cdbg(CNTR,
12403  			  "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12404  			  total_flits, (u64)CNTR_32BIT_MAX);
12405  		if (total_flits >= CNTR_32BIT_MAX) {
12406  			hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12407  				  dd->unit);
12408  			update = 1;
12409  		}
12410  	}
12411  
12412  	if (update) {
12413  		hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12414  		for (i = 0; i < DEV_CNTR_LAST; i++) {
12415  			entry = &dev_cntrs[i];
12416  			if (entry->flags & CNTR_VL) {
12417  				for (vl = 0; vl < C_VL_COUNT; vl++)
12418  					read_dev_cntr(dd, i, vl);
12419  			} else {
12420  				read_dev_cntr(dd, i, CNTR_INVALID_VL);
12421  			}
12422  		}
12423  		ppd = (struct hfi1_pportdata *)(dd + 1);
12424  		for (i = 0; i < dd->num_pports; i++, ppd++) {
12425  			for (j = 0; j < PORT_CNTR_LAST; j++) {
12426  				entry = &port_cntrs[j];
12427  				if (entry->flags & CNTR_VL) {
12428  					for (vl = 0; vl < C_VL_COUNT; vl++)
12429  						read_port_cntr(ppd, j, vl);
12430  				} else {
12431  					read_port_cntr(ppd, j, CNTR_INVALID_VL);
12432  				}
12433  			}
12434  		}
12435  
12436  		/*
12437  		 * We want the value in the register. The goal is to keep track
12438  		 * of the number of "ticks", not the counter value. In other
12439  		 * words, if the register rolls we want to notice it and go
12440  		 * ahead and force an update.
12441  		 */
12442  		entry = &dev_cntrs[C_DC_XMIT_FLITS];
12443  		dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12444  						CNTR_MODE_R, 0);
12445  
12446  		entry = &dev_cntrs[C_DC_RCV_FLITS];
12447  		dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12448  						CNTR_MODE_R, 0);
12449  
12450  		hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12451  			  dd->unit, dd->last_tx, dd->last_rx);
12452  
12453  	} else {
12454  		hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12455  	}
12456  }
12457  
12458  static void update_synth_timer(struct timer_list *t)
12459  {
12460  	struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer);
12461  
12462  	queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
12463  	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12464  }
12465  
12466  #define C_MAX_NAME 16 /* 15 chars + one for \0 */
12467  static int init_cntrs(struct hfi1_devdata *dd)
12468  {
12469  	int i, rcv_ctxts, j;
12470  	size_t sz;
12471  	char *p;
12472  	char name[C_MAX_NAME];
12473  	struct hfi1_pportdata *ppd;
12474  	const char *bit_type_32 = ",32";
12475  	const int bit_type_32_sz = strlen(bit_type_32);
12476  	u32 sdma_engines = chip_sdma_engines(dd);
12477  
12478  	/* set up the stats timer; the add_timer is done at the end */
12479  	timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);
12480  
12481  	/***********************/
12482  	/* per device counters */
12483  	/***********************/
12484  
12485  	/* size names and determine how many we have */
12486  	dd->ndevcntrs = 0;
12487  	sz = 0;
12488  
12489  	for (i = 0; i < DEV_CNTR_LAST; i++) {
12490  		if (dev_cntrs[i].flags & CNTR_DISABLED) {
12491  			hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12492  			continue;
12493  		}
12494  
12495  		if (dev_cntrs[i].flags & CNTR_VL) {
12496  			dev_cntrs[i].offset = dd->ndevcntrs;
12497  			for (j = 0; j < C_VL_COUNT; j++) {
12498  				snprintf(name, C_MAX_NAME, "%s%d",
12499  					 dev_cntrs[i].name, vl_from_idx(j));
12500  				sz += strlen(name);
12501  				/* Add ",32" for 32-bit counters */
12502  				if (dev_cntrs[i].flags & CNTR_32BIT)
12503  					sz += bit_type_32_sz;
12504  				sz++;
12505  				dd->ndevcntrs++;
12506  			}
12507  		} else if (dev_cntrs[i].flags & CNTR_SDMA) {
12508  			dev_cntrs[i].offset = dd->ndevcntrs;
12509  			for (j = 0; j < sdma_engines; j++) {
12510  				snprintf(name, C_MAX_NAME, "%s%d",
12511  					 dev_cntrs[i].name, j);
12512  				sz += strlen(name);
12513  				/* Add ",32" for 32-bit counters */
12514  				if (dev_cntrs[i].flags & CNTR_32BIT)
12515  					sz += bit_type_32_sz;
12516  				sz++;
12517  				dd->ndevcntrs++;
12518  			}
12519  		} else {
12520  			/* +1 for newline. */
12521  			sz += strlen(dev_cntrs[i].name) + 1;
12522  			/* Add ",32" for 32-bit counters */
12523  			if (dev_cntrs[i].flags & CNTR_32BIT)
12524  				sz += bit_type_32_sz;
12525  			dev_cntrs[i].offset = dd->ndevcntrs;
12526  			dd->ndevcntrs++;
12527  		}
12528  	}
12529  
12530  	/* allocate space for the counter values */
12531  	dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
12532  			    GFP_KERNEL);
12533  	if (!dd->cntrs)
12534  		goto bail;
12535  
12536  	dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12537  	if (!dd->scntrs)
12538  		goto bail;
12539  
12540  	/* allocate space for the counter names */
12541  	dd->cntrnameslen = sz;
12542  	dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12543  	if (!dd->cntrnames)
12544  		goto bail;
12545  
12546  	/* fill in the names */
12547  	for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
12548  		if (dev_cntrs[i].flags & CNTR_DISABLED) {
12549  			/* Nothing */
12550  		} else if (dev_cntrs[i].flags & CNTR_VL) {
12551  			for (j = 0; j < C_VL_COUNT; j++) {
12552  				snprintf(name, C_MAX_NAME, "%s%d",
12553  					 dev_cntrs[i].name,
12554  					 vl_from_idx(j));
12555  				memcpy(p, name, strlen(name));
12556  				p += strlen(name);
12557  
12558  				/* Counter is 32 bits */
12559  				if (dev_cntrs[i].flags & CNTR_32BIT) {
12560  					memcpy(p, bit_type_32, bit_type_32_sz);
12561  					p += bit_type_32_sz;
12562  				}
12563  
12564  				*p++ = '\n';
12565  			}
12566  		} else if (dev_cntrs[i].flags & CNTR_SDMA) {
12567  			for (j = 0; j < sdma_engines; j++) {
12568  				snprintf(name, C_MAX_NAME, "%s%d",
12569  					 dev_cntrs[i].name, j);
12570  				memcpy(p, name, strlen(name));
12571  				p += strlen(name);
12572  
12573  				/* Counter is 32 bits */
12574  				if (dev_cntrs[i].flags & CNTR_32BIT) {
12575  					memcpy(p, bit_type_32, bit_type_32_sz);
12576  					p += bit_type_32_sz;
12577  				}
12578  
12579  				*p++ = '\n';
12580  			}
12581  		} else {
12582  			memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12583  			p += strlen(dev_cntrs[i].name);
12584  
12585  			/* Counter is 32 bits */
12586  			if (dev_cntrs[i].flags & CNTR_32BIT) {
12587  				memcpy(p, bit_type_32, bit_type_32_sz);
12588  				p += bit_type_32_sz;
12589  			}
12590  
12591  			*p++ = '\n';
12592  		}
12593  	}
12594  
12595  	/*********************/
12596  	/* per port counters */
12597  	/*********************/
12598  
12599  	/*
12600  	 * Go through the counters for the overflows and disable the ones we
12601  	 * don't need. This varies based on platform so we need to do it
12602  	 * dynamically here.
12603  	 */
12604  	rcv_ctxts = dd->num_rcv_contexts;
12605  	for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12606  	     i <= C_RCV_HDR_OVF_LAST; i++) {
12607  		port_cntrs[i].flags |= CNTR_DISABLED;
12608  	}
12609  
12610  	/* size port counter names and determine how many we have */
12611  	sz = 0;
12612  	dd->nportcntrs = 0;
12613  	for (i = 0; i < PORT_CNTR_LAST; i++) {
12614  		if (port_cntrs[i].flags & CNTR_DISABLED) {
12615  			hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12616  			continue;
12617  		}
12618  
12619  		if (port_cntrs[i].flags & CNTR_VL) {
12620  			port_cntrs[i].offset = dd->nportcntrs;
12621  			for (j = 0; j < C_VL_COUNT; j++) {
12622  				snprintf(name, C_MAX_NAME, "%s%d",
12623  					 port_cntrs[i].name, vl_from_idx(j));
12624  				sz += strlen(name);
12625  				/* Add ",32" for 32-bit counters */
12626  				if (port_cntrs[i].flags & CNTR_32BIT)
12627  					sz += bit_type_32_sz;
12628  				sz++;
12629  				dd->nportcntrs++;
12630  			}
12631  		} else {
12632  			/* +1 for newline */
12633  			sz += strlen(port_cntrs[i].name) + 1;
12634  			/* Add ",32" for 32-bit counters */
12635  			if (port_cntrs[i].flags & CNTR_32BIT)
12636  				sz += bit_type_32_sz;
12637  			port_cntrs[i].offset = dd->nportcntrs;
12638  			dd->nportcntrs++;
12639  		}
12640  	}
12641  
12642  	/* allocate space for the counter names */
12643  	dd->portcntrnameslen = sz;
12644  	dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12645  	if (!dd->portcntrnames)
12646  		goto bail;
12647  
12648  	/* fill in port cntr names */
12649  	for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12650  		if (port_cntrs[i].flags & CNTR_DISABLED)
12651  			continue;
12652  
12653  		if (port_cntrs[i].flags & CNTR_VL) {
12654  			for (j = 0; j < C_VL_COUNT; j++) {
12655  				snprintf(name, C_MAX_NAME, "%s%d",
12656  					 port_cntrs[i].name, vl_from_idx(j));
12657  				memcpy(p, name, strlen(name));
12658  				p += strlen(name);
12659  
12660  				/* Counter is 32 bits */
12661  				if (port_cntrs[i].flags & CNTR_32BIT) {
12662  					memcpy(p, bit_type_32, bit_type_32_sz);
12663  					p += bit_type_32_sz;
12664  				}
12665  
12666  				*p++ = '\n';
12667  			}
12668  		} else {
12669  			memcpy(p, port_cntrs[i].name,
12670  			       strlen(port_cntrs[i].name));
12671  			p += strlen(port_cntrs[i].name);
12672  
12673  			/* Counter is 32 bits */
12674  			if (port_cntrs[i].flags & CNTR_32BIT) {
12675  				memcpy(p, bit_type_32, bit_type_32_sz);
12676  				p += bit_type_32_sz;
12677  			}
12678  
12679  			*p++ = '\n';
12680  		}
12681  	}
12682  
12683  	/* allocate per port storage for counter values */
12684  	ppd = (struct hfi1_pportdata *)(dd + 1);
12685  	for (i = 0; i < dd->num_pports; i++, ppd++) {
12686  		ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12687  		if (!ppd->cntrs)
12688  			goto bail;
12689  
12690  		ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12691  		if (!ppd->scntrs)
12692  			goto bail;
12693  	}
12694  
12695  	/* CPU counters need to be allocated and zeroed */
12696  	if (init_cpu_counters(dd))
12697  		goto bail;
12698  
12699  	dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
12700  						     WQ_MEM_RECLAIM, dd->unit);
12701  	if (!dd->update_cntr_wq)
12702  		goto bail;
12703  
12704  	INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
12705  
12706  	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12707  	return 0;
12708  bail:
12709  	free_cntrs(dd);
12710  	return -ENOMEM;
12711  }
12712  
12713  static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12714  {
12715  	switch (chip_lstate) {
12716  	default:
12717  		dd_dev_err(dd,
12718  			   "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12719  			   chip_lstate);
12720  		/* fall through */
12721  	case LSTATE_DOWN:
12722  		return IB_PORT_DOWN;
12723  	case LSTATE_INIT:
12724  		return IB_PORT_INIT;
12725  	case LSTATE_ARMED:
12726  		return IB_PORT_ARMED;
12727  	case LSTATE_ACTIVE:
12728  		return IB_PORT_ACTIVE;
12729  	}
12730  }
12731  
12732  u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12733  {
12734  	/* look at the HFI meta-states only */
12735  	switch (chip_pstate & 0xf0) {
12736  	default:
12737  		dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12738  			   chip_pstate);
12739  		/* fall through */
12740  	case PLS_DISABLED:
12741  		return IB_PORTPHYSSTATE_DISABLED;
12742  	case PLS_OFFLINE:
12743  		return OPA_PORTPHYSSTATE_OFFLINE;
12744  	case PLS_POLLING:
12745  		return IB_PORTPHYSSTATE_POLLING;
12746  	case PLS_CONFIGPHY:
12747  		return IB_PORTPHYSSTATE_TRAINING;
12748  	case PLS_LINKUP:
12749  		return IB_PORTPHYSSTATE_LINKUP;
12750  	case PLS_PHYTEST:
12751  		return IB_PORTPHYSSTATE_PHY_TEST;
12752  	}
12753  }
12754  
12755  /* return the OPA port logical state name */
12756  const char *opa_lstate_name(u32 lstate)
12757  {
12758  	static const char * const port_logical_names[] = {
12759  		"PORT_NOP",
12760  		"PORT_DOWN",
12761  		"PORT_INIT",
12762  		"PORT_ARMED",
12763  		"PORT_ACTIVE",
12764  		"PORT_ACTIVE_DEFER",
12765  	};
12766  	if (lstate < ARRAY_SIZE(port_logical_names))
12767  		return port_logical_names[lstate];
12768  	return "unknown";
12769  }
12770  
12771  /* return the OPA port physical state name */
12772  const char *opa_pstate_name(u32 pstate)
12773  {
12774  	static const char * const port_physical_names[] = {
12775  		"PHYS_NOP",
12776  		"reserved1",
12777  		"PHYS_POLL",
12778  		"PHYS_DISABLED",
12779  		"PHYS_TRAINING",
12780  		"PHYS_LINKUP",
12781  		"PHYS_LINK_ERR_RECOVER",
12782  		"PHYS_PHY_TEST",
12783  		"reserved8",
12784  		"PHYS_OFFLINE",
12785  		"PHYS_GANGED",
12786  		"PHYS_TEST",
12787  	};
12788  	if (pstate < ARRAY_SIZE(port_physical_names))
12789  		return port_physical_names[pstate];
12790  	return "unknown";
12791  }
12792  
12793  /**
12794   * update_statusp - Update userspace status flag
12795   * @ppd: Port data structure
12796   * @state: port state information
12797   *
12798   * Actual port status is determined by the host_link_state value
12799   * in the ppd.
12800   *
12801   * host_link_state MUST be updated before updating the user space
12802   * statusp.
12803   */
12804  static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
12805  {
12806  	/*
12807  	 * Set port status flags in the page mapped into userspace
12808  	 * memory. Do it here to ensure a reliable state - this is
12809  	 * the only function called by all state handling code.
12810  	 * Always set the flags due to the fact that the cache value
12811  	 * might have been changed explicitly outside of this
12812  	 * function.
12813  	 */
12814  	if (ppd->statusp) {
12815  		switch (state) {
12816  		case IB_PORT_DOWN:
12817  		case IB_PORT_INIT:
12818  			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12819  					   HFI1_STATUS_IB_READY);
12820  			break;
12821  		case IB_PORT_ARMED:
12822  			*ppd->statusp |= HFI1_STATUS_IB_CONF;
12823  			break;
12824  		case IB_PORT_ACTIVE:
12825  			*ppd->statusp |= HFI1_STATUS_IB_READY;
12826  			break;
12827  		}
12828  	}
12829  	dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12830  		    opa_lstate_name(state), state);
12831  }
12832  
12833  /**
12834   * wait_logical_linkstate - wait for an IB link state change to occur
12835   * @ppd: port device
12836   * @state: the state to wait for
12837   * @msecs: the number of milliseconds to wait
12838   *
12839   * Wait up to msecs milliseconds for IB link state change to occur.
12840   * For now, take the easy polling route.
12841   * Returns 0 if state reached, otherwise -ETIMEDOUT.
12842   */
12843  static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12844  				  int msecs)
12845  {
12846  	unsigned long timeout;
12847  	u32 new_state;
12848  
12849  	timeout = jiffies + msecs_to_jiffies(msecs);
12850  	while (1) {
12851  		new_state = chip_to_opa_lstate(ppd->dd,
12852  					       read_logical_state(ppd->dd));
12853  		if (new_state == state)
12854  			break;
12855  		if (time_after(jiffies, timeout)) {
12856  			dd_dev_err(ppd->dd,
12857  				   "timeout waiting for link state 0x%x\n",
12858  				   state);
12859  			return -ETIMEDOUT;
12860  		}
12861  		msleep(20);
12862  	}
12863  
12864  	return 0;
12865  }
12866  
12867  static void log_state_transition(struct hfi1_pportdata *ppd, u32 state)
12868  {
12869  	u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);
12870  
12871  	dd_dev_info(ppd->dd,
12872  		    "physical state changed to %s (0x%x), phy 0x%x\n",
12873  		    opa_pstate_name(ib_pstate), ib_pstate, state);
12874  }
12875  
12876  /*
12877   * Read the physical hardware link state and check if it matches the
12878   * host driver's anticipated state.
12879   */
12880  static void log_physical_state(struct hfi1_pportdata *ppd, u32 state)
12881  {
12882  	u32 read_state = read_physical_state(ppd->dd);
12883  
12884  	if (read_state == state) {
12885  		log_state_transition(ppd, state);
12886  	} else {
12887  		dd_dev_err(ppd->dd,
12888  			   "anticipated phy link state 0x%x, read 0x%x\n",
12889  			   state, read_state);
12890  	}
12891  }
12892  
12893  /*
12894   * wait_physical_linkstate - wait for a physical link state change to occur
12895   * @ppd: port device
12896   * @state: the state to wait for
12897   * @msecs: the number of milliseconds to wait
12898   *
12899   * Wait up to msecs milliseconds for physical link state change to occur.
12900   * Returns 0 if state reached, otherwise -ETIMEDOUT.
12901   */
12902  static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12903  				   int msecs)
12904  {
12905  	u32 read_state;
12906  	unsigned long timeout;
12907  
12908  	timeout = jiffies + msecs_to_jiffies(msecs);
12909  	while (1) {
12910  		read_state = read_physical_state(ppd->dd);
12911  		if (read_state == state)
12912  			break;
12913  		if (time_after(jiffies, timeout)) {
12914  			dd_dev_err(ppd->dd,
12915  				   "timeout waiting for phy link state 0x%x\n",
12916  				   state);
12917  			return -ETIMEDOUT;
12918  		}
12919  		usleep_range(1950, 2050); /* sleep 2ms-ish */
12920  	}
12921  
12922  	log_state_transition(ppd, state);
12923  	return 0;
12924  }
12925  
12926  /*
12927   * wait_phys_link_offline_substates - wait for any offline substate
12928   * @ppd: port device
12929   * @msecs: the number of milliseconds to wait
12930   *
12931   * Wait up to msecs milliseconds for any offline physical link
12932   * state change to occur.
12933   * Returns the read physical state on success, otherwise -ETIMEDOUT.
12934   */
12935  static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
12936  					    int msecs)
12937  {
12938  	u32 read_state;
12939  	unsigned long timeout;
12940  
12941  	timeout = jiffies + msecs_to_jiffies(msecs);
12942  	while (1) {
12943  		read_state = read_physical_state(ppd->dd);
12944  		if ((read_state & 0xF0) == PLS_OFFLINE)
12945  			break;
12946  		if (time_after(jiffies, timeout)) {
12947  			dd_dev_err(ppd->dd,
12948  				   "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
12949  				   read_state, msecs);
12950  			return -ETIMEDOUT;
12951  		}
12952  		usleep_range(1950, 2050); /* sleep 2ms-ish */
12953  	}
12954  
12955  	log_state_transition(ppd, read_state);
12956  	return read_state;
12957  }
12958  
12959  /*
12960   * wait_phys_link_out_of_offline - wait for any out of offline state
12961   * @ppd: port device
12962   * @msecs: the number of milliseconds to wait
12963   *
12964   * Wait up to msecs milliseconds for any out of offline physical link
12965   * state change to occur.
12966   * Returns the read physical state once out of offline, otherwise -ETIMEDOUT.
12967   */
12968  static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
12969  					 int msecs)
12970  {
12971  	u32 read_state;
12972  	unsigned long timeout;
12973  
12974  	timeout = jiffies + msecs_to_jiffies(msecs);
12975  	while (1) {
12976  		read_state = read_physical_state(ppd->dd);
12977  		if ((read_state & 0xF0) != PLS_OFFLINE)
12978  			break;
12979  		if (time_after(jiffies, timeout)) {
12980  			dd_dev_err(ppd->dd,
12981  				   "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n",
12982  				   read_state, msecs);
12983  			return -ETIMEDOUT;
12984  		}
12985  		usleep_range(1950, 2050); /* sleep 2ms-ish */
12986  	}
12987  
12988  	log_state_transition(ppd, read_state);
12989  	return read_state;
12990  }
12991  
12992  #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12993  (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12994  
12995  #define SET_STATIC_RATE_CONTROL_SMASK(r) \
12996  (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12997  
12998  void hfi1_init_ctxt(struct send_context *sc)
12999  {
13000  	if (sc) {
13001  		struct hfi1_devdata *dd = sc->dd;
13002  		u64 reg;
13003  		u8 set = (sc->type == SC_USER ?
13004  			  HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
13005  			  HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
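		/*
		 * Note the inversion below: when static rate control is
		 * enabled for this context type, the DISALLOW bit is
		 * cleared; otherwise it is set.
		 */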
13006  		reg = read_kctxt_csr(dd, sc->hw_context,
13007  				     SEND_CTXT_CHECK_ENABLE);
13008  		if (set)
13009  			CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
13010  		else
13011  			SET_STATIC_RATE_CONTROL_SMASK(reg);
13012  		write_kctxt_csr(dd, sc->hw_context,
13013  				SEND_CTXT_CHECK_ENABLE, reg);
13014  	}
13015  }
13016  
13017  int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
13018  {
13019  	int ret = 0;
13020  	u64 reg;
13021  
13022  	if (dd->icode != ICODE_RTL_SILICON) {
13023  		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
13024  			dd_dev_info(dd, "%s: tempsense not supported by HW\n",
13025  				    __func__);
13026  		return -EINVAL;
13027  	}
13028  	reg = read_csr(dd, ASIC_STS_THERM);
13029  	temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
13030  		      ASIC_STS_THERM_CURR_TEMP_MASK);
13031  	temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
13032  			ASIC_STS_THERM_LO_TEMP_MASK);
13033  	temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
13034  			ASIC_STS_THERM_HI_TEMP_MASK);
13035  	temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
13036  			  ASIC_STS_THERM_CRIT_TEMP_MASK);
13037  	/* triggers is a 3-bit value - 1 bit per trigger. */
13038  	temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
13039  
13040  	return ret;
13041  }
13042  
13043  /* ========================================================================= */
13044  
13045  /**
13046   * read_mod_write() - Calculate the IRQ register index and set/clear the bits
13047   * @dd: valid devdata
13048   * @src: IRQ source to determine register index from
13049   * @bits: the bits to set or clear
13050   * @set: true == set the bits, false == clear the bits
13051   *
13052   */
13053  static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits,
13054  			   bool set)
13055  {
13056  	u64 reg;
13057  	u16 idx = src / BITS_PER_REGISTER;
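	/*
	 * Illustrative example (assuming BITS_PER_REGISTER is 64): IRQ
	 * source 70 selects CCE_INT_MASK register idx = 70 / 64 = 1.
	 */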
13058  
13059  	spin_lock(&dd->irq_src_lock);
13060  	reg = read_csr(dd, CCE_INT_MASK + (8 * idx));
13061  	if (set)
13062  		reg |= bits;
13063  	else
13064  		reg &= ~bits;
13065  	write_csr(dd, CCE_INT_MASK + (8 * idx), reg);
13066  	spin_unlock(&dd->irq_src_lock);
13067  }
13068  
13069  /**
13070   * set_intr_bits() - Enable/disable a range (one or more) of IRQ sources
13071   * @dd: valid devdata
13072   * @first: first IRQ source to set/clear
13073   * @last: last IRQ source (inclusive) to set/clear
13074   * @set: true == set the bits, false == clear the bits
13075   *
13076   * If first == last, set the exact source.
13077   */
13078  int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set)
13079  {
13080  	u64 bits = 0;
13081  	u64 bit;
13082  	u16 src;
13083  
13084  	if (first > NUM_INTERRUPT_SOURCES || last > NUM_INTERRUPT_SOURCES)
13085  		return -EINVAL;
13086  
13087  	if (last < first)
13088  		return -ERANGE;
13089  
13090  	for (src = first; src <= last; src++) {
13091  		bit = src % BITS_PER_REGISTER;
13092  		/* wrapped to next register? */
13093  		if (!bit && bits) {
13094  			read_mod_write(dd, src - 1, bits, set);
13095  			bits = 0;
13096  		}
13097  		bits |= BIT_ULL(bit);
13098  	}
13099  	read_mod_write(dd, last, bits, set);
13100  
13101  	return 0;
13102  }
13103  
13104  /*
13105   * Clear all interrupt sources on the chip.
13106   */
13107  void clear_all_interrupts(struct hfi1_devdata *dd)
13108  {
13109  	int i;
13110  
13111  	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13112  		write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
13113  
13114  	write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
13115  	write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
13116  	write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
13117  	write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
13118  	write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
13119  	write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
13120  	write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
13121  	for (i = 0; i < chip_send_contexts(dd); i++)
13122  		write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
13123  	for (i = 0; i < chip_sdma_engines(dd); i++)
13124  		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
13125  
13126  	write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
13127  	write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
13128  	write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
13129  }
13130  
13131  /*
13132   * Remap the interrupt source from the general handler to the given MSI-X
13133   * interrupt.
13134   */
13135  void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
13136  {
13137  	u64 reg;
13138  	int m, n;
13139  
13140  	/* clear from the handled mask of the general interrupt */
13141  	m = isrc / 64;
13142  	n = isrc % 64;
13143  	if (likely(m < CCE_NUM_INT_CSRS)) {
13144  		dd->gi_mask[m] &= ~((u64)1 << n);
13145  	} else {
13146  		dd_dev_err(dd, "remap interrupt err\n");
13147  		return;
13148  	}
13149  
13150  	/* direct the chip source to the given MSI-X interrupt */
13151  	m = isrc / 8;
13152  	n = isrc % 8;
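	/* each 64-bit CCE_INT_MAP register holds 8 one-byte MSI-X vector entries */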
13153  	reg = read_csr(dd, CCE_INT_MAP + (8 * m));
13154  	reg &= ~((u64)0xff << (8 * n));
13155  	reg |= ((u64)msix_intr & 0xff) << (8 * n);
13156  	write_csr(dd, CCE_INT_MAP + (8 * m), reg);
13157  }
13158  
13159  void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr)
13160  {
13161  	/*
13162  	 * SDMA engine interrupt sources are grouped by type, rather than
13163  	 * by engine.  Per-engine interrupts are as follows:
13164  	 *	SDMA
13165  	 *	SDMAProgress
13166  	 *	SDMAIdle
13167  	 */
13168  	remap_intr(dd, IS_SDMA_START + engine, msix_intr);
13169  	remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr);
13170  	remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr);
13171  }
13172  
13173  /*
13174   * Set the general handler to accept all interrupts, remap all
13175   * chip interrupts back to MSI-X 0.
13176   */
13177  void reset_interrupts(struct hfi1_devdata *dd)
13178  {
13179  	int i;
13180  
13181  	/* all interrupts handled by the general handler */
13182  	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13183  		dd->gi_mask[i] = ~(u64)0;
13184  
13185  	/* all chip interrupts map to MSI-X 0 */
13186  	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13187  		write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13188  }
13189  
13190  /**
13191   * set_up_interrupts() - Initialize the IRQ resources and state
13192   * @dd: valid devdata
13193   *
13194   */
13195  static int set_up_interrupts(struct hfi1_devdata *dd)
13196  {
13197  	int ret;
13198  
13199  	/* mask all interrupts */
13200  	set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
13201  
13202  	/* clear all pending interrupts */
13203  	clear_all_interrupts(dd);
13204  
13205  	/* reset general handler mask, chip MSI-X mappings */
13206  	reset_interrupts(dd);
13207  
13208  	/* ask for MSI-X interrupts */
13209  	ret = msix_initialize(dd);
13210  	if (ret)
13211  		return ret;
13212  
13213  	ret = msix_request_irqs(dd);
13214  	if (ret)
13215  		msix_clean_up_interrupts(dd);
13216  
13217  	return ret;
13218  }
13219  
13220  /*
13221   * Set up context values in dd.  Sets:
13222   *
13223   *	num_rcv_contexts - number of contexts being used
13224   *	n_krcv_queues - number of kernel contexts
13225   *	first_dyn_alloc_ctxt - first dynamically allocated context
13226   *                             in array of contexts
13227   *	freectxts  - number of free user contexts
13228   *	num_send_contexts - number of PIO send contexts being used
13229   *	num_vnic_contexts - number of contexts reserved for VNIC
13230   */
13231  static int set_up_context_variables(struct hfi1_devdata *dd)
13232  {
13233  	unsigned long num_kernel_contexts;
13234  	u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
13235  	int total_contexts;
13236  	int ret;
13237  	unsigned ngroups;
13238  	int rmt_count;
13239  	int user_rmt_reduced;
13240  	u32 n_usr_ctxts;
13241  	u32 send_contexts = chip_send_contexts(dd);
13242  	u32 rcv_contexts = chip_rcv_contexts(dd);
13243  
13244  	/*
13245  	 * Kernel receive contexts:
13246  	 * - Context 0 - control context (VL15/multicast/error)
13247  	 * - Context 1 - first kernel context
13248  	 * - Context 2 - second kernel context
13249  	 * ...
13250  	 */
13251  	if (n_krcvqs)
13252  		/*
13253  		 * n_krcvqs is the sum of module parameter kernel receive
13254  		 * contexts, krcvqs[].  It does not include the control
13255  		 * context, so add that.
13256  		 */
13257  		num_kernel_contexts = n_krcvqs + 1;
13258  	else
13259  		num_kernel_contexts = DEFAULT_KRCVQS + 1;
13260  	/*
13261  	 * Every kernel receive context needs an ACK send context.
13262  	 * One send context is allocated for each VL{0-7} and VL15.
13263  	 */
13264  	if (num_kernel_contexts > (send_contexts - num_vls - 1)) {
13265  		dd_dev_err(dd,
13266  			   "Reducing # kernel rcv contexts to: %d, from %lu\n",
13267  			   send_contexts - num_vls - 1,
13268  			   num_kernel_contexts);
13269  		num_kernel_contexts = send_contexts - num_vls - 1;
13270  	}
13271  
13272  	/* Accommodate VNIC contexts if possible */
13273  	if ((num_kernel_contexts + num_vnic_contexts) > rcv_contexts) {
13274  		dd_dev_err(dd, "No receive contexts available for VNIC\n");
13275  		num_vnic_contexts = 0;
13276  	}
13277  	total_contexts = num_kernel_contexts + num_vnic_contexts;
13278  
13279  	/*
13280  	 * User contexts:
13281  	 *	- default to 1 user context per real (non-HT) CPU core if
13282  	 *	  num_user_contexts is negative
13283  	 */
13284  	if (num_user_contexts < 0)
13285  		n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask);
13286  	else
13287  		n_usr_ctxts = num_user_contexts;
13288  	/*
13289  	 * Adjust the counts given a global max.
13290  	 */
13291  	if (total_contexts + n_usr_ctxts > rcv_contexts) {
13292  		dd_dev_err(dd,
13293  			   "Reducing # user receive contexts to: %d, from %u\n",
13294  			   rcv_contexts - total_contexts,
13295  			   n_usr_ctxts);
13296  		/* recalculate */
13297  		n_usr_ctxts = rcv_contexts - total_contexts;
13298  	}
13299  
13300  	/*
13301  	 * The RMT entries are currently allocated as shown below:
13302  	 * 1. QOS (0 to 128 entries);
13303  	 * 2. FECN (num_kernel_contexts - 1 + num_user_contexts +
13304  	 *    num_vnic_contexts);
13305  	 * 3. VNIC (num_vnic_contexts).
13306  	 * Note that FECN oversubscribes the RMT by num_vnic_contexts
13307  	 * entries because both VNIC and PSM could allocate any receive
13308  	 * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
13309  	 * and PSM FECN must reserve an RMT entry for each possible PSM receive
13310  	 * context.
13311  	 */
13312  	rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
13313  	if (HFI1_CAP_IS_KSET(TID_RDMA))
13314  		rmt_count += num_kernel_contexts - 1;
13315  	if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
13316  		user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
13317  		dd_dev_err(dd,
13318  			   "RMT size is reducing the number of user receive contexts from %u to %d\n",
13319  			   n_usr_ctxts,
13320  			   user_rmt_reduced);
13321  		/* recalculate */
13322  		n_usr_ctxts = user_rmt_reduced;
13323  	}
13324  
13325  	total_contexts += n_usr_ctxts;
13326  
13327  	/* the first N are kernel contexts, the rest are user/vnic contexts */
13328  	dd->num_rcv_contexts = total_contexts;
13329  	dd->n_krcv_queues = num_kernel_contexts;
13330  	dd->first_dyn_alloc_ctxt = num_kernel_contexts;
13331  	dd->num_vnic_contexts = num_vnic_contexts;
13332  	dd->num_user_contexts = n_usr_ctxts;
13333  	dd->freectxts = n_usr_ctxts;
13334  	dd_dev_info(dd,
13335  		    "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
13336  		    rcv_contexts,
13337  		    (int)dd->num_rcv_contexts,
13338  		    (int)dd->n_krcv_queues,
13339  		    dd->num_vnic_contexts,
13340  		    dd->num_user_contexts);
13341  
13342  	/*
13343  	 * Receive array allocation:
13344  	 *   All RcvArray entries are divided into groups of 8. This
13345  	 *   is required by the hardware and will speed up writes to
13346  	 *   consecutive entries by using write-combining of the entire
13347  	 *   cacheline.
13348  	 *
13349  	 *   The groups are evenly divided among all contexts;
13350  	 *   any leftover groups are given to the first N user
13351  	 *   contexts.
13352  	 */
13353  	dd->rcv_entries.group_size = RCV_INCREMENT;
13354  	ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size;
13355  	dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13356  	dd->rcv_entries.nctxt_extra = ngroups -
13357  		(dd->num_rcv_contexts * dd->rcv_entries.ngroups);
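	/*
	 * Worked example (hypothetical numbers): 2048 RcvArray groups
	 * spread over 20 contexts gives ngroups = 102 per context with
	 * nctxt_extra = 8 left over for the first user contexts.
	 */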
13358  	dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13359  		    dd->rcv_entries.ngroups,
13360  		    dd->rcv_entries.nctxt_extra);
13361  	if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13362  	    MAX_EAGER_ENTRIES * 2) {
13363  		dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13364  			dd->rcv_entries.group_size;
13365  		dd_dev_info(dd,
13366  			    "RcvArray group count too high, change to %u\n",
13367  			    dd->rcv_entries.ngroups);
13368  		dd->rcv_entries.nctxt_extra = 0;
13369  	}
13370  	/*
13371  	 * PIO send contexts
13372  	 */
13373  	ret = init_sc_pools_and_sizes(dd);
13374  	if (ret >= 0) {	/* success */
13375  		dd->num_send_contexts = ret;
13376  		dd_dev_info(
13377  			dd,
13378  			"send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
13379  			send_contexts,
13380  			dd->num_send_contexts,
13381  			dd->sc_sizes[SC_KERNEL].count,
13382  			dd->sc_sizes[SC_ACK].count,
13383  			dd->sc_sizes[SC_USER].count,
13384  			dd->sc_sizes[SC_VL15].count);
13385  		ret = 0;	/* success */
13386  	}
13387  
13388  	return ret;
13389  }
13390  
13391  /*
13392   * Set the device/port partition key table. The MAD code
13393   * will ensure that, at least, the partial management
13394   * partition key is present in the table.
13395   */
13396  static void set_partition_keys(struct hfi1_pportdata *ppd)
13397  {
13398  	struct hfi1_devdata *dd = ppd->dd;
13399  	u64 reg = 0;
13400  	int i;
13401  
13402  	dd_dev_info(dd, "Setting partition keys\n");
13403  	for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13404  		reg |= (ppd->pkeys[i] &
13405  			RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13406  			((i % 4) *
13407  			 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13408  		/* Each register holds 4 PKey values. */
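		/* e.g. pkeys[4..7] land at byte offset (7 - 3) * 2 = 8 */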
13409  		if ((i % 4) == 3) {
13410  			write_csr(dd, RCV_PARTITION_KEY +
13411  				  ((i - 3) * 2), reg);
13412  			reg = 0;
13413  		}
13414  	}
13415  
13416  	/* Always enable HW pkeys check when pkeys table is set */
13417  	add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
13418  }
13419  
13420  /*
13421   * These CSRs and memories are uninitialized on reset and must be
13422   * written before reading to set the ECC/parity bits.
13423   *
13424   * NOTE: All user context CSRs that are not mmaped write-only
13425   * (e.g. the TID flows) must be initialized even if the driver never
13426   * reads them.
13427   */
13428  static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13429  {
13430  	int i, j;
13431  
13432  	/* CceIntMap */
13433  	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13434  		write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13435  
13436  	/* SendCtxtCreditReturnAddr */
13437  	for (i = 0; i < chip_send_contexts(dd); i++)
13438  		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13439  
13440  	/* PIO Send buffers */
13441  	/* SDMA Send buffers */
13442  	/*
13443  	 * These are not normally read, and (presently) have no method
13444  	 * to be read, so are not pre-initialized
13445  	 */
13446  
13447  	/* RcvHdrAddr */
13448  	/* RcvHdrTailAddr */
13449  	/* RcvTidFlowTable */
13450  	for (i = 0; i < chip_rcv_contexts(dd); i++) {
13451  		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13452  		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13453  		for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
13454  			write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
13455  	}
13456  
13457  	/* RcvArray */
13458  	for (i = 0; i < chip_rcv_array_count(dd); i++)
13459  		hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
13460  
13461  	/* RcvQPMapTable */
13462  	for (i = 0; i < 32; i++)
13463  		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13464  }
13465  
13466  /*
13467   * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13468   */
13469  static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13470  			     u64 ctrl_bits)
13471  {
13472  	unsigned long timeout;
13473  	u64 reg;
13474  
13475  	/* is the condition present? */
13476  	reg = read_csr(dd, CCE_STATUS);
13477  	if ((reg & status_bits) == 0)
13478  		return;
13479  
13480  	/* clear the condition */
13481  	write_csr(dd, CCE_CTRL, ctrl_bits);
13482  
13483  	/* wait for the condition to clear */
13484  	timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13485  	while (1) {
13486  		reg = read_csr(dd, CCE_STATUS);
13487  		if ((reg & status_bits) == 0)
13488  			return;
13489  		if (time_after(jiffies, timeout)) {
13490  			dd_dev_err(dd,
13491  				   "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13492  				   status_bits, reg & status_bits);
13493  			return;
13494  		}
13495  		udelay(1);
13496  	}
13497  }
13498  
13499  /* set CCE CSRs to chip reset defaults */
13500  static void reset_cce_csrs(struct hfi1_devdata *dd)
13501  {
13502  	int i;
13503  
13504  	/* CCE_REVISION read-only */
13505  	/* CCE_REVISION2 read-only */
13506  	/* CCE_CTRL - bits clear automatically */
13507  	/* CCE_STATUS read-only, use CceCtrl to clear */
13508  	clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13509  	clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13510  	clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13511  	for (i = 0; i < CCE_NUM_SCRATCH; i++)
13512  		write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13513  	/* CCE_ERR_STATUS read-only */
13514  	write_csr(dd, CCE_ERR_MASK, 0);
13515  	write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13516  	/* CCE_ERR_FORCE leave alone */
13517  	for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13518  		write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13519  	write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13520  	/* CCE_PCIE_CTRL leave alone */
13521  	for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13522  		write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13523  		write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13524  			  CCE_MSIX_TABLE_UPPER_RESETCSR);
13525  	}
13526  	for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13527  		/* CCE_MSIX_PBA read-only */
13528  		write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13529  		write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13530  	}
13531  	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13532  		write_csr(dd, CCE_INT_MAP, 0);
13533  	for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13534  		/* CCE_INT_STATUS read-only */
13535  		write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13536  		write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13537  		/* CCE_INT_FORCE leave alone */
13538  		/* CCE_INT_BLOCKED read-only */
13539  	}
13540  	for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13541  		write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13542  }
13543  
13544  /* set MISC CSRs to chip reset defaults */
13545  static void reset_misc_csrs(struct hfi1_devdata *dd)
13546  {
13547  	int i;
13548  
13549  	for (i = 0; i < 32; i++) {
13550  		write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13551  		write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13552  		write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13553  	}
13554  	/*
13555  	 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13556  	 * only be written in 128-byte chunks
13557  	 */
13558  	/* init RSA engine to clear lingering errors */
13559  	write_csr(dd, MISC_CFG_RSA_CMD, 1);
13560  	write_csr(dd, MISC_CFG_RSA_MU, 0);
13561  	write_csr(dd, MISC_CFG_FW_CTRL, 0);
13562  	/* MISC_STS_8051_DIGEST read-only */
13563  	/* MISC_STS_SBM_DIGEST read-only */
13564  	/* MISC_STS_PCIE_DIGEST read-only */
13565  	/* MISC_STS_FAB_DIGEST read-only */
13566  	/* MISC_ERR_STATUS read-only */
13567  	write_csr(dd, MISC_ERR_MASK, 0);
13568  	write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13569  	/* MISC_ERR_FORCE leave alone */
13570  }
13571  
13572  /* set TXE CSRs to chip reset defaults */
13573  static void reset_txe_csrs(struct hfi1_devdata *dd)
13574  {
13575  	int i;
13576  
13577  	/*
13578  	 * TXE Kernel CSRs
13579  	 */
13580  	write_csr(dd, SEND_CTRL, 0);
13581  	__cm_reset(dd, 0);	/* reset CM internal state */
13582  	/* SEND_CONTEXTS read-only */
13583  	/* SEND_DMA_ENGINES read-only */
13584  	/* SEND_PIO_MEM_SIZE read-only */
13585  	/* SEND_DMA_MEM_SIZE read-only */
13586  	write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13587  	pio_reset_all(dd);	/* SEND_PIO_INIT_CTXT */
13588  	/* SEND_PIO_ERR_STATUS read-only */
13589  	write_csr(dd, SEND_PIO_ERR_MASK, 0);
13590  	write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13591  	/* SEND_PIO_ERR_FORCE leave alone */
13592  	/* SEND_DMA_ERR_STATUS read-only */
13593  	write_csr(dd, SEND_DMA_ERR_MASK, 0);
13594  	write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13595  	/* SEND_DMA_ERR_FORCE leave alone */
13596  	/* SEND_EGRESS_ERR_STATUS read-only */
13597  	write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13598  	write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13599  	/* SEND_EGRESS_ERR_FORCE leave alone */
13600  	write_csr(dd, SEND_BTH_QP, 0);
13601  	write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13602  	write_csr(dd, SEND_SC2VLT0, 0);
13603  	write_csr(dd, SEND_SC2VLT1, 0);
13604  	write_csr(dd, SEND_SC2VLT2, 0);
13605  	write_csr(dd, SEND_SC2VLT3, 0);
13606  	write_csr(dd, SEND_LEN_CHECK0, 0);
13607  	write_csr(dd, SEND_LEN_CHECK1, 0);
13608  	/* SEND_ERR_STATUS read-only */
13609  	write_csr(dd, SEND_ERR_MASK, 0);
13610  	write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13611  	/* SEND_ERR_FORCE read-only */
13612  	for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13613  		write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13614  	for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13615  		write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13616  	for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++)
13617  		write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13618  	for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13619  		write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13620  	for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13621  		write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13622  	write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13623  	write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13624  	/* SEND_CM_CREDIT_USED_STATUS read-only */
13625  	write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13626  	write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13627  	write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13628  	write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13629  	write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13630  	for (i = 0; i < TXE_NUM_DATA_VL; i++)
13631  		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13632  	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13633  	/* SEND_CM_CREDIT_USED_VL read-only */
13634  	/* SEND_CM_CREDIT_USED_VL15 read-only */
13635  	/* SEND_EGRESS_CTXT_STATUS read-only */
13636  	/* SEND_EGRESS_SEND_DMA_STATUS read-only */
13637  	write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13638  	/* SEND_EGRESS_ERR_INFO read-only */
13639  	/* SEND_EGRESS_ERR_SOURCE read-only */
13640  
13641  	/*
13642  	 * TXE Per-Context CSRs
13643  	 */
13644  	for (i = 0; i < chip_send_contexts(dd); i++) {
13645  		write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13646  		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13647  		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13648  		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13649  		write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13650  		write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13651  		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13652  		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13653  		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13654  		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13655  		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13656  		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13657  	}
13658  
13659  	/*
13660  	 * TXE Per-SDMA CSRs
13661  	 */
13662  	for (i = 0; i < chip_sdma_engines(dd); i++) {
13663  		write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13664  		/* SEND_DMA_STATUS read-only */
13665  		write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13666  		write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13667  		write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13668  		/* SEND_DMA_HEAD read-only */
13669  		write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13670  		write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13671  		/* SEND_DMA_IDLE_CNT read-only */
13672  		write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13673  		write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13674  		/* SEND_DMA_DESC_FETCHED_CNT read-only */
13675  		/* SEND_DMA_ENG_ERR_STATUS read-only */
13676  		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13677  		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13678  		/* SEND_DMA_ENG_ERR_FORCE leave alone */
13679  		write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13680  		write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13681  		write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13682  		write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13683  		write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13684  		write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13685  		write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13686  	}
13687  }
13688  
13689  /*
13690   * Expect on entry:
13691   * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13692   */
13693  static void init_rbufs(struct hfi1_devdata *dd)
13694  {
13695  	u64 reg;
13696  	int count;
13697  
13698  	/*
13699  	 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13700  	 * clear.
13701  	 */
13702  	count = 0;
13703  	while (1) {
13704  		reg = read_csr(dd, RCV_STATUS);
13705  		if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13706  			    | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13707  			break;
13708  		/*
13709  		 * Give up after 1ms - maximum wait time.
13710  		 *
13711  		 * RBuf size is 136KiB.  Slowest possible is PCIe Gen1 x1 at
13712  		 * 250MB/s bandwidth.  Lower rate to 66% for overhead to get:
13713  		 *	136 KB / (66% * 250MB/s) = 844us
13714  		 */
13715  		if (count++ > 500) {
13716  			dd_dev_err(dd,
13717  				   "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13718  				   __func__, reg);
13719  			break;
13720  		}
13721  		udelay(2); /* do not busy-wait the CSR */
13722  	}
13723  
13724  	/* start the init - expect RcvCtrl to be 0 */
13725  	write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13726  
13727  	/*
13728  	 * Read to force the write of RcvCtrl.RxRbufInit.  There is a brief
13729  	 * period after the write before RcvStatus.RxRbufInitDone is valid.
13730  	 * The delay in the first run through the loop below is sufficient and
13731  	 * required before the first read of RcvStatus.RxRbufInitDone.
13732  	 */
13733  	read_csr(dd, RCV_CTRL);
13734  
13735  	/* wait for the init to finish */
13736  	count = 0;
13737  	while (1) {
13738  		/* delay is required first time through - see above */
13739  		udelay(2); /* do not busy-wait the CSR */
13740  		reg = read_csr(dd, RCV_STATUS);
13741  		if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13742  			break;
13743  
13744  		/* give up after 100us - slowest possible at 33MHz is 73us */
13745  		if (count++ > 50) {
13746  			dd_dev_err(dd,
13747  				   "%s: RcvStatus.RxRbufInit not set, continuing\n",
13748  				   __func__);
13749  			break;
13750  		}
13751  	}
13752  }
13753  
13754  /* set RXE CSRs to chip reset defaults */
13755  static void reset_rxe_csrs(struct hfi1_devdata *dd)
13756  {
13757  	int i, j;
13758  
13759  	/*
13760  	 * RXE Kernel CSRs
13761  	 */
13762  	write_csr(dd, RCV_CTRL, 0);
13763  	init_rbufs(dd);
13764  	/* RCV_STATUS read-only */
13765  	/* RCV_CONTEXTS read-only */
13766  	/* RCV_ARRAY_CNT read-only */
13767  	/* RCV_BUF_SIZE read-only */
13768  	write_csr(dd, RCV_BTH_QP, 0);
13769  	write_csr(dd, RCV_MULTICAST, 0);
13770  	write_csr(dd, RCV_BYPASS, 0);
13771  	write_csr(dd, RCV_VL15, 0);
13772  	/* this is a clear-down */
13773  	write_csr(dd, RCV_ERR_INFO,
13774  		  RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13775  	/* RCV_ERR_STATUS read-only */
13776  	write_csr(dd, RCV_ERR_MASK, 0);
13777  	write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13778  	/* RCV_ERR_FORCE leave alone */
13779  	for (i = 0; i < 32; i++)
13780  		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13781  	for (i = 0; i < 4; i++)
13782  		write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13783  	for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13784  		write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13785  	for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13786  		write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13787  	for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
13788  		clear_rsm_rule(dd, i);
13789  	for (i = 0; i < 32; i++)
13790  		write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13791  
13792  	/*
13793  	 * RXE Kernel and User Per-Context CSRs
13794  	 */
13795  	for (i = 0; i < chip_rcv_contexts(dd); i++) {
13796  		/* kernel */
13797  		write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13798  		/* RCV_CTXT_STATUS read-only */
13799  		write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13800  		write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13801  		write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13802  		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13803  		write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13804  		write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13805  		write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13806  		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13807  		write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13808  		write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13809  
13810  		/* user */
13811  		/* RCV_HDR_TAIL read-only */
13812  		write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13813  		/* RCV_EGR_INDEX_TAIL read-only */
13814  		write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13815  		/* RCV_EGR_OFFSET_TAIL read-only */
13816  		for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13817  			write_uctxt_csr(dd, i,
13818  					RCV_TID_FLOW_TABLE + (8 * j), 0);
13819  		}
13820  	}
13821  }
13822  
13823  /*
13824   * Set sc2vl tables.
13825   *
13826   * They power on to zeros, so to avoid send context errors
13827   * they need to be set:
13828   *
13829   * SC 0-7 -> VL 0-7 (respectively)
13830   * SC 15  -> VL 15
13831   * otherwise
13832   *        -> VL 0
13833   */
13834  static void init_sc2vl_tables(struct hfi1_devdata *dd)
13835  {
13836  	int i;
13837  	/* init per architecture spec, constrained by hardware capability */
13838  
13839  	/* HFI maps sent packets */
13840  	write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13841  		0,
13842  		0, 0, 1, 1,
13843  		2, 2, 3, 3,
13844  		4, 4, 5, 5,
13845  		6, 6, 7, 7));
13846  	write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13847  		1,
13848  		8, 0, 9, 0,
13849  		10, 0, 11, 0,
13850  		12, 0, 13, 0,
13851  		14, 0, 15, 15));
13852  	write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13853  		2,
13854  		16, 0, 17, 0,
13855  		18, 0, 19, 0,
13856  		20, 0, 21, 0,
13857  		22, 0, 23, 0));
13858  	write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13859  		3,
13860  		24, 0, 25, 0,
13861  		26, 0, 27, 0,
13862  		28, 0, 29, 0,
13863  		30, 0, 31, 0));
13864  
13865  	/* DC maps received packets */
13866  	write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13867  		15_0,
13868  		0, 0, 1, 1,  2, 2,  3, 3,  4, 4,  5, 5,  6, 6,  7,  7,
13869  		8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13870  	write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13871  		31_16,
13872  		16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13873  		24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13874  
13875  	/* initialize the cached sc2vl values consistently with h/w */
13876  	for (i = 0; i < 32; i++) {
13877  		if (i < 8 || i == 15)
13878  			*((u8 *)(dd->sc2vl) + i) = (u8)i;
13879  		else
13880  			*((u8 *)(dd->sc2vl) + i) = 0;
13881  	}
13882  }
13883  
13884  /*
13885   * Read chip sizes and then reset parts to sane, disabled values.  We cannot
13886   * depend on the chip going through a power-on reset - a driver may be loaded
13887   * and unloaded many times.
13888   *
13889   * Do not write any CSR values to the chip in this routine - there may be
13890   * a reset following the (possible) FLR in this routine.
13891   *
13892   */
13893  static int init_chip(struct hfi1_devdata *dd)
13894  {
13895  	int i;
13896  	int ret = 0;
13897  
13898  	/*
13899  	 * Put the HFI CSRs in a known state.
13900  	 * Combine this with a DC reset.
13901  	 *
13902  	 * Stop the device from doing anything while we do a
13903  	 * reset.  We know there are no other active users of
13904  	 * the device since we are now in charge.  Turn off
13905  	 * all outbound and inbound traffic and make sure
13906  	 * the device does not generate any interrupts.
13907  	 */
13908  
13909  	/* disable send contexts and SDMA engines */
13910  	write_csr(dd, SEND_CTRL, 0);
13911  	for (i = 0; i < chip_send_contexts(dd); i++)
13912  		write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13913  	for (i = 0; i < chip_sdma_engines(dd); i++)
13914  		write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13915  	/* disable port (turn off RXE inbound traffic) and contexts */
13916  	write_csr(dd, RCV_CTRL, 0);
13917  	for (i = 0; i < chip_rcv_contexts(dd); i++)
13918  		write_csr(dd, RCV_CTXT_CTRL, 0);
13919  	/* mask all interrupt sources */
13920  	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13921  		write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
13922  
13923  	/*
13924  	 * DC Reset: do a full DC reset before the register clear.
13925  	 * A recommended length of time to hold is one CSR read,
13926  	 * so reread the CceDcCtrl.  Then, hold the DC in reset
13927  	 * across the clear.
13928  	 */
13929  	write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13930  	(void)read_csr(dd, CCE_DC_CTRL);
13931  
13932  	if (use_flr) {
13933  		/*
13934  		 * A FLR will reset the SPC core and part of the PCIe.
13935  		 * The parts that need to be restored have already been
13936  		 * saved.
13937  		 */
13938  		dd_dev_info(dd, "Resetting CSRs with FLR\n");
13939  
13940  		/* do the FLR, the DC reset will remain */
13941  		pcie_flr(dd->pcidev);
13942  
13943  		/* restore command and BARs */
13944  		ret = restore_pci_variables(dd);
13945  		if (ret) {
13946  			dd_dev_err(dd, "%s: Could not restore PCI variables\n",
13947  				   __func__);
13948  			return ret;
13949  		}
13950  
13951  		if (is_ax(dd)) {
13952  			dd_dev_info(dd, "Resetting CSRs with FLR\n");
13953  			pcie_flr(dd->pcidev);
13954  			ret = restore_pci_variables(dd);
13955  			if (ret) {
13956  				dd_dev_err(dd, "%s: Could not restore PCI variables\n",
13957  					   __func__);
13958  				return ret;
13959  			}
13960  		}
13961  	} else {
13962  		dd_dev_info(dd, "Resetting CSRs with writes\n");
13963  		reset_cce_csrs(dd);
13964  		reset_txe_csrs(dd);
13965  		reset_rxe_csrs(dd);
13966  		reset_misc_csrs(dd);
13967  	}
13968  	/* clear the DC reset */
13969  	write_csr(dd, CCE_DC_CTRL, 0);
13970  
13971  	/* Set the LED off */
13972  	setextled(dd, 0);
13973  
13974  	/*
13975  	 * Clear the QSFP reset.
13976  	 * An FLR enforces a 0 on all out pins. The driver does not touch
13977  	 * ASIC_QSFPn_OUT otherwise.  This leaves RESET_N low, holding
13978  	 * anything plugged in constantly in reset if it pays attention
13979  	 * to RESET_N.
13980  	 * Prime examples of this are optical cables. Set all pins high.
13981  	 * I2CCLK and I2CDAT will change per direction, and INT_N and
13982  	 * MODPRS_N are input only and their value is ignored.
13983  	 */
13984  	write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13985  	write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
13986  	init_chip_resources(dd);
13987  	return ret;
13988  }
13989  
13990  static void init_early_variables(struct hfi1_devdata *dd)
13991  {
13992  	int i;
13993  
13994  	/* assign link credit variables */
13995  	dd->vau = CM_VAU;
13996  	dd->link_credits = CM_GLOBAL_CREDITS;
13997  	if (is_ax(dd))
13998  		dd->link_credits--;
13999  	dd->vcu = cu_to_vcu(hfi1_cu);
14000  	/* enough room for 8 MAD packets plus header - 17K */
14001  	dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
14002  	if (dd->vl15_init > dd->link_credits)
14003  		dd->vl15_init = dd->link_credits;
14004  
14005  	write_uninitialized_csrs_and_memories(dd);
14006  
14007  	if (HFI1_CAP_IS_KSET(PKEY_CHECK))
14008  		for (i = 0; i < dd->num_pports; i++) {
14009  			struct hfi1_pportdata *ppd = &dd->pport[i];
14010  
14011  			set_partition_keys(ppd);
14012  		}
14013  	init_sc2vl_tables(dd);
14014  }
14015  
14016  static void init_kdeth_qp(struct hfi1_devdata *dd)
14017  {
14018  	/* user changed the KDETH_QP */
14019  	if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
14020  		/* out of range or illegal value */
14021  		dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
14022  		kdeth_qp = 0;
14023  	}
14024  	if (kdeth_qp == 0)	/* not set, or failed range check */
14025  		kdeth_qp = DEFAULT_KDETH_QP;
14026  
14027  	write_csr(dd, SEND_BTH_QP,
14028  		  (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
14029  		  SEND_BTH_QP_KDETH_QP_SHIFT);
14030  
14031  	write_csr(dd, RCV_BTH_QP,
14032  		  (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
14033  		  RCV_BTH_QP_KDETH_QP_SHIFT);
14034  }
14035  
14036  /**
14037   * hfi1_get_qp_map - read a byte from the QP map table
14038   * @dd: device data
14039   * @idx: index to read
14040   */
14041  u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx)
14042  {
14043  	u64 reg = read_csr(dd, RCV_QP_MAP_TABLE + (idx / 8) * 8);
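	/*
	 * Each 64-bit RCV_QP_MAP_TABLE register packs 8 one-byte context
	 * entries; idx / 8 picks the register and idx % 8 the byte.
	 */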
14044  
14045  	reg >>= (idx % 8) * 8;
14046  	return reg;
14047  }
14048  
14049  /**
14050   * init_qpmap_table
14051   * @dd: device data
14052   * @first_ctxt: first context
14053   * @last_ctxt: last context
14054   *
14055   * This routine sets the qpn mapping table that
14056   * is indexed by qpn[8:1].
14057   *
14058   * The routine will round robin the 256 settings
14059   * from first_ctxt to last_ctxt.
14060   *
14061   * The first/last looks ahead to having specialized
14062   * receive contexts for mgmt and bypass.  Normal
14063   * verbs traffic is assumed to be on a range
14064   * of receive contexts.
14065   */
14066  static void init_qpmap_table(struct hfi1_devdata *dd,
14067  			     u32 first_ctxt,
14068  			     u32 last_ctxt)
14069  {
14070  	u64 reg = 0;
14071  	u64 regno = RCV_QP_MAP_TABLE;
14072  	int i;
14073  	u64 ctxt = first_ctxt;
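	/*
	 * For example, with first_ctxt 1 and last_ctxt 3, table indexes
	 * 0..255 are assigned contexts 1, 2, 3, 1, 2, 3, ...
	 */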
14074  
14075  	for (i = 0; i < 256; i++) {
14076  		reg |= ctxt << (8 * (i % 8));
14077  		ctxt++;
14078  		if (ctxt > last_ctxt)
14079  			ctxt = first_ctxt;
14080  		if (i % 8 == 7) {
14081  			write_csr(dd, regno, reg);
14082  			reg = 0;
14083  			regno += 8;
14084  		}
14085  	}
14086  
14087  	add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
14088  			| RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
14089  }
14090  
14091  struct rsm_map_table {
14092  	u64 map[NUM_MAP_REGS];
14093  	unsigned int used;
14094  };
14095  
14096  struct rsm_rule_data {
14097  	u8 offset;
14098  	u8 pkt_type;
14099  	u32 field1_off;
14100  	u32 field2_off;
14101  	u32 index1_off;
14102  	u32 index1_width;
14103  	u32 index2_off;
14104  	u32 index2_width;
14105  	u32 mask1;
14106  	u32 value1;
14107  	u32 mask2;
14108  	u32 value2;
14109  };
14110  
14111  /*
14112   * Return an initialized RMT map table for users to fill in.  OK if it
14113   * returns NULL, indicating no table.
14114   */
14115  static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
14116  {
14117  	struct rsm_map_table *rmt;
14118  	u8 rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is default if a0 ver. */
14119  
14120  	rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
14121  	if (rmt) {
14122  		memset(rmt->map, rxcontext, sizeof(rmt->map));
14123  		rmt->used = 0;
14124  	}
14125  
14126  	return rmt;
14127  }
14128  
14129  /*
14130   * Write the final RMT map table to the chip and free the table.  OK if
14131   * table is NULL.
14132   */
14133  static void complete_rsm_map_table(struct hfi1_devdata *dd,
14134  				   struct rsm_map_table *rmt)
14135  {
14136  	int i;
14137  
14138  	if (rmt) {
14139  		/* write table to chip */
14140  		for (i = 0; i < NUM_MAP_REGS; i++)
14141  			write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
14142  
14143  		/* enable RSM */
14144  		add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14145  	}
14146  }
14147  
14148  /*
14149   * Add a receive side mapping rule.
14150   */
14151  static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
14152  			 struct rsm_rule_data *rrd)
14153  {
14154  	write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
14155  		  (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
14156  		  1ull << rule_index | /* enable bit */
14157  		  (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
14158  	write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
14159  		  (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
14160  		  (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
14161  		  (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
14162  		  (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
14163  		  (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
14164  		  (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
14165  	write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
14166  		  (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
14167  		  (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
14168  		  (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
14169  		  (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
14170  }
14171  
14172  /*
14173   * Clear a receive side mapping rule.
14174   */
14175  static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
14176  {
14177  	write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
14178  	write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
14179  	write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
14180  }
14181  
14182  /* return the number of RSM map table entries that will be used for QOS */
14183  static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
14184  			   unsigned int *np)
14185  {
14186  	int i;
14187  	unsigned int m, n;
14188  	u8 max_by_vl = 0;
14189  
14190  	/* is QOS active at all? */
14191  	if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
14192  	    num_vls == 1 ||
14193  	    krcvqsset <= 1)
14194  		goto no_qos;
14195  
14196  	/* determine bits for qpn */
14197  	for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
14198  		if (krcvqs[i] > max_by_vl)
14199  			max_by_vl = krcvqs[i];
14200  	if (max_by_vl > 32)
14201  		goto no_qos;
14202  	m = ilog2(__roundup_pow_of_two(max_by_vl));
14203  
14204  	/* determine bits for vl */
14205  	n = ilog2(__roundup_pow_of_two(num_vls));
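	/*
	 * Example (hypothetical krcvqs values): max_by_vl 4 and num_vls 8
	 * give m = 2 and n = 3, for 1 << (2 + 3) = 32 RMT entries.
	 */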
14206  
14207  	/* reject if too much is used */
14208  	if ((m + n) > 7)
14209  		goto no_qos;
14210  
14211  	if (mp)
14212  		*mp = m;
14213  	if (np)
14214  		*np = n;
14215  
14216  	return 1 << (m + n);
14217  
14218  no_qos:
14219  	if (mp)
14220  		*mp = 0;
14221  	if (np)
14222  		*np = 0;
14223  	return 0;
14224  }
14225  
14226  /**
14227   * init_qos - init RX qos
14228   * @dd: device data
14229   * @rmt: RSM map table
14230   *
14231   * This routine initializes Rule 0 and the RSM map table to implement
14232   * quality of service (qos).
14233   *
14234   * If all of the limit tests succeed, qos is applied based on the array
14235   * interpretation of krcvqs where entry 0 is VL0.
14236   *
14237   * The number of vl bits (n) and the number of qpn bits (m) are computed to
14238   * feed both the RSM map table and the single rule.
14239   */
14240  static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
14241  {
14242  	struct rsm_rule_data rrd;
14243  	unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
14244  	unsigned int rmt_entries;
14245  	u64 reg;
14246  
14247  	if (!rmt)
14248  		goto bail;
14249  	rmt_entries = qos_rmt_entries(dd, &m, &n);
14250  	if (rmt_entries == 0)
14251  		goto bail;
14252  	qpns_per_vl = 1 << m;
14253  
14254  	/* enough room in the map table? */
14255  	rmt_entries = 1 << (m + n);
14256  	if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
14257  		goto bail;
14258  
14259  	/* add qos entries to the RSM map table */
14260  	for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
14261  		unsigned tctxt;
14262  
14263  		for (qpn = 0, tctxt = ctxt;
14264  		     krcvqs[i] && qpn < qpns_per_vl; qpn++) {
14265  			unsigned idx, regoff, regidx;
14266  
14267  			/* generate the index the hardware will produce */
14268  			idx = rmt->used + ((qpn << n) ^ i);
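			/*
			 * With qpn shifted left by n and XORed with the VL
			 * index i, the low n bits of idx carry the VL and
			 * the upper bits carry the QPN.
			 */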
14269  			regoff = (idx % 8) * 8;
14270  			regidx = idx / 8;
14271  			/* replace default with context number */
14272  			reg = rmt->map[regidx];
14273  			reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
14274  				<< regoff);
14275  			reg |= (u64)(tctxt++) << regoff;
14276  			rmt->map[regidx] = reg;
14277  			if (tctxt == ctxt + krcvqs[i])
14278  				tctxt = ctxt;
14279  		}
14280  		ctxt += krcvqs[i];
14281  	}
14282  
14283  	rrd.offset = rmt->used;
14284  	rrd.pkt_type = 2;
14285  	rrd.field1_off = LRH_BTH_MATCH_OFFSET;
14286  	rrd.field2_off = LRH_SC_MATCH_OFFSET;
14287  	rrd.index1_off = LRH_SC_SELECT_OFFSET;
14288  	rrd.index1_width = n;
14289  	rrd.index2_off = QPN_SELECT_OFFSET;
14290  	rrd.index2_width = m + n;
14291  	rrd.mask1 = LRH_BTH_MASK;
14292  	rrd.value1 = LRH_BTH_VALUE;
14293  	rrd.mask2 = LRH_SC_MASK;
14294  	rrd.value2 = LRH_SC_VALUE;
14295  
14296  	/* add rule 0 */
14297  	add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
14298  
14299  	/* mark RSM map entries as used */
14300  	rmt->used += rmt_entries;
14301  	/* map everything else to the mcast/err/vl15 context */
14302  	init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
14303  	dd->qos_shift = n + 1;
14304  	return;
14305  bail:
14306  	dd->qos_shift = 1;
14307  	init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
14308  }
14309  
14310  static void init_fecn_handling(struct hfi1_devdata *dd,
14311  			       struct rsm_map_table *rmt)
14312  {
14313  	struct rsm_rule_data rrd;
14314  	u64 reg;
14315  	int i, idx, regoff, regidx, start;
14316  	u8 offset;
14317  	u32 total_cnt;
14318  
14319  	if (HFI1_CAP_IS_KSET(TID_RDMA))
14320  		/* Exclude context 0 */
14321  		start = 1;
14322  	else
14323  		start = dd->first_dyn_alloc_ctxt;
14324  
14325  	total_cnt = dd->num_rcv_contexts - start;
14326  
14327  	/* there needs to be enough room in the map table */
14328  	if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
14329  		dd_dev_err(dd, "FECN handling disabled - too many contexts allocated\n");
14330  		return;
14331  	}
14332  
14333  	/*
14334  	 * RSM will extract the destination context as an index into the
14335  	 * map table.  The destination contexts are a sequential block
14336  	 * in the range start...num_rcv_contexts-1 (inclusive).
14337  	 * Map entries are accessed as offset + extracted value.  Adjust
14338  	 * the added offset so this sequence can be placed anywhere in
14339  	 * the table - as long as the entries themselves do not wrap.
14340  	 * There are only enough bits in offset for the table size, so
14341  	 * start with that to allow for a "negative" offset.
14342  	 */
14343  	offset = (u8)(NUM_MAP_ENTRIES + rmt->used - start);
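	/*
	 * Worked example (hypothetical values, assuming NUM_MAP_ENTRIES is
	 * 256): rmt->used 40 and start 3 give offset (u8)(293) = 37, so
	 * context 3 maps to entry 37 + 3 = 40 == rmt->used.
	 */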
14344  
14345  	for (i = start, idx = rmt->used; i < dd->num_rcv_contexts;
14346  	     i++, idx++) {
14347  		/* replace with identity mapping */
14348  		regoff = (idx % 8) * 8;
14349  		regidx = idx / 8;
14350  		reg = rmt->map[regidx];
14351  		reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
14352  		reg |= (u64)i << regoff;
14353  		rmt->map[regidx] = reg;
14354  	}
14355  
14356  	/*
14357  	 * For RSM intercept of Expected FECN packets:
14358  	 * o packet type 0 - expected
14359  	 * o match on F (bit 95), using select/match 1, and
14360  	 * o match on SH (bit 133), using select/match 2.
14361  	 *
14362  	 * Use index 1 to extract the 8-bit receive context from DestQP
14363  	 * (start at bit 64).  Use that as the RSM map table index.
14364  	 */
14365  	rrd.offset = offset;
14366  	rrd.pkt_type = 0;
14367  	rrd.field1_off = 95;
14368  	rrd.field2_off = 133;
14369  	rrd.index1_off = 64;
14370  	rrd.index1_width = 8;
14371  	rrd.index2_off = 0;
14372  	rrd.index2_width = 0;
14373  	rrd.mask1 = 1;
14374  	rrd.value1 = 1;
14375  	rrd.mask2 = 1;
14376  	rrd.value2 = 1;
14377  
14378  	/* add rule 1 */
14379  	add_rsm_rule(dd, RSM_INS_FECN, &rrd);
14380  
14381  	rmt->used += total_cnt;
14382  }
14383  
14384  /* Initialize RSM for VNIC */
14385  void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
14386  {
14387  	u8 i, j;
14388  	u8 ctx_id = 0;
14389  	u64 reg;
14390  	u32 regoff;
14391  	struct rsm_rule_data rrd;
14392  
14393  	if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
14394  		dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
14395  			   dd->vnic.rmt_start);
14396  		return;
14397  	}
14398  
14399  	dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
14400  		dd->vnic.rmt_start,
14401  		dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
14402  
14403  	/* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
14404  	regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
14405  	reg = read_csr(dd, regoff);
14406  	for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
14407  		/* Update map register with vnic context */
14408  		j = (dd->vnic.rmt_start + i) % 8;
14409  		reg &= ~(0xffllu << (j * 8));
14410  		reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
14411  		/* Wrap up vnic ctx index */
14412  		ctx_id %= dd->vnic.num_ctxt;
14413  		/* Write back map register */
14414  		if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
14415  			dev_dbg(&(dd)->pcidev->dev,
14416  				"Vnic rsm map reg[%d] =0x%llx\n",
14417  				regoff - RCV_RSM_MAP_TABLE, reg);
14418  
14419  			write_csr(dd, regoff, reg);
14420  			regoff += 8;
14421  			if (i < (NUM_VNIC_MAP_ENTRIES - 1))
14422  				reg = read_csr(dd, regoff);
14423  		}
14424  	}
14425  
14426  	/* Add rule for vnic */
14427  	rrd.offset = dd->vnic.rmt_start;
14428  	rrd.pkt_type = 4;
14429  	/* Match 16B packets */
14430  	rrd.field1_off = L2_TYPE_MATCH_OFFSET;
14431  	rrd.mask1 = L2_TYPE_MASK;
14432  	rrd.value1 = L2_16B_VALUE;
14433  	/* Match ETH L4 packets */
14434  	rrd.field2_off = L4_TYPE_MATCH_OFFSET;
14435  	rrd.mask2 = L4_16B_TYPE_MASK;
14436  	rrd.value2 = L4_16B_ETH_VALUE;
14437  	/* Calc context from veswid and entropy */
14438  	rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
14439  	rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14440  	rrd.index2_off = L2_16B_ENTROPY_OFFSET;
14441  	rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14442  	add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
14443  
14444  	/* Enable RSM if not already enabled */
14445  	add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14446  }
14447  
14448  void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
14449  {
14450  	clear_rsm_rule(dd, RSM_INS_VNIC);
14451  
14452  	/* Disable RSM if used only by vnic */
14453  	if (dd->vnic.rmt_start == 0)
14454  		clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14455  }
14456  
14457  static int init_rxe(struct hfi1_devdata *dd)
14458  {
14459  	struct rsm_map_table *rmt;
14460  	u64 val;
14461  
14462  	/* enable all receive errors */
14463  	write_csr(dd, RCV_ERR_MASK, ~0ull);
14464  
14465  	rmt = alloc_rsm_map_table(dd);
14466  	if (!rmt)
14467  		return -ENOMEM;
14468  
14469  	/* set up QOS, including the QPN map table */
14470  	init_qos(dd, rmt);
14471  	init_fecn_handling(dd, rmt);
14472  	complete_rsm_map_table(dd, rmt);
14473  	/* record number of used rsm map entries for vnic */
14474  	dd->vnic.rmt_start = rmt->used;
14475  	kfree(rmt);
14476  
14477  	/*
14478  	 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14479  	 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14480  	 * space, PciCfgCap2.MaxPayloadSize in HFI).  There is only one
14481  	 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14482  	 * Max_Payload_Size set to its minimum of 128.
14483  	 *
14484  	 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14485  	 * (64 bytes).  Max_Payload_Size is possibly modified upward in
14486  	 * tune_pcie_caps() which is called after this routine.
14487  	 */
14488  
14489  	/* Have 16 bytes (4DW) of bypass header available in header queue */
14490  	val = read_csr(dd, RCV_BYPASS);
14491  	val &= ~RCV_BYPASS_HDR_SIZE_SMASK;
14492  	val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
14493  		RCV_BYPASS_HDR_SIZE_SHIFT);
14494  	write_csr(dd, RCV_BYPASS, val);
14495  	return 0;
14496  }
14497  
14498  static void init_other(struct hfi1_devdata *dd)
14499  {
14500  	/* enable all CCE errors */
14501  	write_csr(dd, CCE_ERR_MASK, ~0ull);
14502  	/* enable *some* Misc errors */
14503  	write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14504  	/* enable all DC errors, except LCB */
14505  	write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14506  	write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14507  }
14508  
14509  /*
14510   * Fill out the given AU table using the given CU.  A CU is defined in terms
14511   * of AUs.  The table is an encoding: given the index, how many AUs does that
14512   * index represent?
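 * The table programmed below encodes: index 0 -> 0 AUs, 1 -> 1 AU,
 * 2 -> 2*CU, 3 -> 4*CU, 4 -> 8*CU, 5 -> 16*CU, 6 -> 32*CU, 7 -> 64*CU AUs.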
14513   *
14514   * NOTE: Assumes that the register layout is the same for the
14515   * local and remote tables.
14516   */
14517  static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14518  			       u32 csr0to3, u32 csr4to7)
14519  {
14520  	write_csr(dd, csr0to3,
14521  		  0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14522  		  1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14523  		  2ull * cu <<
14524  		  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14525  		  4ull * cu <<
14526  		  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
14527  	write_csr(dd, csr4to7,
14528  		  8ull * cu <<
14529  		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14530  		  16ull * cu <<
14531  		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14532  		  32ull * cu <<
14533  		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14534  		  64ull * cu <<
14535  		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
14536  }
14537  
14538  static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14539  {
14540  	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
14541  			   SEND_CM_LOCAL_AU_TABLE4_TO7);
14542  }
14543  
14544  void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14545  {
14546  	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
14547  			   SEND_CM_REMOTE_AU_TABLE4_TO7);
14548  }
14549  
14550  static void init_txe(struct hfi1_devdata *dd)
14551  {
14552  	int i;
14553  
14554  	/* enable all PIO, SDMA, general, and Egress errors */
14555  	write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14556  	write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14557  	write_csr(dd, SEND_ERR_MASK, ~0ull);
14558  	write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14559  
14560  	/* enable all per-context and per-SDMA engine errors */
14561  	for (i = 0; i < chip_send_contexts(dd); i++)
14562  		write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14563  	for (i = 0; i < chip_sdma_engines(dd); i++)
14564  		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14565  
14566  	/* set the local CU to AU mapping */
14567  	assign_local_cm_au_table(dd, dd->vcu);
14568  
14569  	/*
14570  	 * Set reasonable default for Credit Return Timer
14571  	 * Don't set on Simulator - causes it to choke.
14572  	 */
14573  	if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14574  		write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14575  }
14576  
14577  int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14578  		       u16 jkey)
14579  {
14580  	u8 hw_ctxt;
14581  	u64 reg;
14582  
14583  	if (!rcd || !rcd->sc)
14584  		return -EINVAL;
14585  
14586  	hw_ctxt = rcd->sc->hw_context;
14587  	reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14588  		((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14589  		 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14590  	/* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14591  	if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14592  		reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14593  	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14594  	/*
14595  	 * Enable send-side J_KEY integrity check, unless this is A0 h/w
14596  	 */
14597  	if (!is_ax(dd)) {
14598  		reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14599  		reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14600  		write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14601  	}
14602  
14603  	/* Enable J_KEY check on receive context. */
14604  	reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14605  		((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14606  		 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14607  	write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);
14608  
14609  	return 0;
14610  }
14611  
14612  int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
14613  {
14614  	u8 hw_ctxt;
14615  	u64 reg;
14616  
14617  	if (!rcd || !rcd->sc)
14618  		return -EINVAL;
14619  
14620  	hw_ctxt = rcd->sc->hw_context;
14621  	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14622  	/*
14623  	 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14624  	 * This check would not have been enabled for A0 h/w, see
14625  	 * hfi1_set_ctxt_jkey().
14626  	 */
14627  	if (!is_ax(dd)) {
14628  		reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14629  		reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14630  		write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14631  	}
14632  	/* Turn off the J_KEY on the receive side */
14633  	write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);
14634  
14635  	return 0;
14636  }
14637  
14638  int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14639  		       u16 pkey)
14640  {
14641  	u8 hw_ctxt;
14642  	u64 reg;
14643  
14644  	if (!rcd || !rcd->sc)
14645  		return -EINVAL;
14646  
14647  	hw_ctxt = rcd->sc->hw_context;
14648  	reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14649  		SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14650  	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14651  	reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14652  	reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14653  	reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
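	/* allow KDETH packets on this send context now that a pkey is set */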
14654  	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14655  
14656  	return 0;
14657  }
14658  
14659  int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
14660  {
14661  	u8 hw_ctxt;
14662  	u64 reg;
14663  
14664  	if (!ctxt || !ctxt->sc)
14665  		return -EINVAL;
14666  
14667  	hw_ctxt = ctxt->sc->hw_context;
14668  	reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14669  	reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14670  	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14671  	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14672  
14673  	return 0;
14674  }
14675  
14676  /*
14677   * Start doing the clean up of the chip. Our clean up happens in multiple
14678   * stages and this is just the first.
14679   */
14680  void hfi1_start_cleanup(struct hfi1_devdata *dd)
14681  {
14682  	aspm_exit(dd);
14683  	free_cntrs(dd);
14684  	free_rcverr(dd);
14685  	finish_chip_resources(dd);
14686  }
14687  
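/*
 * Mask off the per-HFI GUID index bit so the two HFIs on the same ASIC
 * report the same base GUID; init_asic_data() relies on this to find
 * its peer device.
 */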
14688  #define HFI_BASE_GUID(dev) \
14689  	((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14690  
14691  /*
14692   * Information can be shared between the two HFIs on the same ASIC
14693   * in the same OS.  This function finds the peer device and sets
14694   * up a shared structure.
14695   */
14696  static int init_asic_data(struct hfi1_devdata *dd)
14697  {
14698  	unsigned long index;
14699  	struct hfi1_devdata *peer;
14700  	struct hfi1_asic_data *asic_data;
14701  	int ret = 0;
14702  
14703  	/* pre-allocate the asic structure in case we are the first device */
14704  	asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14705  	if (!asic_data)
14706  		return -ENOMEM;
14707  
14708  	xa_lock_irq(&hfi1_dev_table);
14709  	/* Find our peer device */
14710  	xa_for_each(&hfi1_dev_table, index, peer) {
14711  		if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(peer)) &&
14712  		    dd->unit != peer->unit)
14713  			break;
14714  	}
14715  
14716  	if (peer) {
14717  		/* use already allocated structure */
14718  		dd->asic_data = peer->asic_data;
14719  		kfree(asic_data);
14720  	} else {
14721  		dd->asic_data = asic_data;
14722  		mutex_init(&dd->asic_data->asic_resource_mutex);
14723  	}
14724  	dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14725  	xa_unlock_irq(&hfi1_dev_table);
14726  
14727  	/* first one through - set up i2c devices */
14728  	if (!peer)
14729  		ret = set_up_i2c(dd, dd->asic_data);
14730  
14731  	return ret;
14732  }
14733  
14734  /*
14735   * Set dd->boardname.  Use a generic name if a name is not returned from
14736   * EFI variable space.
14737   *
14738   * Return 0 on success, -ENOMEM if space could not be allocated.
14739   */
14740  static int obtain_boardname(struct hfi1_devdata *dd)
14741  {
14742  	/* generic board description */
14743  	const char generic[] =
14744  		"Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14745  	unsigned long size;
14746  	int ret;
14747  
14748  	ret = read_hfi1_efi_var(dd, "description", &size,
14749  				(void **)&dd->boardname);
14750  	if (ret) {
14751  		dd_dev_info(dd, "Board description not found\n");
14752  		/* use generic description */
14753  		dd->boardname = kstrdup(generic, GFP_KERNEL);
14754  		if (!dd->boardname)
14755  			return -ENOMEM;
14756  	}
14757  	return 0;
14758  }
14759  
14760  /*
14761   * Check the interrupt registers to make sure that they are mapped correctly.
14762   * It is intended to help the user identify any mismapping by the VMM when
14763   * the driver is running in a VM. This function should only be called before
14764   * interrupts are set up properly.
14765   *
14766   * Return 0 on success, -EINVAL on failure.
14767   */
14768  static int check_int_registers(struct hfi1_devdata *dd)
14769  {
14770  	u64 reg;
14771  	u64 all_bits = ~(u64)0;
14772  	u64 mask;
14773  
14774  	/* Clear CceIntMask[0] to avoid raising any interrupts */
14775  	mask = read_csr(dd, CCE_INT_MASK);
14776  	write_csr(dd, CCE_INT_MASK, 0ull);
14777  	reg = read_csr(dd, CCE_INT_MASK);
14778  	if (reg)
14779  		goto err_exit;
14780  
14781  	/* Clear all interrupt status bits */
14782  	write_csr(dd, CCE_INT_CLEAR, all_bits);
14783  	reg = read_csr(dd, CCE_INT_STATUS);
14784  	if (reg)
14785  		goto err_exit;
14786  
14787  	/* Set all interrupt status bits */
14788  	write_csr(dd, CCE_INT_FORCE, all_bits);
14789  	reg = read_csr(dd, CCE_INT_STATUS);
14790  	if (reg != all_bits)
14791  		goto err_exit;
14792  
14793  	/* Restore the interrupt mask */
14794  	write_csr(dd, CCE_INT_CLEAR, all_bits);
14795  	write_csr(dd, CCE_INT_MASK, mask);
14796  
14797  	return 0;
14798  err_exit:
14799  	write_csr(dd, CCE_INT_MASK, mask);
14800  	dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14801  	return -EINVAL;
14802  }
14803  
14804  /**
14805   * hfi1_init_dd() - Initialize most of the dd structure.
14806   * @dd: the hfi1_devdata for this device
14808   *
14809   * This is global, and is called directly at init to set up the
14810   * chip-specific function pointers for later use.
14811   */
14812  int hfi1_init_dd(struct hfi1_devdata *dd)
14813  {
14814  	struct pci_dev *pdev = dd->pcidev;
14815  	struct hfi1_pportdata *ppd;
14816  	u64 reg;
14817  	int i, ret;
14818  	static const char * const inames[] = { /* implementation names */
14819  		"RTL silicon",
14820  		"RTL VCS simulation",
14821  		"RTL FPGA emulation",
14822  		"Functional simulator"
14823  	};
14824  	struct pci_dev *parent = pdev->bus->self;
14825  	u32 sdma_engines = chip_sdma_engines(dd);
14826  
14827  	ppd = dd->pport;
14828  	for (i = 0; i < dd->num_pports; i++, ppd++) {
14829  		int vl;
14830  		/* init common fields */
14831  		hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14832  		/* DC supports 4 link widths */
14833  		ppd->link_width_supported =
14834  			OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14835  			OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14836  		ppd->link_width_downgrade_supported =
14837  			ppd->link_width_supported;
14838  		/* start out enabling only 4X */
14839  		ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14840  		ppd->link_width_downgrade_enabled =
14841  					ppd->link_width_downgrade_supported;
14842  		/* link width active is 0 when link is down */
14843  		/* link width downgrade active is 0 when link is down */
14844  
14845  		if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14846  		    num_vls > HFI1_MAX_VLS_SUPPORTED) {
14847  			dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n",
14848  				   num_vls, HFI1_MAX_VLS_SUPPORTED);
14849  			num_vls = HFI1_MAX_VLS_SUPPORTED;
14850  		}
14851  		ppd->vls_supported = num_vls;
14852  		ppd->vls_operational = ppd->vls_supported;
14853  		/* Set the default MTU. */
14854  		for (vl = 0; vl < num_vls; vl++)
14855  			dd->vld[vl].mtu = hfi1_max_mtu;
14856  		dd->vld[15].mtu = MAX_MAD_PACKET;
14857  		/*
14858  		 * Set the initial values to reasonable defaults; they will be
14859  		 * set for real when the link is up.
14860  		 */
14861  		ppd->overrun_threshold = 0x4;
14862  		ppd->phy_error_threshold = 0xf;
14863  		ppd->port_crc_mode_enabled = link_crc_mask;
14864  		/* initialize supported LTP CRC mode */
14865  		ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14866  		/* initialize enabled LTP CRC mode */
14867  		ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
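		/* (the two shifts keep supported and enabled modes in separate fields) */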
14868  		/* start in offline */
14869  		ppd->host_link_state = HLS_DN_OFFLINE;
14870  		init_vl_arb_caches(ppd);
14871  	}
14872  
14873  	/*
14874  	 * Do remaining PCIe setup and save PCIe values in dd.
14875  	 * Any error printing is already done by the init code.
14876  	 * On return, we have the chip mapped.
14877  	 */
14878  	ret = hfi1_pcie_ddinit(dd, pdev);
14879  	if (ret < 0)
14880  		goto bail_free;
14881  
14882  	/* Save PCI space registers to rewrite after device reset */
14883  	ret = save_pci_variables(dd);
14884  	if (ret < 0)
14885  		goto bail_cleanup;
14886  
14887  	dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14888  			& CCE_REVISION_CHIP_REV_MAJOR_MASK;
14889  	dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14890  			& CCE_REVISION_CHIP_REV_MINOR_MASK;
14891  
14892  	/*
14893  	 * Check the interrupt register mapping if the driver has no access to
14894  	 * the upstream component. In this case, it is likely that the driver
14895  	 * is running in a VM.
14896  	 */
14897  	if (!parent) {
14898  		ret = check_int_registers(dd);
14899  		if (ret)
14900  			goto bail_cleanup;
14901  	}
14902  
14903  	/*
14904  	 * obtain the hardware ID - NOT related to unit, which is a
14905  	 * software enumeration
14906  	 */
14907  	reg = read_csr(dd, CCE_REVISION2);
14908  	dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14909  					& CCE_REVISION2_HFI_ID_MASK;
14910  	/* the variable size will remove unwanted bits */
14911  	dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14912  	dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14913  	dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
14914  		    dd->icode < ARRAY_SIZE(inames) ?
14915  		    inames[dd->icode] : "unknown", (int)dd->irev);
14916  
14917  	/* speeds the hardware can support */
14918  	dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14919  	/* speeds allowed to run at */
14920  	dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14921  	/* give a reasonable active value, will be set on link up */
14922  	dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14923  
14924  	/* fix up link widths for emulation _p */
14925  	ppd = dd->pport;
14926  	if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14927  		ppd->link_width_supported =
14928  			ppd->link_width_enabled =
14929  			ppd->link_width_downgrade_supported =
14930  			ppd->link_width_downgrade_enabled =
14931  				OPA_LINK_WIDTH_1X;
14932  	}
14933  	/* ensure num_vls isn't larger than the number of sdma engines */
14934  	if (HFI1_CAP_IS_KSET(SDMA) && num_vls > sdma_engines) {
14935  		dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
14936  			   num_vls, sdma_engines);
14937  		num_vls = sdma_engines;
14938  		ppd->vls_supported = sdma_engines;
14939  		ppd->vls_operational = ppd->vls_supported;
14940  	}
14941  
14942  	/*
14943  	 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14944  	 * Limit the max if larger than the field holds.  If timeout is
14945  	 * non-zero, then the calculated field will be at least 1.
14946  	 *
14947  	 * Must be after icode is set up - the cclock rate depends
14948  	 * on knowing the hardware being used.
14949  	 */
14950  	dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14951  	if (dd->rcv_intr_timeout_csr >
14952  			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14953  		dd->rcv_intr_timeout_csr =
14954  			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14955  	else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14956  		dd->rcv_intr_timeout_csr = 1;
14957  
14958  	/* needs to be done before we look for the peer device */
14959  	read_guid(dd);
14960  
14961  	/* set up shared ASIC data with peer device */
14962  	ret = init_asic_data(dd);
14963  	if (ret)
14964  		goto bail_cleanup;
14965  
14966  	/* obtain chip sizes, reset chip CSRs */
14967  	ret = init_chip(dd);
14968  	if (ret)
14969  		goto bail_cleanup;
14970  
14971  	/* read in the PCIe link speed information */
14972  	ret = pcie_speeds(dd);
14973  	if (ret)
14974  		goto bail_cleanup;
14975  
14976  	/* call before get_platform_config(), after init_chip_resources() */
14977  	ret = eprom_init(dd);
14978  	if (ret)
14979  		goto bail_free_rcverr;
14980  
14981  	/* Needs to be called before hfi1_firmware_init */
14982  	get_platform_config(dd);
14983  
14984  	/* read in firmware */
14985  	ret = hfi1_firmware_init(dd);
14986  	if (ret)
14987  		goto bail_cleanup;
14988  
14989  	/*
14990  	 * In general, the PCIe Gen3 transition must occur after the
14991  	 * chip has been idled (so it won't initiate any PCIe transactions
14992  	 * e.g. an interrupt) and before the driver changes any registers
14993  	 * (the transition will reset the registers).
14994  	 *
14995  	 * In particular, place this call after:
14996  	 * - init_chip()     - the chip will not initiate any PCIe transactions
14997  	 * - pcie_speeds()   - reads the current link speed
14998  	 * - hfi1_firmware_init() - the needed firmware is ready to be
14999  	 *			    downloaded
15000  	 */
15001  	ret = do_pcie_gen3_transition(dd);
15002  	if (ret)
15003  		goto bail_cleanup;
15004  
15005  	/*
15006  	 * This should probably occur in hfi1_pcie_init(), but historically
15007  	 * occurs after the do_pcie_gen3_transition() code.
15008  	 */
15009  	tune_pcie_caps(dd);
15010  
15011  	/* start setting dd values and adjusting CSRs */
15012  	init_early_variables(dd);
15013  
15014  	parse_platform_config(dd);
15015  
15016  	ret = obtain_boardname(dd);
15017  	if (ret)
15018  		goto bail_cleanup;
15019  
15020  	snprintf(dd->boardversion, BOARD_VERS_MAX,
15021  		 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
15022  		 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
15023  		 (u32)dd->majrev,
15024  		 (u32)dd->minrev,
15025  		 (dd->revision >> CCE_REVISION_SW_SHIFT)
15026  		    & CCE_REVISION_SW_MASK);
15027  
15028  	ret = set_up_context_variables(dd);
15029  	if (ret)
15030  		goto bail_cleanup;
15031  
15032  	/* set initial RXE CSRs */
15033  	ret = init_rxe(dd);
15034  	if (ret)
15035  		goto bail_cleanup;
15036  
15037  	/* set initial TXE CSRs */
15038  	init_txe(dd);
15039  	/* set initial non-RXE, non-TXE CSRs */
15040  	init_other(dd);
15041  	/* set up KDETH QP prefix in both RX and TX CSRs */
15042  	init_kdeth_qp(dd);
15043  
15044  	ret = hfi1_dev_affinity_init(dd);
15045  	if (ret)
15046  		goto bail_cleanup;
15047  
15048  	/* send contexts must be set up before receive contexts */
15049  	ret = init_send_contexts(dd);
15050  	if (ret)
15051  		goto bail_cleanup;
15052  
15053  	ret = hfi1_create_kctxts(dd);
15054  	if (ret)
15055  		goto bail_cleanup;
15056  
15057  	/*
15058  	 * Initialize aspm, to be done after gen3 transition and setting up
15059  	 * contexts and before enabling interrupts
15060  	 */
15061  	aspm_init(dd);
15062  
15063  	ret = init_pervl_scs(dd);
15064  	if (ret)
15065  		goto bail_cleanup;
15066  
15067  	/* sdma init */
15068  	for (i = 0; i < dd->num_pports; ++i) {
15069  		ret = sdma_init(dd, i);
15070  		if (ret)
15071  			goto bail_cleanup;
15072  	}
15073  
15074  	/* use contexts created by hfi1_create_kctxts */
15075  	ret = set_up_interrupts(dd);
15076  	if (ret)
15077  		goto bail_cleanup;
15078  
15079  	ret = hfi1_comp_vectors_set_up(dd);
15080  	if (ret)
15081  		goto bail_clear_intr;
15082  
15083  	/* set up LCB access - must be after set_up_interrupts() */
15084  	init_lcb_access(dd);
15085  
15086  	/*
15087  	 * Serial number is created from the base guid:
15088  	 * [27:24] = base guid [38:35]
15089  	 * [23: 0] = base guid [23: 0]
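	 * (the >> 11 shift moves base guid bit 35 down to serial bit 24)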
15090  	 */
15091  	snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
15092  		 (dd->base_guid & 0xFFFFFF) |
15093  		     ((dd->base_guid >> 11) & 0xF000000));
15094  
15095  	dd->oui1 = dd->base_guid >> 56 & 0xFF;
15096  	dd->oui2 = dd->base_guid >> 48 & 0xFF;
15097  	dd->oui3 = dd->base_guid >> 40 & 0xFF;
15098  
15099  	ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
15100  	if (ret)
15101  		goto bail_clear_intr;
15102  
15103  	thermal_init(dd);
15104  
15105  	ret = init_cntrs(dd);
15106  	if (ret)
15107  		goto bail_clear_intr;
15108  
15109  	ret = init_rcverr(dd);
15110  	if (ret)
15111  		goto bail_free_cntrs;
15112  
15113  	init_completion(&dd->user_comp);
15114  
15115  	/* The user refcount starts with one to indicate an active device */
15116  	atomic_set(&dd->user_refcount, 1);
15117  
15118  	goto bail;
15119  
15120  bail_free_rcverr:
15121  	free_rcverr(dd);
15122  bail_free_cntrs:
15123  	free_cntrs(dd);
15124  bail_clear_intr:
15125  	hfi1_comp_vectors_clean_up(dd);
15126  	msix_clean_up_interrupts(dd);
15127  bail_cleanup:
15128  	hfi1_pcie_ddcleanup(dd);
15129  bail_free:
15130  	hfi1_free_devdata(dd);
15131  bail:
15132  	return ret;
15133  }
15134  
15135  static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
15136  			u32 dw_len)
15137  {
15138  	u32 delta_cycles;
15139  	u32 current_egress_rate = ppd->current_egress_rate;
15140  	/* rates here are in units of 10^6 bits/sec */
15141  
15142  	if (desired_egress_rate == -1)
15143  		return 0; /* shouldn't happen */
15144  
15145  	if (desired_egress_rate >= current_egress_rate)
15146  		return 0; /* we can't help go faster, only slower */
15147  
15148  	delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
15149  			egress_cycles(dw_len * 4, current_egress_rate);
15150  
15151  	return (u16)delta_cycles;
15152  }
15153  
15154  /**
15155   * create_pbc - build a pbc for transmission
15156   * @flags: special case flags or-ed in built pbc
15157   * @srate_mbs: static rate
15158   * @vl: vl
15159   * @dw_len: dword length (header words + data words + pbc words)
15160   *
15161   * Create a PBC with the given flags, rate, VL, and length.
15162   *
15163   * NOTE: The PBC created will not insert any HCRC - all callers but one are
15164   * for verbs, which does not use this PSM feature.  The lone other caller
15165   * is for the diagnostic interface which calls this if the user does not
15166   * supply their own PBC.
15167   */
15168  u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
15169  	       u32 dw_len)
15170  {
15171  	u64 pbc, delay = 0;
15172  
15173  	if (unlikely(srate_mbs))
15174  		delay = delay_cycles(ppd, srate_mbs, dw_len);
15175  
15176  	pbc = flags
15177  		| (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
15178  		| ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
15179  		| (vl & PBC_VL_MASK) << PBC_VL_SHIFT
15180  		| (dw_len & PBC_LENGTH_DWS_MASK)
15181  			<< PBC_LENGTH_DWS_SHIFT;
15182  
15183  	return pbc;
15184  }
15185  
15186  #define SBUS_THERMAL    0x4f
15187  #define SBUS_THERM_MONITOR_MODE 0x1
15188  
15189  #define THERM_FAILURE(dd, ret, reason) \
15190  	dd_dev_err((dd),						\
15191  		   "Thermal sensor initialization failed: %s (%d)\n",	\
15192  		   (reason), (ret))
15193  
15194  /*
15195   * Initialize the thermal sensor.
15196   *
15197   * After initialization, enable polling of the thermal sensor through
15198   * the SBus interface. For this to work, the SBus Master firmware has
15199   * to be loaded, because the HW polling logic uses SBus interrupts,
15200   * which are not supported by the default firmware. Otherwise, no data
15201   * will be returned through the ASIC_STS_THERM CSR.
15203   */
15204  static int thermal_init(struct hfi1_devdata *dd)
15205  {
15206  	int ret = 0;
15207  
15208  	if (dd->icode != ICODE_RTL_SILICON ||
15209  	    check_chip_resource(dd, CR_THERM_INIT, NULL))
15210  		return ret;
15211  
15212  	ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
15213  	if (ret) {
15214  		THERM_FAILURE(dd, ret, "Acquire SBus");
15215  		return ret;
15216  	}
15217  
15218  	dd_dev_info(dd, "Initializing thermal sensor\n");
15219  	/* Disable polling of thermal readings */
15220  	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
15221  	msleep(100);
15222  	/* Thermal Sensor Initialization */
15223  	/*    Step 1: Reset the Thermal SBus Receiver */
15224  	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15225  				RESET_SBUS_RECEIVER, 0);
15226  	if (ret) {
15227  		THERM_FAILURE(dd, ret, "Bus Reset");
15228  		goto done;
15229  	}
15230  	/*    Step 2: Set Reset bit in Thermal block */
15231  	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15232  				WRITE_SBUS_RECEIVER, 0x1);
15233  	if (ret) {
15234  		THERM_FAILURE(dd, ret, "Therm Block Reset");
15235  		goto done;
15236  	}
15237  	/*    Step 3: Write clock divider value (100MHz -> 2MHz) */
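	/*    (data 0x32 = 50 decimal: 100 MHz / 50 = 2 MHz) */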
15238  	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
15239  				WRITE_SBUS_RECEIVER, 0x32);
15240  	if (ret) {
15241  		THERM_FAILURE(dd, ret, "Write Clock Div");
15242  		goto done;
15243  	}
15244  	/*    Step 4: Select temperature mode */
15245  	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
15246  				WRITE_SBUS_RECEIVER,
15247  				SBUS_THERM_MONITOR_MODE);
15248  	if (ret) {
15249  		THERM_FAILURE(dd, ret, "Write Mode Sel");
15250  		goto done;
15251  	}
15252  	/*    Step 5: De-assert block reset and start conversion */
15253  	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15254  				WRITE_SBUS_RECEIVER, 0x2);
15255  	if (ret) {
15256  		THERM_FAILURE(dd, ret, "Write Reset Deassert");
15257  		goto done;
15258  	}
15259  	/*    Step 5.1: Wait for first conversion (21.5ms per spec) */
15260  	msleep(22);
15261  
15262  	/* Enable polling of thermal readings */
15263  	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
15264  
15265  	/* Set initialized flag */
15266  	ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
15267  	if (ret)
15268  		THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
15269  
15270  done:
15271  	release_chip_resource(dd, CR_SBUS);
15272  	return ret;
15273  }
15274  
15275  static void handle_temp_err(struct hfi1_devdata *dd)
15276  {
15277  	struct hfi1_pportdata *ppd = &dd->pport[0];
15278  	/*
15279  	 * Thermal Critical Interrupt
15280  	 * Put the device into forced freeze mode, take link down to
15281  	 * offline, and put DC into reset.
15282  	 */
15283  	dd_dev_emerg(dd,
15284  		     "Critical temperature reached! Forcing device into freeze mode!\n");
15285  	dd->flags |= HFI1_FORCED_FREEZE;
15286  	start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
15287  	/*
15288  	 * Shut DC down as much and as quickly as possible.
15289  	 *
15290  	 * Step 1: Take the link down to OFFLINE. This will cause the
15291  	 *         8051 to put the Serdes in reset. However, we don't want to
15292  	 *         go through the entire link state machine since we want to
15293  	 *         shut down ASAP. Furthermore, this is not a graceful shutdown
15294  	 *         but rather an attempt to save the chip.
15295  	 *         Code below is almost the same as quiet_serdes() but avoids
15296  	 *         all the extra work and the sleeps.
15297  	 */
15298  	ppd->driver_link_ready = 0;
15299  	ppd->link_enabled = 0;
15300  	set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
15301  				PLS_OFFLINE);
15302  	/*
15303  	 * Step 2: Shutdown LCB and 8051
15304  	 *         After shutdown, do not restore DC_CFG_RESET value.
15305  	 */
15306  	dc_shutdown(dd);
15307  }
15308