1 /*
2  * Copyright (c) 2012 - 2017 Intel Corporation.  All rights reserved.
3  * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 /*
35  * This file contains all of the code that is specific to the
36  * InfiniPath 7322 chip
37  */
38 
39 #include <linux/interrupt.h>
40 #include <linux/pci.h>
41 #include <linux/delay.h>
42 #include <linux/io.h>
43 #include <linux/jiffies.h>
44 #include <linux/module.h>
45 #include <rdma/ib_verbs.h>
46 #include <rdma/ib_smi.h>
47 #ifdef CONFIG_INFINIBAND_QIB_DCA
48 #include <linux/dca.h>
49 #endif
50 
51 #include "qib.h"
52 #include "qib_7322_regs.h"
53 #include "qib_qsfp.h"
54 
55 #include "qib_mad.h"
56 #include "qib_verbs.h"
57 
58 #undef pr_fmt
59 #define pr_fmt(fmt) QIB_DRV_NAME " " fmt
60 
61 static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
62 static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
63 static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
64 static irqreturn_t qib_7322intr(int irq, void *data);
65 static irqreturn_t qib_7322bufavail(int irq, void *data);
66 static irqreturn_t sdma_intr(int irq, void *data);
67 static irqreturn_t sdma_idle_intr(int irq, void *data);
68 static irqreturn_t sdma_progress_intr(int irq, void *data);
69 static irqreturn_t sdma_cleanup_intr(int irq, void *data);
70 static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
71 				  struct qib_ctxtdata *rcd);
72 static u8 qib_7322_phys_portstate(u64);
73 static u32 qib_7322_iblink_state(u64);
74 static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
75 				   u16 linitcmd);
76 static void force_h1(struct qib_pportdata *);
77 static void adj_tx_serdes(struct qib_pportdata *);
78 static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
79 static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
80 
81 static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
82 static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
83 static void serdes_7322_los_enable(struct qib_pportdata *, int);
84 static int serdes_7322_init_old(struct qib_pportdata *);
85 static int serdes_7322_init_new(struct qib_pportdata *);
86 static void dump_sdma_7322_state(struct qib_pportdata *);
87 
88 #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
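/*
 * Worked example (illustrative only; not used by the driver): for a
 * field occupying bits 5..3, BMASK(5, 3) expands to
 * ((1 << (5 + 1 - 3)) - 1) << 3 == 0x7 << 3 == 0x38.
 */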
89 
90 /* LE2 serdes values for different cases */
91 #define LE2_DEFAULT 5
92 #define LE2_5m 4
93 #define LE2_QME 0
94 
95 /* Below is special-purpose, so only really works for the IB SerDes blocks. */
96 #define IBSD(hw_pidx) (hw_pidx + 2)
97 
98 /* these are variables for documentation and experimentation purposes */
99 static const unsigned rcv_int_timeout = 375;
100 static const unsigned rcv_int_count = 16;
101 static const unsigned sdma_idle_cnt = 64;
102 
103 /* Time to stop altering Rx Equalization parameters, after link up. */
104 #define RXEQ_DISABLE_MSECS 2500
105 
106 /*
107  * Number of VLs we are configured to use (to allow for more
108  * credits per vl, etc.)
109  */
110 ushort qib_num_cfg_vls = 2;
111 module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
112 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
113 
114 static ushort qib_chase = 1;
115 module_param_named(chase, qib_chase, ushort, S_IRUGO);
116 MODULE_PARM_DESC(chase, "Enable state chase handling");
117 
118 static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
119 module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
120 MODULE_PARM_DESC(long_attenuation,
121 		 "attenuation cutoff (dB) for long copper cable setup");
122 
123 static ushort qib_singleport;
124 module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
125 MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
126 
127 static ushort qib_krcvq01_no_msi;
128 module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
129 MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");
130 
131 /*
132  * Receive header queue sizes
133  */
134 static unsigned qib_rcvhdrcnt;
135 module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
136 MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
137 
138 static unsigned qib_rcvhdrsize;
139 module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
140 MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
141 
142 static unsigned qib_rcvhdrentsize;
143 module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
144 MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
145 
146 #define MAX_ATTEN_LEN 64 /* plenty for any real system */
147 /* for read back, default index is ~5m copper cable */
148 static char txselect_list[MAX_ATTEN_LEN] = "10";
149 static struct kparam_string kp_txselect = {
150 	.string = txselect_list,
151 	.maxlen = MAX_ATTEN_LEN
152 };
153 static int  setup_txselect(const char *, const struct kernel_param *);
154 module_param_call(txselect, setup_txselect, param_get_string,
155 		  &kp_txselect, S_IWUSR | S_IRUGO);
156 MODULE_PARM_DESC(txselect,
157 		 "Tx serdes indices (for no QSFP or invalid QSFP data)");
158 
159 #define BOARD_QME7342 5
160 #define BOARD_QMH7342 6
161 #define BOARD_QMH7360 9
162 #define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
163 		    BOARD_QMH7342)
164 #define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
165 		    BOARD_QME7342)
166 
167 #define KREG_IDX(regname)     (QIB_7322_##regname##_OFFS / sizeof(u64))
168 
169 #define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
170 
171 #define MASK_ACROSS(lsb, msb) \
172 	(((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
173 
174 #define SYM_RMASK(regname, fldname) ((u64)              \
175 	QIB_7322_##regname##_##fldname##_RMASK)
176 
177 #define SYM_MASK(regname, fldname) ((u64)               \
178 	QIB_7322_##regname##_##fldname##_RMASK <<       \
179 	 QIB_7322_##regname##_##fldname##_LSB)
180 
181 #define SYM_FIELD(value, regname, fldname) ((u64)	\
182 	(((value) >> SYM_LSB(regname, fldname)) &	\
183 	 SYM_RMASK(regname, fldname)))
184 
185 /* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
186 #define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
187 	(((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
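/*
 * Illustrative expansions (the names come from the generated
 * qib_7322_regs.h header):
 *	SYM_MASK(IBCCtrlA_0, LinkCmd)
 *		== QIB_7322_IBCCtrlA_0_LinkCmd_RMASK
 *			<< QIB_7322_IBCCtrlA_0_LinkCmd_LSB
 *	SYM_FIELD(val, IBCCtrlA_0, LinkCmd)
 *		== (val >> QIB_7322_IBCCtrlA_0_LinkCmd_LSB)
 *			& QIB_7322_IBCCtrlA_0_LinkCmd_RMASK
 * i.e. SYM_MASK() builds an in-place mask, while SYM_FIELD() extracts a
 * right-justified field value.
 */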
188 
189 #define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
190 #define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
191 #define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
192 #define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
193 #define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
194 /* Below because most, but not all, fields of IntMask have that full suffix */
195 #define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
196 
197 
198 #define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
199 
200 /*
201  * the size bits give us 2^N, in KB units.  0 marks the entry as invalid,
202  * and 7 is reserved.  We currently use only 2KB and 4KB.
203  */
204 #define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
205 #define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
206 #define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
207 #define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
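/*
 * Rough sketch (illustration only; the real composition is done by the
 * TID update code later in this file): a TID entry for a 4KB buffer at
 * a 2KB-aligned physical address pa would look approximately like
 *	IBA7322_TID_SZ_4K | (pa >> IBA7322_TID_PA_SHIFT)
 * i.e. the size code in the RT_BufSize field plus the address stored
 * without its low 11 bits.
 */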
208 
209 #define SendIBSLIDAssignMask \
210 	QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
211 #define SendIBSLMCMask \
212 	QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
213 
214 #define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
215 #define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
216 #define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
217 #define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
218 #define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
219 #define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
220 
221 #define _QIB_GPIO_SDA_NUM 1
222 #define _QIB_GPIO_SCL_NUM 0
223 #define QIB_EEPROM_WEN_NUM 14
224 #define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */
225 
226 /* HW counter clock is at 4nsec */
227 #define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
228 
229 /* full speed IB port 1 only */
230 #define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
231 #define PORT_SPD_CAP_SHIFT 3
232 
233 /* full speed featuremask, both ports */
234 #define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
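/*
 * For example, with QIB_IB_SDR/DDR/QDR defined as the usual 1/2/4 bit
 * flags in qib.h, PORT_SPD_CAP is 0x7 and DUAL_PORT_CAP is 0x3f:
 * port 0's speed capabilities sit in bits 0..2 of the feature mask and
 * port 1's in bits 3..5.
 */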
235 
236 /*
237  * This file contains almost all the chip-specific register information and
238  * access functions for the QLogic InfiniPath 7322 PCI-Express chip.
239  */
240 
241 /* Use defines to tie machine-generated names to lower-case names */
242 #define kr_contextcnt KREG_IDX(ContextCnt)
243 #define kr_control KREG_IDX(Control)
244 #define kr_counterregbase KREG_IDX(CntrRegBase)
245 #define kr_errclear KREG_IDX(ErrClear)
246 #define kr_errmask KREG_IDX(ErrMask)
247 #define kr_errstatus KREG_IDX(ErrStatus)
248 #define kr_extctrl KREG_IDX(EXTCtrl)
249 #define kr_extstatus KREG_IDX(EXTStatus)
250 #define kr_gpio_clear KREG_IDX(GPIOClear)
251 #define kr_gpio_mask KREG_IDX(GPIOMask)
252 #define kr_gpio_out KREG_IDX(GPIOOut)
253 #define kr_gpio_status KREG_IDX(GPIOStatus)
254 #define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
255 #define kr_debugportval KREG_IDX(DebugPortValueReg)
256 #define kr_fmask KREG_IDX(feature_mask)
257 #define kr_act_fmask KREG_IDX(active_feature_mask)
258 #define kr_hwerrclear KREG_IDX(HwErrClear)
259 #define kr_hwerrmask KREG_IDX(HwErrMask)
260 #define kr_hwerrstatus KREG_IDX(HwErrStatus)
261 #define kr_intclear KREG_IDX(IntClear)
262 #define kr_intmask KREG_IDX(IntMask)
263 #define kr_intredirect KREG_IDX(IntRedirect0)
264 #define kr_intstatus KREG_IDX(IntStatus)
265 #define kr_pagealign KREG_IDX(PageAlign)
266 #define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
267 #define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
268 #define kr_rcvegrbase KREG_IDX(RcvEgrBase)
269 #define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
270 #define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
271 #define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
272 #define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
273 #define kr_rcvtidbase KREG_IDX(RcvTIDBase)
274 #define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
275 #define kr_revision KREG_IDX(Revision)
276 #define kr_scratch KREG_IDX(Scratch)
277 #define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
278 #define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
279 #define kr_sendctrl KREG_IDX(SendCtrl)
280 #define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
281 #define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
282 #define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
283 #define kr_sendpiobufbase KREG_IDX(SendBufBase)
284 #define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
285 #define kr_sendpiosize KREG_IDX(SendBufSize)
286 #define kr_sendregbase KREG_IDX(SendRegBase)
287 #define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
288 #define kr_userregbase KREG_IDX(UserRegBase)
289 #define kr_intgranted KREG_IDX(Int_Granted)
290 #define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
291 #define kr_intblocked KREG_IDX(IntBlocked)
292 #define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
293 
294 /*
295  * per-port kernel registers.  Access only with qib_read_kreg_port()
296  * or qib_write_kreg_port()
297  */
298 #define krp_errclear KREG_IBPORT_IDX(ErrClear)
299 #define krp_errmask KREG_IBPORT_IDX(ErrMask)
300 #define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
301 #define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
302 #define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
303 #define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
304 #define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
305 #define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
306 #define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
307 #define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
308 #define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
309 #define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
310 #define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
311 #define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
312 #define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
313 #define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
314 #define krp_psinterval KREG_IBPORT_IDX(PSInterval)
315 #define krp_psstart KREG_IBPORT_IDX(PSStart)
316 #define krp_psstat KREG_IBPORT_IDX(PSStat)
317 #define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
318 #define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
319 #define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
320 #define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
321 #define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
322 #define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
323 #define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
324 #define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
325 #define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
326 #define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
327 #define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
328 #define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
329 #define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
330 #define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
331 #define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
332 #define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
333 #define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
334 #define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
335 #define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
336 #define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
337 #define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
338 #define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
339 #define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
340 #define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
341 #define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
342 #define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
343 #define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
344 #define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
345 #define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
346 #define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
347 #define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
348 
349 /*
350  * Per-context kernel registers.  Access only with qib_read_kreg_ctxt()
351  * or qib_write_kreg_ctxt()
352  */
353 #define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
354 #define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
355 
356 /*
357  * TID Flow table, per context.  Reduces
358  * number of hdrq updates to one per flow (or on errors).
359  * Contexts 0 and 1 share the same memory, but have distinct
360  * addresses.  Since for now, we never use expected sends
361  * on kernel contexts, we don't worry about that (we initialize
362  * those entries for ctxt 0/1 on driver load twice, for example).
363  */
364 #define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
365 #define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
366 
367 /* these are the error bits in the tid flows, and are W1C */
368 #define TIDFLOW_ERRBITS  ( \
369 	(SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
370 	SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
371 	(SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
372 	SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
373 
374 /* Most (not all) Counters are per-IBport.
375  * Requires that LBIntCnt be at offset 0 in the group.
376  */
377 #define CREG_IDX(regname) \
378 ((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
379 
380 #define crp_badformat CREG_IDX(RxVersionErrCnt)
381 #define crp_err_rlen CREG_IDX(RxLenErrCnt)
382 #define crp_erricrc CREG_IDX(RxICRCErrCnt)
383 #define crp_errlink CREG_IDX(RxLinkMalformCnt)
384 #define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
385 #define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
386 #define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
387 #define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
388 #define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
389 #define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
390 #define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
391 #define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
392 #define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
393 #define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
394 #define crp_pktrcv CREG_IDX(RxDataPktCnt)
395 #define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
396 #define crp_pktsend CREG_IDX(TxDataPktCnt)
397 #define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
398 #define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
399 #define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
400 #define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
401 #define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
402 #define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
403 #define crp_rcvebp CREG_IDX(RxEBPCnt)
404 #define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
405 #define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
406 #define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
407 #define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
408 #define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
409 #define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
410 #define crp_rxvlerr CREG_IDX(RxVlErrCnt)
411 #define crp_sendstall CREG_IDX(TxFlowStallCnt)
412 #define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
413 #define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
414 #define crp_txlenerr CREG_IDX(TxLenErrCnt)
415 #define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
416 #define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
417 #define crp_txunderrun CREG_IDX(TxUnderrunCnt)
418 #define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
419 #define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
420 #define crp_wordrcv CREG_IDX(RxDwordCnt)
421 #define crp_wordsend CREG_IDX(TxDwordCnt)
422 #define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
423 
424 /* these are the (few) counters that are not port-specific */
425 #define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
426 			QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
427 #define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
428 #define cr_lbint CREG_DEVIDX(LBIntCnt)
429 #define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
430 #define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
431 #define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
432 #define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
433 #define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
434 
435 /* no chip register for # of IB ports supported, so define */
436 #define NUM_IB_PORTS 2
437 
438 /* 1 VL15 buffer per hardware IB port, no register for this, so define */
439 #define NUM_VL15_BUFS NUM_IB_PORTS
440 
441 /*
442  * context 0 and 1 are special, and there is no chip register that
443  * defines this value, so we have to define it here.
444  * These are all allocated to either 0 or 1 for single port
445  * hardware configuration, otherwise each gets half
446  */
447 #define KCTXT0_EGRCNT 2048
448 
449 /* values for vl and port fields in PBC, 7322-specific */
450 #define PBC_PORT_SEL_LSB 26
451 #define PBC_PORT_SEL_RMASK 1
452 #define PBC_VL_NUM_LSB 27
453 #define PBC_VL_NUM_RMASK 7
454 #define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
455 #define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
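/*
 * Illustrative composition (see flush_fifo() below for a real use):
 * the VL/port select fields sit in the upper 32-bit control word of the
 * 64-bit PBC, so a VL15 send on hardware port 1 is built roughly as
 *	pbc = PBC_7322_VL15_SEND |
 *	      ((u64)1 << (PBC_PORT_SEL_LSB + 32)) |
 *	      len_dwords;
 */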
456 
457 static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
458 	[IB_RATE_2_5_GBPS] = 16,
459 	[IB_RATE_5_GBPS] = 8,
460 	[IB_RATE_10_GBPS] = 4,
461 	[IB_RATE_20_GBPS] = 2,
462 	[IB_RATE_30_GBPS] = 2,
463 	[IB_RATE_40_GBPS] = 1
464 };
465 
466 static const char * const qib_sdma_state_names[] = {
467 	[qib_sdma_state_s00_hw_down]          = "s00_HwDown",
468 	[qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
469 	[qib_sdma_state_s20_idle]             = "s20_Idle",
470 	[qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
471 	[qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
472 	[qib_sdma_state_s50_hw_halt_wait]     = "s50_HwHaltWait",
473 	[qib_sdma_state_s99_running]          = "s99_Running",
474 };
475 
476 #define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
477 #define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
478 
479 /* link training states, from IBC */
480 #define IB_7322_LT_STATE_DISABLED        0x00
481 #define IB_7322_LT_STATE_LINKUP          0x01
482 #define IB_7322_LT_STATE_POLLACTIVE      0x02
483 #define IB_7322_LT_STATE_POLLQUIET       0x03
484 #define IB_7322_LT_STATE_SLEEPDELAY      0x04
485 #define IB_7322_LT_STATE_SLEEPQUIET      0x05
486 #define IB_7322_LT_STATE_CFGDEBOUNCE     0x08
487 #define IB_7322_LT_STATE_CFGRCVFCFG      0x09
488 #define IB_7322_LT_STATE_CFGWAITRMT      0x0a
489 #define IB_7322_LT_STATE_CFGIDLE         0x0b
490 #define IB_7322_LT_STATE_RECOVERRETRAIN  0x0c
491 #define IB_7322_LT_STATE_TXREVLANES      0x0d
492 #define IB_7322_LT_STATE_RECOVERWAITRMT  0x0e
493 #define IB_7322_LT_STATE_RECOVERIDLE     0x0f
494 #define IB_7322_LT_STATE_CFGENH          0x10
495 #define IB_7322_LT_STATE_CFGTEST         0x11
496 #define IB_7322_LT_STATE_CFGWAITRMTTEST  0x12
497 #define IB_7322_LT_STATE_CFGWAITENH      0x13
498 
499 /* link state machine states from IBC */
500 #define IB_7322_L_STATE_DOWN             0x0
501 #define IB_7322_L_STATE_INIT             0x1
502 #define IB_7322_L_STATE_ARM              0x2
503 #define IB_7322_L_STATE_ACTIVE           0x3
504 #define IB_7322_L_STATE_ACT_DEFER        0x4
505 
506 static const u8 qib_7322_physportstate[0x20] = {
507 	[IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
508 	[IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
509 	[IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
510 	[IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
511 	[IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
512 	[IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
513 	[IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
514 	[IB_7322_LT_STATE_CFGRCVFCFG] =
515 		IB_PHYSPORTSTATE_CFG_TRAIN,
516 	[IB_7322_LT_STATE_CFGWAITRMT] =
517 		IB_PHYSPORTSTATE_CFG_TRAIN,
518 	[IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
519 	[IB_7322_LT_STATE_RECOVERRETRAIN] =
520 		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
521 	[IB_7322_LT_STATE_RECOVERWAITRMT] =
522 		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
523 	[IB_7322_LT_STATE_RECOVERIDLE] =
524 		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
525 	[IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
526 	[IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
527 	[IB_7322_LT_STATE_CFGWAITRMTTEST] =
528 		IB_PHYSPORTSTATE_CFG_TRAIN,
529 	[IB_7322_LT_STATE_CFGWAITENH] =
530 		IB_PHYSPORTSTATE_CFG_WAIT_ENH,
531 	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
532 	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
533 	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
534 	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
535 };
536 
537 #ifdef CONFIG_INFINIBAND_QIB_DCA
538 struct qib_irq_notify {
539 	int rcv;
540 	void *arg;
541 	struct irq_affinity_notify notify;
542 };
543 #endif
544 
545 struct qib_chip_specific {
546 	u64 __iomem *cregbase;
547 	u64 *cntrs;
548 	spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
549 	spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
550 	u64 main_int_mask;      /* clear bits which have dedicated handlers */
551 	u64 int_enable_mask;  /* for per port interrupts in single port mode */
552 	u64 errormask;
553 	u64 hwerrmask;
554 	u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
555 	u64 gpio_mask; /* shadow the gpio mask register */
556 	u64 extctrl; /* shadow the gpio output enable, etc... */
557 	u32 ncntrs;
558 	u32 nportcntrs;
559 	u32 cntrnamelen;
560 	u32 portcntrnamelen;
561 	u32 numctxts;
562 	u32 rcvegrcnt;
563 	u32 updthresh; /* current AvailUpdThld */
564 	u32 updthresh_dflt; /* default AvailUpdThld */
565 	u32 r1;
566 	u32 num_msix_entries;
567 	u32 sdmabufcnt;
568 	u32 lastbuf_for_pio;
569 	u32 stay_in_freeze;
570 	u32 recovery_ports_initted;
571 #ifdef CONFIG_INFINIBAND_QIB_DCA
572 	u32 dca_ctrl;
573 	int rhdr_cpu[18];
574 	int sdma_cpu[2];
575 	u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
576 #endif
577 	struct qib_msix_entry *msix_entries;
578 	unsigned long *sendchkenable;
579 	unsigned long *sendgrhchk;
580 	unsigned long *sendibchk;
581 	u32 rcvavail_timeout[18];
582 	char emsgbuf[128]; /* for device error interrupt msg buffer */
583 };
584 
585 /* Table of Tx Emphasis entries, in "human readable" form. */
586 struct txdds_ent {
587 	u8 amp;
588 	u8 pre;
589 	u8 main;
590 	u8 post;
591 };
592 
593 struct vendor_txdds_ent {
594 	u8 oui[QSFP_VOUI_LEN];
595 	u8 *partnum;
596 	struct txdds_ent sdr;
597 	struct txdds_ent ddr;
598 	struct txdds_ent qdr;
599 };
600 
601 static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
602 
603 #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
604 #define TXDDS_EXTRA_SZ 18 /* number of extra tx settings entries */
605 #define TXDDS_MFG_SZ 2    /* number of mfg tx settings entries */
606 #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
607 
608 #define H1_FORCE_VAL 8
609 #define H1_FORCE_QME 1 /*  may be overridden via setup_txselect() */
610 #define H1_FORCE_QMH 7 /*  may be overridden via setup_txselect() */
611 
612 /* The static and dynamic registers are paired, and the pairs indexed by spd */
613 #define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
614 	+ ((spd) * 2))
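/*
 * The spd index is presumably 0/1/2 for SDR/DDR/QDR, so (for example)
 * krp_static_adapt_dis(2) names the QDR static-adaptation register, with
 * the matching dynamic register expected to follow it immediately.
 */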
615 
616 #define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
617 #define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
618 #define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
619 #define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
620 #define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
621 
622 struct qib_chippport_specific {
623 	u64 __iomem *kpregbase;
624 	u64 __iomem *cpregbase;
625 	u64 *portcntrs;
626 	struct qib_pportdata *ppd;
627 	wait_queue_head_t autoneg_wait;
628 	struct delayed_work autoneg_work;
629 	struct delayed_work ipg_work;
630 	struct timer_list chase_timer;
631 	/*
632 	 * these 5 fields are used to establish deltas for IB symbol
633 	 * errors and linkrecovery errors.  They can be reported on
634 	 * some chips during link negotiation prior to INIT, and with
635 	 * DDR when faking DDR negotiations with non-IBTA switches.
636 	 * The chip counters are adjusted at driver unload if there is
637 	 * a non-zero delta.
638 	 */
639 	u64 ibdeltainprog;
640 	u64 ibsymdelta;
641 	u64 ibsymsnap;
642 	u64 iblnkerrdelta;
643 	u64 iblnkerrsnap;
644 	u64 iblnkdownsnap;
645 	u64 iblnkdowndelta;
646 	u64 ibmalfdelta;
647 	u64 ibmalfsnap;
648 	u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
649 	u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
650 	unsigned long qdr_dfe_time;
651 	unsigned long chase_end;
652 	u32 autoneg_tries;
653 	u32 recovery_init;
654 	u32 qdr_dfe_on;
655 	u32 qdr_reforce;
656 	/*
657 	 * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
658 	 * entry zero is unused, to simplify indexing
659 	 */
660 	u8 h1_val;
661 	u8 no_eep;  /* txselect table index to use if no qsfp info */
662 	u8 ipg_tries;
663 	u8 ibmalfusesnap;
664 	struct qib_qsfp_data qsfp_data;
665 	char epmsgbuf[192]; /* for port error interrupt msg buffer */
666 	char sdmamsgbuf[192]; /* for per-port sdma error messages */
667 };
668 
669 static struct {
670 	const char *name;
671 	irq_handler_t handler;
672 	int lsb;
673 	int port; /* 0 if not port-specific, else port # */
674 	int dca;
675 } irq_table[] = {
676 	{ "", qib_7322intr, -1, 0, 0 },
677 	{ " (buf avail)", qib_7322bufavail,
678 		SYM_LSB(IntStatus, SendBufAvail), 0, 0},
679 	{ " (sdma 0)", sdma_intr,
680 		SYM_LSB(IntStatus, SDmaInt_0), 1, 1 },
681 	{ " (sdma 1)", sdma_intr,
682 		SYM_LSB(IntStatus, SDmaInt_1), 2, 1 },
683 	{ " (sdmaI 0)", sdma_idle_intr,
684 		SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1},
685 	{ " (sdmaI 1)", sdma_idle_intr,
686 		SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1},
687 	{ " (sdmaP 0)", sdma_progress_intr,
688 		SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 },
689 	{ " (sdmaP 1)", sdma_progress_intr,
690 		SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 },
691 	{ " (sdmaC 0)", sdma_cleanup_intr,
692 		SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 },
693 	{ " (sdmaC 1)", sdma_cleanup_intr,
694 		SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 , 0},
695 };
696 
697 #ifdef CONFIG_INFINIBAND_QIB_DCA
698 
699 static const struct dca_reg_map {
700 	int     shadow_inx;
701 	int     lsb;
702 	u64     mask;
703 	u16     regno;
704 } dca_rcvhdr_reg_map[] = {
705 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
706 	   ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) },
707 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
708 	   ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) },
709 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
710 	   ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) },
711 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
712 	   ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) },
713 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
714 	   ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) },
715 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
716 	   ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) },
717 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
718 	   ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) },
719 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
720 	   ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) },
721 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
722 	   ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) },
723 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
724 	   ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) },
725 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
726 	   ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) },
727 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
728 	   ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) },
729 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
730 	   ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) },
731 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
732 	   ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) },
733 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
734 	   ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) },
735 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
736 	   ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) },
737 	{ 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
738 	   ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) },
739 	{ 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
740 	   ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) },
741 };
742 #endif
743 
744 /* ibcctrl bits */
745 #define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
746 /* cycle through TS1/TS2 till OK */
747 #define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
748 /* wait for TS1, then go on */
749 #define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
750 #define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
751 
752 #define QLOGIC_IB_IBCC_LINKCMD_DOWN 1           /* move to 0x11 */
753 #define QLOGIC_IB_IBCC_LINKCMD_ARMED 2          /* move to 0x21 */
754 #define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
755 
756 #define BLOB_7322_IBCHG 0x101
757 
758 static inline void qib_write_kreg(const struct qib_devdata *dd,
759 				  const u32 regno, u64 value);
760 static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
761 static void write_7322_initregs(struct qib_devdata *);
762 static void write_7322_init_portregs(struct qib_pportdata *);
763 static void setup_7322_link_recovery(struct qib_pportdata *, u32);
764 static void check_7322_rxe_status(struct qib_pportdata *);
765 static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
766 #ifdef CONFIG_INFINIBAND_QIB_DCA
767 static void qib_setup_dca(struct qib_devdata *dd);
768 static void setup_dca_notifier(struct qib_devdata *dd, int msixnum);
769 static void reset_dca_notifier(struct qib_devdata *dd, int msixnum);
770 #endif
771 
772 /**
773  * qib_read_ureg32 - read 32-bit virtualized per-context register
774  * @dd: device
775  * @regno: register number
776  * @ctxt: context number
777  *
778  * Return the contents of a register that is virtualized to be per context.
779  * Returns -1 on errors (not distinguishable from valid contents at
780  * runtime; we may add a separate error variable at some point).
781  */
782 static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
783 				  enum qib_ureg regno, int ctxt)
784 {
785 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
786 		return 0;
787 	return readl(regno + (u64 __iomem *)(
788 		(dd->ureg_align * ctxt) + (dd->userbase ?
789 		 (char __iomem *)dd->userbase :
790 		 (char __iomem *)dd->kregbase + dd->uregbase)));
791 }
792 
793 /**
794  * qib_read_ureg - read virtualized per-context register
795  * @dd: device
796  * @regno: register number
797  * @ctxt: context number
798  *
799  * Return the contents of a register that is virtualized to be per context.
800  * Returns -1 on errors (not distinguishable from valid contents at
801  * runtime; we may add a separate error variable at some point).
802  */
803 static inline u64 qib_read_ureg(const struct qib_devdata *dd,
804 				enum qib_ureg regno, int ctxt)
805 {
806 
807 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
808 		return 0;
809 	return readq(regno + (u64 __iomem *)(
810 		(dd->ureg_align * ctxt) + (dd->userbase ?
811 		 (char __iomem *)dd->userbase :
812 		 (char __iomem *)dd->kregbase + dd->uregbase)));
813 }
814 
815 /**
816  * qib_write_ureg - write virtualized per-context register
817  * @dd: device
818  * @regno: register number
819  * @value: value
820  * @ctxt: context
821  *
822  * Write the contents of a register that is virtualized to be per context.
823  */
824 static inline void qib_write_ureg(const struct qib_devdata *dd,
825 				  enum qib_ureg regno, u64 value, int ctxt)
826 {
827 	u64 __iomem *ubase;
828 
829 	if (dd->userbase)
830 		ubase = (u64 __iomem *)
831 			((char __iomem *) dd->userbase +
832 			 dd->ureg_align * ctxt);
833 	else
834 		ubase = (u64 __iomem *)
835 			(dd->uregbase +
836 			 (char __iomem *) dd->kregbase +
837 			 dd->ureg_align * ctxt);
838 
839 	if (dd->kregbase && (dd->flags & QIB_PRESENT))
840 		writeq(value, &ubase[regno]);
841 }
842 
843 static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
844 				  const u32 regno)
845 {
846 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
847 		return -1;
848 	return readl((u32 __iomem *) &dd->kregbase[regno]);
849 }
850 
851 static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
852 				  const u32 regno)
853 {
854 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
855 		return -1;
856 	return readq(&dd->kregbase[regno]);
857 }
858 
859 static inline void qib_write_kreg(const struct qib_devdata *dd,
860 				  const u32 regno, u64 value)
861 {
862 	if (dd->kregbase && (dd->flags & QIB_PRESENT))
863 		writeq(value, &dd->kregbase[regno]);
864 }
865 
866 /*
867  * not many sanity checks for the port-specific kernel register routines,
868  * since they are only used when it's known to be safe.
869  */
870 static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
871 				     const u16 regno)
872 {
873 	if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
874 		return 0ULL;
875 	return readq(&ppd->cpspec->kpregbase[regno]);
876 }
877 
878 static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
879 				       const u16 regno, u64 value)
880 {
881 	if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
882 	    (ppd->dd->flags & QIB_PRESENT))
883 		writeq(value, &ppd->cpspec->kpregbase[regno]);
884 }
885 
886 /**
887  * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
888  * @dd: the qlogic_ib device
889  * @regno: the register number to write
890  * @ctxt: the context containing the register
891  * @value: the value to write
892  */
893 static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
894 				       const u16 regno, unsigned ctxt,
895 				       u64 value)
896 {
897 	qib_write_kreg(dd, regno + ctxt, value);
898 }
899 
900 static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
901 {
902 	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
903 		return 0;
904 	return readq(&dd->cspec->cregbase[regno]);
905 
906 
907 }
908 
909 static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
910 {
911 	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
912 		return 0;
913 	return readl(&dd->cspec->cregbase[regno]);
914 
915 
916 }
917 
918 static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
919 					u16 regno, u64 value)
920 {
921 	if (ppd->cpspec && ppd->cpspec->cpregbase &&
922 	    (ppd->dd->flags & QIB_PRESENT))
923 		writeq(value, &ppd->cpspec->cpregbase[regno]);
924 }
925 
926 static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
927 				      u16 regno)
928 {
929 	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
930 	    !(ppd->dd->flags & QIB_PRESENT))
931 		return 0;
932 	return readq(&ppd->cpspec->cpregbase[regno]);
933 }
934 
935 static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
936 					u16 regno)
937 {
938 	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
939 	    !(ppd->dd->flags & QIB_PRESENT))
940 		return 0;
941 	return readl(&ppd->cpspec->cpregbase[regno]);
942 }
943 
944 /* bits in Control register */
945 #define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
946 #define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
947 
948 /* bits in general interrupt regs */
949 #define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
950 #define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
951 #define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
952 #define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
953 #define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
954 #define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
955 #define QIB_I_C_ERROR INT_MASK(Err)
956 
957 #define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
958 #define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
959 #define QIB_I_GPIO INT_MASK(AssertGPIO)
960 #define QIB_I_P_SDMAINT(pidx) \
961 	(INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
962 	 INT_MASK_P(SDmaProgress, pidx) | \
963 	 INT_MASK_PM(SDmaCleanupDone, pidx))
964 
965 /* Interrupt bits that are "per port" */
966 #define QIB_I_P_BITSEXTANT(pidx) \
967 	(INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
968 	INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
969 	INT_MASK_P(SDmaProgress, pidx) | \
970 	INT_MASK_PM(SDmaCleanupDone, pidx))
971 
972 /* Interrupt bits that are common to a device */
973 /* currently unused: QIB_I_SPIOSENT */
974 #define QIB_I_C_BITSEXTANT \
975 	(QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
976 	QIB_I_SPIOSENT | \
977 	QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
978 
979 #define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
980 	QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
981 
982 /*
983  * Error bits that are "per port".
984  */
985 #define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
986 #define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
987 #define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
988 #define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
989 #define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
990 #define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
991 #define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
992 #define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
993 #define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
994 #define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
995 #define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
996 #define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
997 #define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
998 #define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
999 #define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
1000 #define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
1001 #define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
1002 #define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
1003 #define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
1004 #define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
1005 #define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
1006 #define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
1007 #define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
1008 #define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
1009 #define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
1010 #define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
1011 #define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
1012 #define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
1013 
1014 #define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
1015 #define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
1016 #define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
1017 #define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
1018 #define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
1019 #define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
1020 #define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
1021 #define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
1022 #define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
1023 #define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
1024 #define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
1025 
1026 /* Error bits that are common to a device */
1027 #define QIB_E_RESET ERR_MASK(ResetNegated)
1028 #define QIB_E_HARDWARE ERR_MASK(HardwareErr)
1029 #define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
1030 
1031 
1032 /*
1033  * Per chip (rather than per-port) errors.  Most either do
1034  * nothing but trigger a print (because they self-recover, or
1035  * always occur in tandem with other errors that handle the
1036  * issue), or indicate errors with no recovery, where we still
1037  * want to know that they happened.
1038  */
1039 #define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
1040 #define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
1041 #define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
1042 #define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
1043 #define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
1044 #define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
1045 #define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
1046 #define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
1047 
1048 /* SDMA chip errors (not per port)
1049  * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
1050  * the SDMAHALT error immediately, so we just print the dup error via the
1051  * E_AUTO mechanism.  This is true of most of the per-port fatal errors
1052  * as well, but since this is port-independent, by definition, it's
1053  * handled a bit differently.  SDMA_VL15 and SDMA_WRONG_PORT are per
1054  * packet send errors, and so are handled in the same manner as other
1055  * per-packet errors.
1056  */
1057 #define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
1058 #define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
1059 #define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
1060 
1061 /*
1062  * Below is functionally equivalent to the legacy QLOGIC_IB_E_PKTERRS;
1063  * it is used to print "common" packet errors.
1064  */
1065 #define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
1066 	QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
1067 	QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
1068 	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1069 	QIB_E_P_REBP)
1070 
1071 /* Error bits that are packet-related (Receive, per-port) */
1072 #define QIB_E_P_RPKTERRS (\
1073 	QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
1074 	QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
1075 	QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
1076 	QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
1077 	QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
1078 	QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
1079 
1080 /*
1081  * Error bits that are Send-related (per port)
1082  * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
1083  * All of these potentially need to have a buffer disarmed
1084  */
1085 #define QIB_E_P_SPKTERRS (\
1086 	QIB_E_P_SUNEXP_PKTNUM |\
1087 	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1088 	QIB_E_P_SMAXPKTLEN |\
1089 	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1090 	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
1091 	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
1092 
1093 #define QIB_E_SPKTERRS ( \
1094 		QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
1095 		ERR_MASK_N(SendUnsupportedVLErr) |			\
1096 		QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
1097 
1098 #define QIB_E_P_SDMAERRS ( \
1099 	QIB_E_P_SDMAHALT | \
1100 	QIB_E_P_SDMADESCADDRMISALIGN | \
1101 	QIB_E_P_SDMAUNEXPDATA | \
1102 	QIB_E_P_SDMAMISSINGDW | \
1103 	QIB_E_P_SDMADWEN | \
1104 	QIB_E_P_SDMARPYTAG | \
1105 	QIB_E_P_SDMA1STDESC | \
1106 	QIB_E_P_SDMABASE | \
1107 	QIB_E_P_SDMATAILOUTOFBOUND | \
1108 	QIB_E_P_SDMAOUTOFBOUND | \
1109 	QIB_E_P_SDMAGENMISMATCH)
1110 
1111 /*
1112  * This sets some bits more than once, but makes it more obvious which
1113  * bits are not handled under other categories, and the repeat definition
1114  * is not a problem.
1115  */
1116 #define QIB_E_P_BITSEXTANT ( \
1117 	QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
1118 	QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
1119 	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
1120 	QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
1121 	)
1122 
1123 /*
1124  * These are errors that can occur when the link
1125  * changes state while a packet is being sent or received.  This doesn't
1126  * cover things like EBP or VCRC that can be the result of the sender
1127  * having the link change state mid-packet, so we receive a "known bad" packet.
1128  * All of these are "per port", so renamed:
1129  */
1130 #define QIB_E_P_LINK_PKTERRS (\
1131 	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1132 	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
1133 	QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
1134 	QIB_E_P_RUNEXPCHAR)
1135 
1136 /*
1137  * This sets some bits more than once, but makes it more obvious which
1138  * bits are not handled under other categories (such as QIB_E_SPKTERRS),
1139  * and the repeat definition is not a problem.
1140  */
1141 #define QIB_E_C_BITSEXTANT (\
1142 	QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
1143 	QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
1144 	QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
1145 
1146 /* Likewise Neuter E_SPKT_ERRS_IGNORE */
1147 #define E_SPKT_ERRS_IGNORE 0
1148 
1149 #define QIB_EXTS_MEMBIST_DISABLED \
1150 	SYM_MASK(EXTStatus, MemBISTDisabled)
1151 #define QIB_EXTS_MEMBIST_ENDTEST \
1152 	SYM_MASK(EXTStatus, MemBISTEndTest)
1153 
1154 #define QIB_E_SPIOARMLAUNCH \
1155 	ERR_MASK(SendArmLaunchErr)
1156 
1157 #define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
1158 #define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
1159 
1160 /*
1161  * IBTA_1_2 is set when multiple speeds are enabled (normal),
1162  * and also if forced QDR (only QDR enabled).  It's enabled for the
1163  * forced QDR case so that scrambling will be enabled by the TS3
1164  * exchange, when supported by both sides of the link.
1165  */
1166 #define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
1167 #define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
1168 #define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
1169 #define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
1170 #define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
1171 #define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
1172 	SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
1173 #define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
1174 
1175 #define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
1176 #define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
1177 
1178 #define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
1179 #define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1180 #define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1181 
1182 #define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1183 #define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1184 #define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
1185 	SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
1186 #define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
1187 	SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
1188 #define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
1189 
1190 #define IBA7322_REDIRECT_VEC_PER_REG 12
1191 
1192 #define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
1193 #define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
1194 #define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
1195 #define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
1196 #define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
1197 
1198 #define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
1199 
1200 #define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
1201 	.msg = #fldname , .sz = sizeof(#fldname) }
1202 #define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
1203 	fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
1204 static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
1205 	HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
1206 	HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
1207 	HWE_AUTO(PCIESerdesPClkNotDetect),
1208 	HWE_AUTO(PowerOnBISTFailed),
1209 	HWE_AUTO(TempsenseTholdReached),
1210 	HWE_AUTO(MemoryErr),
1211 	HWE_AUTO(PCIeBusParityErr),
1212 	HWE_AUTO(PcieCplTimeout),
1213 	HWE_AUTO(PciePoisonedTLP),
1214 	HWE_AUTO_P(SDmaMemReadErr, 1),
1215 	HWE_AUTO_P(SDmaMemReadErr, 0),
1216 	HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
1217 	HWE_AUTO_P(IBCBusToSPCParityErr, 1),
1218 	HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
1219 	HWE_AUTO(statusValidNoEop),
1220 	HWE_AUTO(LATriggered),
1221 	{ .mask = 0, .sz = 0 }
1222 };
1223 
1224 #define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
1225 	.msg = #fldname, .sz = sizeof(#fldname) }
1226 #define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
1227 	.msg = #fldname, .sz = sizeof(#fldname) }
1228 static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
1229 	E_AUTO(RcvEgrFullErr),
1230 	E_AUTO(RcvHdrFullErr),
1231 	E_AUTO(ResetNegated),
1232 	E_AUTO(HardwareErr),
1233 	E_AUTO(InvalidAddrErr),
1234 	E_AUTO(SDmaVL15Err),
1235 	E_AUTO(SBufVL15MisUseErr),
1236 	E_AUTO(InvalidEEPCmd),
1237 	E_AUTO(RcvContextShareErr),
1238 	E_AUTO(SendVLMismatchErr),
1239 	E_AUTO(SendArmLaunchErr),
1240 	E_AUTO(SendSpecialTriggerErr),
1241 	E_AUTO(SDmaWrongPortErr),
1242 	E_AUTO(SDmaBufMaskDuplicateErr),
1243 	{ .mask = 0, .sz = 0 }
1244 };
1245 
1246 static const struct  qib_hwerror_msgs qib_7322p_error_msgs[] = {
1247 	E_P_AUTO(IBStatusChanged),
1248 	E_P_AUTO(SHeadersErr),
1249 	E_P_AUTO(VL15BufMisuseErr),
1250 	/*
1251 	 * SDmaHaltErr is not really an error, make it clearer;
1252 	 */
1253 	{.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
1254 		.sz = 11},
1255 	E_P_AUTO(SDmaDescAddrMisalignErr),
1256 	E_P_AUTO(SDmaUnexpDataErr),
1257 	E_P_AUTO(SDmaMissingDwErr),
1258 	E_P_AUTO(SDmaDwEnErr),
1259 	E_P_AUTO(SDmaRpyTagErr),
1260 	E_P_AUTO(SDma1stDescErr),
1261 	E_P_AUTO(SDmaBaseErr),
1262 	E_P_AUTO(SDmaTailOutOfBoundErr),
1263 	E_P_AUTO(SDmaOutOfBoundErr),
1264 	E_P_AUTO(SDmaGenMismatchErr),
1265 	E_P_AUTO(SendBufMisuseErr),
1266 	E_P_AUTO(SendUnsupportedVLErr),
1267 	E_P_AUTO(SendUnexpectedPktNumErr),
1268 	E_P_AUTO(SendDroppedDataPktErr),
1269 	E_P_AUTO(SendDroppedSmpPktErr),
1270 	E_P_AUTO(SendPktLenErr),
1271 	E_P_AUTO(SendUnderRunErr),
1272 	E_P_AUTO(SendMaxPktLenErr),
1273 	E_P_AUTO(SendMinPktLenErr),
1274 	E_P_AUTO(RcvIBLostLinkErr),
1275 	E_P_AUTO(RcvHdrErr),
1276 	E_P_AUTO(RcvHdrLenErr),
1277 	E_P_AUTO(RcvBadTidErr),
1278 	E_P_AUTO(RcvBadVersionErr),
1279 	E_P_AUTO(RcvIBFlowErr),
1280 	E_P_AUTO(RcvEBPErr),
1281 	E_P_AUTO(RcvUnsupportedVLErr),
1282 	E_P_AUTO(RcvUnexpectedCharErr),
1283 	E_P_AUTO(RcvShortPktLenErr),
1284 	E_P_AUTO(RcvLongPktLenErr),
1285 	E_P_AUTO(RcvMaxPktLenErr),
1286 	E_P_AUTO(RcvMinPktLenErr),
1287 	E_P_AUTO(RcvICRCErr),
1288 	E_P_AUTO(RcvVCRCErr),
1289 	E_P_AUTO(RcvFormatErr),
1290 	{ .mask = 0, .sz = 0 }
1291 };
1292 
1293 /*
1294  * Below generates "auto-message" for interrupts not specific to any port or
1295  * context
1296  */
1297 #define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
1298 	.msg = #fldname, .sz = sizeof(#fldname) }
1299 /* Below generates "auto-message" for interrupts specific to a port */
1300 #define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
1301 	SYM_LSB(IntMask, fldname##Mask##_0), \
1302 	SYM_LSB(IntMask, fldname##Mask##_1)), \
1303 	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1304 /* For some reason, the SerDesTrimDone bits are reversed */
1305 #define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
1306 	SYM_LSB(IntMask, fldname##Mask##_1), \
1307 	SYM_LSB(IntMask, fldname##Mask##_0)), \
1308 	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1309 /*
1310  * Below generates "auto-message" for interrupts specific to a context,
1311  * with ctxt-number appended
1312  */
1313 #define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
1314 	SYM_LSB(IntMask, fldname##0IntMask), \
1315 	SYM_LSB(IntMask, fldname##17IntMask)), \
1316 	.msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
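/*
 * For example (illustrative), INTR_AUTO_P(SDmaInt) builds one table
 * entry whose mask spans the SDmaIntMask_0..SDmaIntMask_1 bits and is
 * tagged "SDmaInt_P"; err_decode() below appends the specific bit index
 * (port, or context for INTR_AUTO_C) when the message is printed.
 */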
1317 
1318 #define TXSYMPTOM_AUTO_P(fldname) \
1319 	{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
1320 	.msg = #fldname, .sz = sizeof(#fldname) }
1321 static const struct  qib_hwerror_msgs hdrchk_msgs[] = {
1322 	TXSYMPTOM_AUTO_P(NonKeyPacket),
1323 	TXSYMPTOM_AUTO_P(GRHFail),
1324 	TXSYMPTOM_AUTO_P(PkeyFail),
1325 	TXSYMPTOM_AUTO_P(QPFail),
1326 	TXSYMPTOM_AUTO_P(SLIDFail),
1327 	TXSYMPTOM_AUTO_P(RawIPV6),
1328 	TXSYMPTOM_AUTO_P(PacketTooSmall),
1329 	{ .mask = 0, .sz = 0 }
1330 };
1331 
1332 #define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
1333 
1334 /*
1335  * Called when we might have an error that is specific to a particular
1336  * PIO buffer, and may need to cancel that buffer, so it can be re-used,
1337  * and we don't need to force the update of pioavail
1338  */
1339 static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
1340 {
1341 	struct qib_devdata *dd = ppd->dd;
1342 	u32 i;
1343 	int any;
1344 	u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
1345 	u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
1346 	unsigned long sbuf[4];
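	/*
	 * regcnt is how many kr_sendbuffererror registers must be scanned to
	 * cover every 2K and 4K PIO buffer plus the VL15 buffers; sbuf[]
	 * keeps a copy of what was read so the same bits can be handed to
	 * qib_disarm_piobufs_set() below.
	 */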
1347 
1348 	/*
1349 	 * It's possible that sendbuffererror could have bits set; we might
1350 	 * have already done this as a result of hardware error handling.
1351 	 */
1352 	any = 0;
1353 	for (i = 0; i < regcnt; ++i) {
1354 		sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
1355 		if (sbuf[i]) {
1356 			any = 1;
1357 			qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
1358 		}
1359 	}
1360 
1361 	if (any)
1362 		qib_disarm_piobufs_set(dd, sbuf, piobcnt);
1363 }
1364 
1365 /* No txe_recover yet, if ever */
1366 
1367 /* No decode__errors yet */
1368 static void err_decode(char *msg, size_t len, u64 errs,
1369 		       const struct qib_hwerror_msgs *msp)
1370 {
1371 	u64 these, lmask;
1372 	int took, multi, n = 0;
1373 
1374 	while (errs && msp && msp->mask) {
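		/* multi is non-zero when this entry's mask covers more than one bit */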
1375 		multi = (msp->mask & (msp->mask - 1));
1376 		while (errs & msp->mask) {
1377 			these = (errs & msp->mask);
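			/*
			 * (these & (these - 1)) clears the lowest set bit, so
			 * the XOR leaves lmask with only that lowest-order bit
			 */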
1378 			lmask = (these & (these - 1)) ^ these;
1379 			if (len) {
1380 				if (n++) {
1381 					/* separate the strings */
1382 					*msg++ = ',';
1383 					len--;
1384 				}
1385 				BUG_ON(!msp->sz);
1386 				/* msp->sz counts the nul */
1387 				took = min_t(size_t, msp->sz - (size_t)1, len);
1388 				memcpy(msg,  msp->msg, took);
1389 				len -= took;
1390 				msg += took;
1391 				if (len)
1392 					*msg = '\0';
1393 			}
1394 			errs &= ~lmask;
1395 			if (len && multi) {
1396 				/* More than one bit in this mask */
1397 				int idx = -1;
1398 
1399 				while (lmask & msp->mask) {
1400 					++idx;
1401 					lmask >>= 1;
1402 				}
1403 				took = scnprintf(msg, len, "_%d", idx);
1404 				len -= took;
1405 				msg += took;
1406 			}
1407 		}
1408 		++msp;
1409 	}
1410 	/* If some bits are left, show in hex. */
1411 	if (len && errs)
1412 		snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
1413 			(unsigned long long) errs);
1414 }
1415 
1416 /* only called if r1 set */
1417 static void flush_fifo(struct qib_pportdata *ppd)
1418 {
1419 	struct qib_devdata *dd = ppd->dd;
1420 	u32 __iomem *piobuf;
1421 	u32 bufn;
1422 	u32 *hdr;
1423 	u64 pbc;
1424 	const unsigned hdrwords = 7;
1425 	static struct ib_header ibhdr = {
1426 		.lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
1427 		.lrh[1] = IB_LID_PERMISSIVE,
1428 		.lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
1429 		.lrh[3] = IB_LID_PERMISSIVE,
1430 		.u.oth.bth[0] = cpu_to_be32(
1431 			(IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
1432 		.u.oth.bth[1] = cpu_to_be32(0),
1433 		.u.oth.bth[2] = cpu_to_be32(0),
1434 		.u.oth.u.ud.deth[0] = cpu_to_be32(0),
1435 		.u.oth.u.ud.deth[1] = cpu_to_be32(0),
1436 	};
1437 
1438 	/*
1439 	 * Send a dummy VL15 packet to flush the launch FIFO.
1440 	 * This will not actually be sent since the TxeBypassIbc bit is set.
1441 	 */
1442 	pbc = PBC_7322_VL15_SEND |
1443 		(((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
1444 		(hdrwords + SIZE_OF_CRC);
1445 	piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
1446 	if (!piobuf)
1447 		return;
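	/*
	 * PIO launch sequence: the PBC control qword is written first, then
	 * the header dwords are copied in, with write-combining flushes
	 * around the last word when the chip requires them (QIB_PIO_FLUSH_WC).
	 */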
1448 	writeq(pbc, piobuf);
1449 	hdr = (u32 *) &ibhdr;
1450 	if (dd->flags & QIB_PIO_FLUSH_WC) {
1451 		qib_flush_wc();
1452 		qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
1453 		qib_flush_wc();
1454 		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
1455 		qib_flush_wc();
1456 	} else
1457 		qib_pio_copy(piobuf + 2, hdr, hdrwords);
1458 	qib_sendbuf_done(dd, bufn);
1459 }
1460 
1461 /*
1462  * This is called with interrupts disabled and sdma_lock held.
1463  */
1464 static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
1465 {
1466 	struct qib_devdata *dd = ppd->dd;
1467 	u64 set_sendctrl = 0;
1468 	u64 clr_sendctrl = 0;
1469 
1470 	if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
1471 		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1472 	else
1473 		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1474 
1475 	if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
1476 		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1477 	else
1478 		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1479 
1480 	if (op & QIB_SDMA_SENDCTRL_OP_HALT)
1481 		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1482 	else
1483 		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1484 
1485 	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
1486 		set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1487 				SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1488 				SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1489 	else
1490 		clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1491 				SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1492 				SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1493 
1494 	spin_lock(&dd->sendctrl_lock);
1495 
1496 	/* If we are draining everything, block sends first */
1497 	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1498 		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
1499 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1500 		qib_write_kreg(dd, kr_scratch, 0);
1501 	}
1502 
1503 	ppd->p_sendctrl |= set_sendctrl;
1504 	ppd->p_sendctrl &= ~clr_sendctrl;
1505 
1506 	if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
1507 		qib_write_kreg_port(ppd, krp_sendctrl,
1508 				    ppd->p_sendctrl |
1509 				    SYM_MASK(SendCtrl_0, SDmaCleanup));
1510 	else
1511 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1512 	qib_write_kreg(dd, kr_scratch, 0);
1513 
1514 	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1515 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
1516 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1517 		qib_write_kreg(dd, kr_scratch, 0);
1518 	}
1519 
1520 	spin_unlock(&dd->sendctrl_lock);
1521 
1522 	if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
1523 		flush_fifo(ppd);
1524 }
1525 
1526 static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
1527 {
1528 	__qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
1529 }
1530 
1531 static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
1532 {
1533 	/*
1534 	 * Set SendDmaLenGen, then clear and set the MSB of the generation
1535 	 * count, to enable generation checking and load the internal
1536 	 * generation counter.
1537 	 */
1538 	qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
1539 	qib_write_kreg_port(ppd, krp_senddmalengen,
1540 			    ppd->sdma_descq_cnt |
1541 			    (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
1542 }
1543 
1544 /*
1545  * Must be called with sdma_lock held, or before init finished.
1546  */
1547 static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
1548 {
1549 	/* Commit writes to memory and advance the tail on the chip */
1550 	wmb();
1551 	ppd->sdma_descq_tail = tail;
1552 	qib_write_kreg_port(ppd, krp_senddmatail, tail);
1553 }
1554 
1555 /*
1556  * This is called with interrupts disabled and sdma_lock held.
1557  */
1558 static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
1559 {
1560 	/*
1561 	 * Drain all FIFOs.
1562 	 * The hardware doesn't require this but we do it so that verbs
1563 	 * and user applications don't wait for link active to send stale
1564 	 * data.
1565 	 */
1566 	sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
1567 
1568 	qib_sdma_7322_setlengen(ppd);
1569 	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
1570 	ppd->sdma_head_dma[0] = 0;
1571 	qib_7322_sdma_sendctrl(ppd,
1572 		ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
1573 }
1574 
1575 #define DISABLES_SDMA ( \
1576 	QIB_E_P_SDMAHALT | \
1577 	QIB_E_P_SDMADESCADDRMISALIGN | \
1578 	QIB_E_P_SDMAMISSINGDW | \
1579 	QIB_E_P_SDMADWEN | \
1580 	QIB_E_P_SDMARPYTAG | \
1581 	QIB_E_P_SDMA1STDESC | \
1582 	QIB_E_P_SDMABASE | \
1583 	QIB_E_P_SDMATAILOUTOFBOUND | \
1584 	QIB_E_P_SDMAOUTOFBOUND | \
1585 	QIB_E_P_SDMAGENMISMATCH)
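
/*
 * DISABLES_SDMA collects the per-port error bits that take the SDMA
 * engine down; sdma_7322_p_errors() below turns the resulting halt into
 * SDMA state machine events.
 */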
1586 
1587 static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
1588 {
1589 	unsigned long flags;
1590 	struct qib_devdata *dd = ppd->dd;
1591 
1592 	errs &= QIB_E_P_SDMAERRS;
1593 	err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
1594 		   errs, qib_7322p_error_msgs);
1595 
1596 	if (errs & QIB_E_P_SDMAUNEXPDATA)
1597 		qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
1598 			    ppd->port);
1599 
1600 	spin_lock_irqsave(&ppd->sdma_lock, flags);
1601 
1602 	if (errs != QIB_E_P_SDMAHALT) {
1603 		/* SDMA errors have QIB_E_P_SDMAHALT and another bit set */
1604 		qib_dev_porterr(dd, ppd->port,
1605 			"SDMA %s 0x%016llx %s\n",
1606 			qib_sdma_state_names[ppd->sdma_state.current_state],
1607 			errs, ppd->cpspec->sdmamsgbuf);
1608 		dump_sdma_7322_state(ppd);
1609 	}
1610 
1611 	switch (ppd->sdma_state.current_state) {
1612 	case qib_sdma_state_s00_hw_down:
1613 		break;
1614 
1615 	case qib_sdma_state_s10_hw_start_up_wait:
1616 		if (errs & QIB_E_P_SDMAHALT)
1617 			__qib_sdma_process_event(ppd,
1618 				qib_sdma_event_e20_hw_started);
1619 		break;
1620 
1621 	case qib_sdma_state_s20_idle:
1622 		break;
1623 
1624 	case qib_sdma_state_s30_sw_clean_up_wait:
1625 		break;
1626 
1627 	case qib_sdma_state_s40_hw_clean_up_wait:
1628 		if (errs & QIB_E_P_SDMAHALT)
1629 			__qib_sdma_process_event(ppd,
1630 				qib_sdma_event_e50_hw_cleaned);
1631 		break;
1632 
1633 	case qib_sdma_state_s50_hw_halt_wait:
1634 		if (errs & QIB_E_P_SDMAHALT)
1635 			__qib_sdma_process_event(ppd,
1636 				qib_sdma_event_e60_hw_halted);
1637 		break;
1638 
1639 	case qib_sdma_state_s99_running:
1640 		__qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
1641 		__qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1642 		break;
1643 	}
1644 
1645 	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1646 }
1647 
1648 /*
1649  * handle per-device errors (not per-port errors)
1650  */
1651 static noinline void handle_7322_errors(struct qib_devdata *dd)
1652 {
1653 	char *msg;
1654 	u64 iserr = 0;
1655 	u64 errs;
1656 	u64 mask;
1657 
1658 	qib_stats.sps_errints++;
1659 	errs = qib_read_kreg64(dd, kr_errstatus);
1660 	if (!errs) {
1661 		qib_devinfo(dd->pcidev,
1662 			"device error interrupt, but no error bits set!\n");
1663 		goto done;
1664 	}
1665 
1666 	/* don't report errors that are masked */
1667 	errs &= dd->cspec->errormask;
1668 	msg = dd->cspec->emsgbuf;
1669 
1670 	/* do these first, they are most important */
1671 	if (errs & QIB_E_HARDWARE) {
1672 		*msg = '\0';
1673 		qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
1674 	}
1675 
1676 	if (errs & QIB_E_SPKTERRS) {
1677 		qib_disarm_7322_senderrbufs(dd->pport);
1678 		qib_stats.sps_txerrs++;
1679 	} else if (errs & QIB_E_INVALIDADDR)
1680 		qib_stats.sps_txerrs++;
1681 	else if (errs & QIB_E_ARMLAUNCH) {
1682 		qib_stats.sps_txerrs++;
1683 		qib_disarm_7322_senderrbufs(dd->pport);
1684 	}
1685 	qib_write_kreg(dd, kr_errclear, errs);
1686 
1687 	/*
1688 	 * The ones we mask off are handled specially below
1689 	 * or above.  Also mask SDMADISABLED by default as it
1690 	 * is too chatty.
1691 	 */
1692 	mask = QIB_E_HARDWARE;
1693 	*msg = '\0';
1694 
1695 	err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
1696 		   qib_7322error_msgs);
1697 
1698 	/*
1699 	 * Getting reset is a tragedy for all ports. Mark the device
1700 	 * _and_ the ports as "offline" in a way meaningful to each.
1701 	 */
1702 	if (errs & QIB_E_RESET) {
1703 		int pidx;
1704 
1705 		qib_dev_err(dd,
1706 			"Got reset, requires re-init (unload and reload driver)\n");
1707 		dd->flags &= ~QIB_INITTED;  /* needs re-init */
1708 		/* mark as having had error */
1709 		*dd->devstatusp |= QIB_STATUS_HWERROR;
1710 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
1711 			if (dd->pport[pidx].link_speed_supported)
1712 				*dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
1713 	}
1714 
1715 	if (*msg && iserr)
1716 		qib_dev_err(dd, "%s error\n", msg);
1717 
1718 	/*
1719 	 * If there were hdrq or egrfull errors, wake up any processes
1720 	 * waiting in poll.  We used to try to check which contexts had
1721 	 * the overflow, but given the cost of that and the chip reads
1722 	 * to support it, it's better to just wake everybody up if we
1723 	 * get an overflow; waiters can poll again if it's not them.
1724 	 */
1725 	if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1726 		qib_handle_urcv(dd, ~0U);
1727 		if (errs & ERR_MASK(RcvEgrFullErr))
1728 			qib_stats.sps_buffull++;
1729 		else
1730 			qib_stats.sps_hdrfull++;
1731 	}
1732 
1733 done:
1734 	return;
1735 }
1736 
1737 static void qib_error_tasklet(unsigned long data)
1738 {
1739 	struct qib_devdata *dd = (struct qib_devdata *)data;
1740 
1741 	handle_7322_errors(dd);
1742 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1743 }
1744 
1745 static void reenable_chase(struct timer_list *t)
1746 {
1747 	struct qib_chippport_specific *cp = from_timer(cp, t, chase_timer);
1748 	struct qib_pportdata *ppd = cp->ppd;
1749 
1750 	ppd->cpspec->chase_timer.expires = 0;
1751 	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1752 		QLOGIC_IB_IBCC_LINKINITCMD_POLL);
1753 }
1754 
1755 static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
1756 		u8 ibclt)
1757 {
1758 	ppd->cpspec->chase_end = 0;
1759 
1760 	if (!qib_chase)
1761 		return;
1762 
1763 	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1764 		QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1765 	ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
1766 	add_timer(&ppd->cpspec->chase_timer);
1767 }
1768 
1769 static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1770 {
1771 	u8 ibclt;
1772 	unsigned long tnow;
1773 
1774 	ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
1775 
1776 	/*
1777 	 * Detect and handle the state chase issue, where we can
1778 	 * get stuck if we are unlucky on timing on both sides of
1779 	 * the link.   If we are, we disable, set a timer, and
1780 	 * then re-enable.
1781 	 */
1782 	switch (ibclt) {
1783 	case IB_7322_LT_STATE_CFGRCVFCFG:
1784 	case IB_7322_LT_STATE_CFGWAITRMT:
1785 	case IB_7322_LT_STATE_TXREVLANES:
1786 	case IB_7322_LT_STATE_CFGENH:
1787 		tnow = jiffies;
1788 		if (ppd->cpspec->chase_end &&
1789 		     time_after(tnow, ppd->cpspec->chase_end))
1790 			disable_chase(ppd, tnow, ibclt);
1791 		else if (!ppd->cpspec->chase_end)
1792 			ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1793 		break;
1794 	default:
1795 		ppd->cpspec->chase_end = 0;
1796 		break;
1797 	}
1798 
1799 	if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
1800 	      ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
1801 	     ibclt == IB_7322_LT_STATE_LINKUP) &&
1802 	    (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1803 		force_h1(ppd);
1804 		ppd->cpspec->qdr_reforce = 1;
1805 		if (!ppd->dd->cspec->r1)
1806 			serdes_7322_los_enable(ppd, 0);
1807 	} else if (ppd->cpspec->qdr_reforce &&
1808 		(ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
1809 		 (ibclt == IB_7322_LT_STATE_CFGENH ||
1810 		ibclt == IB_7322_LT_STATE_CFGIDLE ||
1811 		ibclt == IB_7322_LT_STATE_LINKUP))
1812 		force_h1(ppd);
1813 
1814 	if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
1815 	    ppd->link_speed_enabled == QIB_IB_QDR &&
1816 	    (ibclt == IB_7322_LT_STATE_CFGTEST ||
1817 	     ibclt == IB_7322_LT_STATE_CFGENH ||
1818 	     (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
1819 	      ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
1820 		adj_tx_serdes(ppd);
1821 
1822 	if (ibclt != IB_7322_LT_STATE_LINKUP) {
1823 		u8 ltstate = qib_7322_phys_portstate(ibcst);
1824 		u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
1825 					  LinkTrainingState);
1826 		if (!ppd->dd->cspec->r1 &&
1827 		    pibclt == IB_7322_LT_STATE_LINKUP &&
1828 		    ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1829 		    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1830 		    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1831 		    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1832 			/* If the link went down (but not into recovery),
1833 			 * turn LOS back on */
1834 			serdes_7322_los_enable(ppd, 1);
1835 		if (!ppd->cpspec->qdr_dfe_on &&
1836 		    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
1837 			ppd->cpspec->qdr_dfe_on = 1;
1838 			ppd->cpspec->qdr_dfe_time = 0;
1839 			/* On link down, reenable QDR adaptation */
1840 			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1841 					    ppd->dd->cspec->r1 ?
1842 					    QDR_STATIC_ADAPT_DOWN_R1 :
1843 					    QDR_STATIC_ADAPT_DOWN);
1844 			pr_info(
1845 				"IB%u:%u re-enabled QDR adaptation ibclt %x\n",
1846 				ppd->dd->unit, ppd->port, ibclt);
1847 		}
1848 	}
1849 }
1850 
1851 static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
1852 
1853 /*
1854  * This is per-pport error handling.
1855  * It will likely get its own MSIx interrupt (one for each port,
1856  * although just a single handler).
1857  */
1858 static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1859 {
1860 	char *msg;
1861 	u64 ignore_this_time = 0, iserr = 0, errs, fmask;
1862 	struct qib_devdata *dd = ppd->dd;
1863 
1864 	/* do this as soon as possible */
1865 	fmask = qib_read_kreg64(dd, kr_act_fmask);
1866 	if (!fmask)
1867 		check_7322_rxe_status(ppd);
1868 
1869 	errs = qib_read_kreg_port(ppd, krp_errstatus);
1870 	if (!errs)
1871 		qib_devinfo(dd->pcidev,
1872 			 "Port%d error interrupt, but no error bits set!\n",
1873 			 ppd->port);
1874 	if (!fmask)
1875 		errs &= ~QIB_E_P_IBSTATUSCHANGED;
1876 	if (!errs)
1877 		goto done;
1878 
1879 	msg = ppd->cpspec->epmsgbuf;
1880 	*msg = '\0';
1881 
1882 	if (errs & ~QIB_E_P_BITSEXTANT) {
1883 		err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1884 			   errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1885 		if (!*msg)
1886 			snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
1887 				 "no others");
1888 		qib_dev_porterr(dd, ppd->port,
1889 			"error interrupt with unknown errors 0x%016Lx set (and %s)\n",
1890 			(errs & ~QIB_E_P_BITSEXTANT), msg);
1891 		*msg = '\0';
1892 	}
1893 
1894 	if (errs & QIB_E_P_SHDR) {
1895 		u64 symptom;
1896 
1897 		/* determine cause, then write to clear */
1898 		symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1899 		qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1900 		err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
1901 			   hdrchk_msgs);
1902 		*msg = '\0';
1903 		/* senderrbuf cleared in SPKTERRS below */
1904 	}
1905 
1906 	if (errs & QIB_E_P_SPKTERRS) {
1907 		if ((errs & QIB_E_P_LINK_PKTERRS) &&
1908 		    !(ppd->lflags & QIBL_LINKACTIVE)) {
1909 			/*
1910 			 * This can happen when trying to bring the link
1911 			 * up, but the IB link changes state at the "wrong"
1912 			 * time. The IB logic then complains that the packet
1913 			 * isn't valid.  We don't want to confuse people, so
1914 			 * we just don't print them, except at debug
1915 			 */
1916 			err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1917 				   (errs & QIB_E_P_LINK_PKTERRS),
1918 				   qib_7322p_error_msgs);
1919 			*msg = '\0';
1920 			ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1921 		}
1922 		qib_disarm_7322_senderrbufs(ppd);
1923 	} else if ((errs & QIB_E_P_LINK_PKTERRS) &&
1924 		   !(ppd->lflags & QIBL_LINKACTIVE)) {
1925 		/*
1926 		 * This can happen when SMA is trying to bring the link
1927 		 * up, but the IB link changes state at the "wrong" time.
1928 		 * The IB logic then complains that the packet isn't
1929 		 * valid.  We don't want to confuse people, so we just
1930 		 * don't print them, except at debug
1931 		 */
1932 		err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
1933 			   qib_7322p_error_msgs);
1934 		ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1935 		*msg = '\0';
1936 	}
1937 
1938 	qib_write_kreg_port(ppd, krp_errclear, errs);
1939 
1940 	errs &= ~ignore_this_time;
1941 	if (!errs)
1942 		goto done;
1943 
1944 	if (errs & QIB_E_P_RPKTERRS)
1945 		qib_stats.sps_rcverrs++;
1946 	if (errs & QIB_E_P_SPKTERRS)
1947 		qib_stats.sps_txerrs++;
1948 
1949 	iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
1950 
1951 	if (errs & QIB_E_P_SDMAERRS)
1952 		sdma_7322_p_errors(ppd, errs);
1953 
1954 	if (errs & QIB_E_P_IBSTATUSCHANGED) {
1955 		u64 ibcs;
1956 		u8 ltstate;
1957 
1958 		ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
1959 		ltstate = qib_7322_phys_portstate(ibcs);
1960 
1961 		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1962 			handle_serdes_issues(ppd, ibcs);
1963 		if (!(ppd->cpspec->ibcctrl_a &
1964 		      SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
1965 			/*
1966 			 * We got our interrupt, so init code should be
1967 			 * happy and not try alternatives. Now squelch
1968 			 * other "chatter" from link-negotiation (pre Init)
1969 			 */
1970 			ppd->cpspec->ibcctrl_a |=
1971 				SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
1972 			qib_write_kreg_port(ppd, krp_ibcctrl_a,
1973 					    ppd->cpspec->ibcctrl_a);
1974 		}
1975 
1976 		/* Update our picture of width and speed from chip */
1977 		ppd->link_width_active =
1978 			(ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
1979 			    IB_WIDTH_4X : IB_WIDTH_1X;
1980 		ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
1981 			LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
1982 			  SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
1983 				   QIB_IB_DDR : QIB_IB_SDR;
1984 
1985 		if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
1986 		    IB_PHYSPORTSTATE_DISABLED)
1987 			qib_set_ib_7322_lstate(ppd, 0,
1988 			       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1989 		else
1990 			/*
1991 			 * Since going into a recovery state causes the link
1992 			 * state to go down and since recovery is transitory,
1993 			 * it is better if we "miss" ever seeing the link
1994 			 * training state go into recovery (i.e., ignore this
1995 			 * transition for link state special handling purposes)
1996 			 * without updating lastibcstat.
1997 			 */
1998 			if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1999 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
2000 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
2001 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
2002 				qib_handle_e_ibstatuschanged(ppd, ibcs);
2003 	}
2004 	if (*msg && iserr)
2005 		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
2006 
2007 	if (ppd->state_wanted & ppd->lflags)
2008 		wake_up_interruptible(&ppd->state_wait);
2009 done:
2010 	return;
2011 }
2012 
2013 /* enable/disable chip from delivering interrupts */
2014 static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
2015 {
2016 	if (enable) {
2017 		if (dd->flags & QIB_BADINTR)
2018 			return;
2019 		qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
2020 		/* cause any pending enabled interrupts to be re-delivered */
2021 		qib_write_kreg(dd, kr_intclear, 0ULL);
2022 		if (dd->cspec->num_msix_entries) {
2023 			/* and same for MSIx */
2024 			u64 val = qib_read_kreg64(dd, kr_intgranted);
2025 
2026 			if (val)
2027 				qib_write_kreg(dd, kr_intgranted, val);
2028 		}
2029 	} else
2030 		qib_write_kreg(dd, kr_intmask, 0ULL);
2031 }
2032 
2033 /*
2034  * Try to cleanup as much as possible for anything that might have gone
2035  * wrong while in freeze mode, such as pio buffers being written by user
2036  * processes (causing armlaunch), send errors due to going into freeze mode,
2037  * etc., and try to avoid causing extra interrupts while doing so.
2038  * Forcibly update the in-memory pioavail register copies after cleanup
2039  * because the chip won't do it while in freeze mode (the register values
2040  * themselves are kept correct).
2041  * Make sure that we don't lose any important interrupts by using the chip
2042  * feature that says that writing 0 to a bit in *clear that is set in
2043  * *status will cause an interrupt to be generated again (if allowed by
2044  * the *mask value).
2045  * This is in chip-specific code because of all of the register accesses,
2046  * even though the details are similar on most chips.
2047  */
2048 static void qib_7322_clear_freeze(struct qib_devdata *dd)
2049 {
2050 	int pidx;
2051 
2052 	/* disable error interrupts, to avoid confusion */
2053 	qib_write_kreg(dd, kr_errmask, 0ULL);
2054 
2055 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
2056 		if (dd->pport[pidx].link_speed_supported)
2057 			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2058 					    0ULL);
2059 
2060 	/* also disable interrupts; errormask is sometimes overwritten */
2061 	qib_7322_set_intr_state(dd, 0);
2062 
2063 	/* clear the freeze, and be sure chip saw it */
2064 	qib_write_kreg(dd, kr_control, dd->control);
2065 	qib_read_kreg32(dd, kr_scratch);
2066 
2067 	/*
2068 	 * Force new interrupt if any hwerr, error or interrupt bits are
2069 	 * still set, and clear "safe" send packet errors related to freeze
2070 	 * and cancelling sends.  Re-enable error interrupts before possible
2071 	 * force of re-interrupt on pending interrupts.
2072 	 */
2073 	qib_write_kreg(dd, kr_hwerrclear, 0ULL);
2074 	qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
2075 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2076 	/* We need to purge per-port errs and reset mask, too */
2077 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
2078 		if (!dd->pport[pidx].link_speed_supported)
2079 			continue;
2080 		qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull);
2081 		qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull);
2082 	}
2083 	qib_7322_set_intr_state(dd, 1);
2084 }
2085 
2086 /* no error handling to speak of */
2087 /**
2088  * qib_7322_handle_hwerrors - display hardware errors.
2089  * @dd: the qlogic_ib device
2090  * @msg: the output buffer
2091  * @msgl: the size of the output buffer
2092  *
2093  * Use the same msg buffer as regular errors (qib_handle_errors()) to
2094  * avoid excessive stack use.  Most hardware errors are catastrophic,
2095  * but for right now, we'll print them and continue.
2097  */
2098 static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
2099 				     size_t msgl)
2100 {
2101 	u64 hwerrs;
2102 	u32 ctrl;
2103 	int isfatal = 0;
2104 
2105 	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
2106 	if (!hwerrs)
2107 		goto bail;
2108 	if (hwerrs == ~0ULL) {
2109 		qib_dev_err(dd,
2110 			"Read of hardware error status failed (all bits set); ignoring\n");
2111 		goto bail;
2112 	}
2113 	qib_stats.sps_hwerrs++;
2114 
2115 	/* Always clear the error status register, except BIST fail */
2116 	qib_write_kreg(dd, kr_hwerrclear, hwerrs &
2117 		       ~HWE_MASK(PowerOnBISTFailed));
2118 
2119 	hwerrs &= dd->cspec->hwerrmask;
2120 
2121 	/* no EEPROM logging, yet */
2122 
2123 	if (hwerrs)
2124 		qib_devinfo(dd->pcidev,
2125 			"Hardware error: hwerr=0x%llx (cleared)\n",
2126 			(unsigned long long) hwerrs);
2127 
2128 	ctrl = qib_read_kreg32(dd, kr_control);
2129 	if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
2130 		/*
2131 		 * No recovery yet...
2132 		 */
2133 		if ((hwerrs & ~HWE_MASK(LATriggered)) ||
2134 		    dd->cspec->stay_in_freeze) {
2135 			/*
2136 			 * If any bits we aren't ignoring are set, only make
2137 			 * the complaint once, in case it's stuck or recurring
2138 			 * and we get here multiple times.
2139 			 * Force the link down, so the switch knows, and the
2140 			 * LEDs are turned off.
2141 			 */
2142 			if (dd->flags & QIB_INITTED)
2143 				isfatal = 1;
2144 		} else
2145 			qib_7322_clear_freeze(dd);
2146 	}
2147 
2148 	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
2149 		isfatal = 1;
2150 		strlcpy(msg,
2151 			"[Memory BIST test failed, InfiniPath hardware unusable]",
2152 			msgl);
2153 		/* ignore from now on, so disable until driver reloaded */
2154 		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
2155 		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2156 	}
2157 
2158 	err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
2159 
2160 	/* Ignore esoteric PLL failures et al. */
2161 
2162 	qib_dev_err(dd, "%s hardware error\n", msg);
2163 
2164 	if (hwerrs &
2165 		   (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) |
2166 		    SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) {
2167 		int pidx = 0;
2168 		int err;
2169 		unsigned long flags;
2170 		struct qib_pportdata *ppd = dd->pport;
2171 
2172 		for (; pidx < dd->num_pports; ++pidx, ppd++) {
2173 			err = 0;
2174 			if (pidx == 0 && (hwerrs &
2175 				SYM_MASK(HwErrMask, SDmaMemReadErrMask_0)))
2176 				err++;
2177 			if (pidx == 1 && (hwerrs &
2178 				SYM_MASK(HwErrMask, SDmaMemReadErrMask_1)))
2179 				err++;
2180 			if (err) {
2181 				spin_lock_irqsave(&ppd->sdma_lock, flags);
2182 				dump_sdma_7322_state(ppd);
2183 				spin_unlock_irqrestore(&ppd->sdma_lock, flags);
2184 			}
2185 		}
2186 	}
2187 
2188 	if (isfatal && !dd->diag_client) {
2189 		qib_dev_err(dd,
2190 			"Fatal Hardware Error, no longer usable, SN %.16s\n",
2191 			dd->serial);
2192 		/*
2193 		 * for /sys status file and user programs to print; if no
2194 		 * trailing brace is copied, we'll know it was truncated.
2195 		 */
2196 		if (dd->freezemsg)
2197 			snprintf(dd->freezemsg, dd->freezelen,
2198 				 "{%s}", msg);
2199 		qib_disable_after_error(dd);
2200 	}
2201 bail:;
2202 }
2203 
2204 /**
2205  * qib_7322_init_hwerrors - enable hardware errors
2206  * @dd: the qlogic_ib device
2207  *
2208  * Now that we have finished initializing everything that might reasonably
2209  * cause a hardware error, and cleared those error bits as they occur,
2210  * we can enable hardware errors in the mask (potentially enabling
2211  * freeze mode), and enable hardware errors as errors (along with
2212  * everything else) in errormask.
2213  */
2214 static void qib_7322_init_hwerrors(struct qib_devdata *dd)
2215 {
2216 	int pidx;
2217 	u64 extsval;
2218 
2219 	extsval = qib_read_kreg64(dd, kr_extstatus);
2220 	if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
2221 			 QIB_EXTS_MEMBIST_ENDTEST)))
2222 		qib_dev_err(dd, "MemBIST did not complete!\n");
2223 
2224 	/* never clear BIST failure, so reported on each driver load */
2225 	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
2226 	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2227 
2228 	/* clear all */
2229 	qib_write_kreg(dd, kr_errclear, ~0ULL);
2230 	/* enable errors that are masked, at least this first time. */
2231 	qib_write_kreg(dd, kr_errmask, ~0ULL);
2232 	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
2233 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
2234 		if (dd->pport[pidx].link_speed_supported)
2235 			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2236 					    ~0ULL);
2237 }
2238 
2239 /*
2240  * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
2241  * on chips that are count-based, rather than trigger-based.  There is no
2242  * reference counting, but that's also fine, given the intended use.
2243  * Only chip-specific because it's all register accesses
2244  */
2245 static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
2246 {
2247 	if (enable) {
2248 		qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
2249 		dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
2250 	} else
2251 		dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
2252 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2253 }
2254 
2255 /*
2256  * Formerly took parameter <which> in pre-shifted,
2257  * pre-merged form with LinkCmd and LinkInitCmd
2258  * together, and assumed that zero was a NOP.
2259  */
2260 static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
2261 				   u16 linitcmd)
2262 {
2263 	u64 mod_wd;
2264 	struct qib_devdata *dd = ppd->dd;
2265 	unsigned long flags;
2266 
2267 	if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
2268 		/*
2269 		 * If we are told to disable, note that so link-recovery
2270 		 * code does not attempt to bring us back up.
2271 		 * Also reset everything that we can, so we start
2272 		 * completely clean when re-enabled (before we
2273 		 * actually issue the disable to the IBC)
2274 		 */
2275 		qib_7322_mini_pcs_reset(ppd);
2276 		spin_lock_irqsave(&ppd->lflags_lock, flags);
2277 		ppd->lflags |= QIBL_IB_LINK_DISABLED;
2278 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2279 	} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
2280 		/*
2281 		 * Any other linkinitcmd will lead to LINKDOWN and then
2282 		 * to INIT (if all is well), so clear flag to let
2283 		 * link-recovery code attempt to bring us back up.
2284 		 */
2285 		spin_lock_irqsave(&ppd->lflags_lock, flags);
2286 		ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
2287 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2288 		/*
2289 		 * Clear status change interrupt reduction so the
2290 		 * new state is seen.
2291 		 */
2292 		ppd->cpspec->ibcctrl_a &=
2293 			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
2294 	}
2295 
2296 	mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
2297 		(linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2298 
2299 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
2300 			    mod_wd);
2301 	/* write to chip to prevent back-to-back writes of ibc reg */
2302 	qib_write_kreg(dd, kr_scratch, 0);
2303 
2304 }
2305 
2306 /*
2307  * The total RCV buffer memory is 64KB, used for both ports, and is
2308  * in units of 64 bytes (same as IB flow control credit unit).
2309  * The consumedVL units in the same registers are in 32 byte units!
2310  * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
2311  * and we can therefore allocate just 9 IB credits for 2 VL15 packets
2312  * in krp_rxcreditvl15, rather than 10.
2313  */
2314 #define RCV_BUF_UNITSZ 64
2315 #define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
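/*
 * Worked example of the credit arithmetic above (a sketch, assuming both
 * ports are in use): NUM_RCV_BUF_UNITS = 64K / (64 * 2) = 512 credits per
 * port.  Two 288-byte VL15 packets need (2 * 288 + 63) / 64 = 9 of those
 * credits, which is what set_vls() below writes to krp_rxcreditvl15; the
 * remaining 503 credits are spread across VL0-N.
 */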
2316 
2317 static void set_vls(struct qib_pportdata *ppd)
2318 {
2319 	int i, numvls, totcred, cred_vl, vl0extra;
2320 	struct qib_devdata *dd = ppd->dd;
2321 	u64 val;
2322 
2323 	numvls = qib_num_vls(ppd->vls_operational);
2324 
2325 	/*
2326 	 * Set up per-VL credits. Below is a kluge based on these assumptions:
2327 	 * 1) port is disabled at the time early_init is called.
2328 	 * 2) give VL15 9 credits, enough for two max-plausible packets.
2329 	 * 3) Give VL0-N the rest, with any rounding excess used for VL0
2330 	 */
2331 	/* 2 VL15 packets @ 288 bytes each (including IB headers) */
2332 	totcred = NUM_RCV_BUF_UNITS(dd);
2333 	cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
2334 	totcred -= cred_vl;
2335 	qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
2336 	cred_vl = totcred / numvls;
2337 	vl0extra = totcred - cred_vl * numvls;
2338 	qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
2339 	for (i = 1; i < numvls; i++)
2340 		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
2341 	for (; i < 8; i++) /* no buffer space for other VLs */
2342 		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
2343 
2344 	/* Notify IBC that credits need to be recalculated */
2345 	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
2346 	val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2347 	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2348 	qib_write_kreg(dd, kr_scratch, 0ULL);
2349 	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2350 	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2351 
2352 	for (i = 0; i < numvls; i++)
2353 		val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
2354 	val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
2355 
2356 	/* Change the number of operational VLs */
2357 	ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
2358 				~SYM_MASK(IBCCtrlA_0, NumVLane)) |
2359 		((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
2360 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2361 	qib_write_kreg(dd, kr_scratch, 0ULL);
2362 }
2363 
2364 /*
2365  * The code that deals with actual SerDes is in serdes_7322_init().
2366  * Compared to the code for iba7220, it is minimal.
2367  */
2368 static int serdes_7322_init(struct qib_pportdata *ppd);
2369 
2370 /**
2371  * qib_7322_bringup_serdes - bring up the serdes
2372  * @ppd: physical port on the qlogic_ib device
2373  */
2374 static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2375 {
2376 	struct qib_devdata *dd = ppd->dd;
2377 	u64 val, guid, ibc;
2378 	unsigned long flags;
2379 	int ret = 0;
2380 
2381 	/*
2382 	 * SerDes model not in Pd, but still need to
2383 	 * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
2384 	 * eventually.
2385 	 */
2386 	/* Put IBC in reset, sends disabled (should be in reset already) */
2387 	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2388 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2389 	qib_write_kreg(dd, kr_scratch, 0ULL);
2390 
2391 	/* ensure previous Tx parameters are not still forced */
2392 	qib_write_kreg_port(ppd, krp_tx_deemph_override,
2393 		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
2394 		reset_tx_deemphasis_override));
2395 
2396 	if (qib_compat_ddr_negotiate) {
2397 		ppd->cpspec->ibdeltainprog = 1;
2398 		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
2399 						crp_ibsymbolerr);
2400 		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
2401 						crp_iblinkerrrecov);
2402 	}
2403 
2404 	/* flowcontrolwatermark is in units of KBytes */
2405 	ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
2406 	/*
2407 	 * Flow control is sent this often, even if no changes in
2408 	 * buffer space occur.  Units are 128ns for this chip.
2409 	 * Set to 3usec.
2410 	 */
2411 	ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
2412 	/* max error tolerance */
2413 	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
2414 	/* IB credit flow control. */
2415 	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
2416 	/*
2417 	 * set initial max size pkt IBC will send, including ICRC; it's the
2418 	 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
2419 	 */
2420 	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
2421 		SYM_LSB(IBCCtrlA_0, MaxPktLen);
2422 	ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2423 
2424 	/*
2425 	 * Reset the PCS interface to the serdes (and also ibc, which is still
2426 	 * in reset from above).  Writes new value of ibcctrl_a as last step.
2427 	 */
2428 	qib_7322_mini_pcs_reset(ppd);
2429 
2430 	if (!ppd->cpspec->ibcctrl_b) {
2431 		unsigned lse = ppd->link_speed_enabled;
2432 
2433 		/*
2434 		 * Not on re-init after reset, establish shadow
2435 		 * and force initial config.
2436 		 */
2437 		ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
2438 							     krp_ibcctrl_b);
2439 		ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
2440 				IBA7322_IBC_SPEED_DDR |
2441 				IBA7322_IBC_SPEED_SDR |
2442 				IBA7322_IBC_WIDTH_AUTONEG |
2443 				SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
2444 		if (lse & (lse - 1)) /* Multiple speeds enabled */
2445 			ppd->cpspec->ibcctrl_b |=
2446 				(lse << IBA7322_IBC_SPEED_LSB) |
2447 				IBA7322_IBC_IBTA_1_2_MASK |
2448 				IBA7322_IBC_MAX_SPEED_MASK;
2449 		else
2450 			ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
2451 				IBA7322_IBC_SPEED_QDR |
2452 				 IBA7322_IBC_IBTA_1_2_MASK :
2453 				(lse == QIB_IB_DDR) ?
2454 					IBA7322_IBC_SPEED_DDR :
2455 					IBA7322_IBC_SPEED_SDR;
2456 		if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
2457 		    (IB_WIDTH_1X | IB_WIDTH_4X))
2458 			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
2459 		else
2460 			ppd->cpspec->ibcctrl_b |=
2461 				ppd->link_width_enabled == IB_WIDTH_4X ?
2462 				IBA7322_IBC_WIDTH_4X_ONLY :
2463 				IBA7322_IBC_WIDTH_1X_ONLY;
2464 
2465 		/* always enable these on driver reload, not sticky */
2466 		ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
2467 			IBA7322_IBC_HRTBT_MASK);
2468 	}
2469 	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
2470 
2471 	/* setup so we have more time at CFGTEST to change H1 */
2472 	val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
2473 	val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
2474 	val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
2475 	qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
2476 
2477 	serdes_7322_init(ppd);
2478 
2479 	guid = be64_to_cpu(ppd->guid);
2480 	if (!guid) {
2481 		if (dd->base_guid)
2482 			guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
2483 		ppd->guid = cpu_to_be64(guid);
2484 	}
2485 
2486 	qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
2487 	/* write to chip to prevent back-to-back writes of ibc reg */
2488 	qib_write_kreg(dd, kr_scratch, 0);
2489 
2490 	/* Enable port */
2491 	ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2492 	set_vls(ppd);
2493 
2494 	/* initially come up DISABLED, without sending anything. */
2495 	val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2496 					QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2497 	qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
2498 	qib_write_kreg(dd, kr_scratch, 0ULL);
2499 	/* clear the linkinit cmds */
2500 	ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
2501 
2502 	/* be paranoid against later code motion, etc. */
2503 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2504 	ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
2505 	qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2506 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2507 
2508 	/* Also enable IBSTATUSCHG interrupt.  */
2509 	val = qib_read_kreg_port(ppd, krp_errmask);
2510 	qib_write_kreg_port(ppd, krp_errmask,
2511 		val | ERR_MASK_N(IBStatusChanged));
2512 
2513 	/* Always zero until we start messing with SerDes for real */
2514 	return ret;
2515 }
2516 
2517 /**
2518  * qib_7322_mini_quiet_serdes - set serdes to txidle
2519  * @ppd: physical port on the qlogic_ib device
2520  * Called when driver is being unloaded
2521  */
2522 static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
2523 {
2524 	u64 val;
2525 	unsigned long flags;
2526 
2527 	qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2528 
2529 	spin_lock_irqsave(&ppd->lflags_lock, flags);
2530 	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2531 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2532 	wake_up(&ppd->cpspec->autoneg_wait);
2533 	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
2534 	if (ppd->dd->cspec->r1)
2535 		cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
2536 
2537 	ppd->cpspec->chase_end = 0;
2538 	if (ppd->cpspec->chase_timer.function) /* if initted */
2539 		del_timer_sync(&ppd->cpspec->chase_timer);
2540 
2541 	/*
2542 	 * Despite the name, actually disables IBC as well. Do it when
2543 	 * we are as sure as possible that no more packets can be
2544 	 * received, following the down and the PCS reset.
2545 	 * The actual disabling happens in qib_7322_mini_pcs_reset(),
2546 	 * along with the PCS being reset.
2547 	 */
2548 	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2549 	qib_7322_mini_pcs_reset(ppd);
2550 
2551 	/*
2552 	 * Update the adjusted counters so the adjustment persists
2553 	 * across driver reload.
2554 	 */
2555 	if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
2556 	    ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
2557 		struct qib_devdata *dd = ppd->dd;
2558 		u64 diagc;
2559 
2560 		/* enable counter writes */
2561 		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
2562 		qib_write_kreg(dd, kr_hwdiagctrl,
2563 			       diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
2564 
2565 		if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
2566 			val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
2567 			if (ppd->cpspec->ibdeltainprog)
2568 				val -= val - ppd->cpspec->ibsymsnap;
2569 			val -= ppd->cpspec->ibsymdelta;
2570 			write_7322_creg_port(ppd, crp_ibsymbolerr, val);
2571 		}
2572 		if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
2573 			val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
2574 			if (ppd->cpspec->ibdeltainprog)
2575 				val -= val - ppd->cpspec->iblnkerrsnap;
2576 			val -= ppd->cpspec->iblnkerrdelta;
2577 			write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
2578 		}
2579 		if (ppd->cpspec->iblnkdowndelta) {
2580 			val = read_7322_creg32_port(ppd, crp_iblinkdown);
2581 			val += ppd->cpspec->iblnkdowndelta;
2582 			write_7322_creg_port(ppd, crp_iblinkdown, val);
2583 		}
2584 		/*
2585 		 * No need to save ibmalfdelta since IB perfcounters
2586 		 * are cleared on driver reload.
2587 		 */
2588 
2589 		/* and disable counter writes */
2590 		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
2591 	}
2592 }
2593 
2594 /**
2595  * qib_setup_7322_setextled - set the state of the two external LEDs
2596  * @ppd: physical port on the qlogic_ib device
2597  * @on: whether the link is up or not
2598  *
2599  * The exact combination of LEDs lit when "on" is true is determined by
2600  * looking at the ibcstatus.
2601  *
2602  * These LEDs indicate the physical and logical state of the IB link.
2603  * For this chip (at least with recommended board pinouts), LED1
2604  * is Yellow (logical state) and LED2 is Green (physical state).
2605  *
2606  * Note:  We try to match the Mellanox HCA LED behavior as best
2607  * we can.  Green indicates physical link state is OK (something is
2608  * plugged in, and we can train).
2609  * Amber indicates the link is logically up (ACTIVE).
2610  * Mellanox further blinks the amber LED to indicate data packet
2611  * activity, but we have no hardware support for that, so it would
2612  * require waking up every 10-20 msecs and checking the counters
2613  * on the chip, and then turning the LED off if appropriate.  That's
2614  * visible overhead, so not something we will do.
2615  */
2616 static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2617 {
2618 	struct qib_devdata *dd = ppd->dd;
2619 	u64 extctl, ledblink = 0, val;
2620 	unsigned long flags;
2621 	int yel, grn;
2622 
2623 	/*
2624 	 * The diags use the LED to indicate diag info, so we leave
2625 	 * the external LED alone when the diags are running.
2626 	 */
2627 	if (dd->diag_client)
2628 		return;
2629 
2630 	/* Allow override of LED display for, e.g., locating a system in a rack */
2631 	if (ppd->led_override) {
2632 		grn = (ppd->led_override & QIB_LED_PHYS);
2633 		yel = (ppd->led_override & QIB_LED_LOG);
2634 	} else if (on) {
2635 		val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
2636 		grn = qib_7322_phys_portstate(val) ==
2637 			IB_PHYSPORTSTATE_LINKUP;
2638 		yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
2639 	} else {
2640 		grn = 0;
2641 		yel = 0;
2642 	}
2643 
2644 	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2645 	extctl = dd->cspec->extctrl & (ppd->port == 1 ?
2646 		~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
2647 	if (grn) {
2648 		extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
2649 		/*
2650 		 * Counts are in chip clock (4ns) periods.
2651 		 * This is 1/16 sec (66.6ms) on,
2652 		 * 3/16 sec (187.5 ms) off, with packets rcvd.
2653 		 */
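		/* e.g. 66600 * 1000 / 4 = 16,650,000 four-ns ticks = 66.6 ms of ON time */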
2654 		ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
2655 			((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
2656 	}
2657 	if (yel)
2658 		extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
2659 	dd->cspec->extctrl = extctl;
2660 	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
2661 	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2662 
2663 	if (ledblink) /* blink the LED on packet receive */
2664 		qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2665 }
2666 
2667 #ifdef CONFIG_INFINIBAND_QIB_DCA
2668 
2669 static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event)
2670 {
2671 	switch (event) {
2672 	case DCA_PROVIDER_ADD:
2673 		if (dd->flags & QIB_DCA_ENABLED)
2674 			break;
2675 		if (!dca_add_requester(&dd->pcidev->dev)) {
2676 			qib_devinfo(dd->pcidev, "DCA enabled\n");
2677 			dd->flags |= QIB_DCA_ENABLED;
2678 			qib_setup_dca(dd);
2679 		}
2680 		break;
2681 	case DCA_PROVIDER_REMOVE:
2682 		if (dd->flags & QIB_DCA_ENABLED) {
2683 			dca_remove_requester(&dd->pcidev->dev);
2684 			dd->flags &= ~QIB_DCA_ENABLED;
2685 			dd->cspec->dca_ctrl = 0;
2686 			qib_write_kreg(dd, KREG_IDX(DCACtrlA),
2687 				dd->cspec->dca_ctrl);
2688 		}
2689 		break;
2690 	}
2691 	return 0;
2692 }
2693 
2694 static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu)
2695 {
2696 	struct qib_devdata *dd = rcd->dd;
2697 	struct qib_chip_specific *cspec = dd->cspec;
2698 
2699 	if (!(dd->flags & QIB_DCA_ENABLED))
2700 		return;
2701 	if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
2702 		const struct dca_reg_map *rmp;
2703 
2704 		cspec->rhdr_cpu[rcd->ctxt] = cpu;
2705 		rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
2706 		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
2707 		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
2708 			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
2709 		qib_devinfo(dd->pcidev,
2710 			"Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
2711 			(long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2712 		qib_write_kreg(dd, rmp->regno,
2713 			       cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2714 		cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
2715 		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2716 	}
2717 }
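
/*
 * Note on the DCA plumbing: dca3_get_tag() (from linux/dca.h) returns the
 * platform's DCA tag for the given CPU, and the code above shifts it into
 * the per-context field of the DCACtrl shadow so the chip's rcvhdrq writes
 * are steered toward that CPU's cache.  qib_update_sdma_dca() below does
 * the same for the SDMA head writes.
 */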
2718 
2719 static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
2720 {
2721 	struct qib_devdata *dd = ppd->dd;
2722 	struct qib_chip_specific *cspec = dd->cspec;
2723 	unsigned pidx = ppd->port - 1;
2724 
2725 	if (!(dd->flags & QIB_DCA_ENABLED))
2726 		return;
2727 	if (cspec->sdma_cpu[pidx] != cpu) {
2728 		cspec->sdma_cpu[pidx] = cpu;
2729 		cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
2730 			SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
2731 			SYM_MASK(DCACtrlF, SendDma0DCAOPH));
2732 		cspec->dca_rcvhdr_ctrl[4] |=
2733 			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
2734 				(ppd->hw_pidx ?
2735 					SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
2736 					SYM_LSB(DCACtrlF, SendDma0DCAOPH));
2737 		qib_devinfo(dd->pcidev,
2738 			"sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
2739 			(long long) cspec->dca_rcvhdr_ctrl[4]);
2740 		qib_write_kreg(dd, KREG_IDX(DCACtrlF),
2741 			       cspec->dca_rcvhdr_ctrl[4]);
2742 		cspec->dca_ctrl |= ppd->hw_pidx ?
2743 			SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
2744 			SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
2745 		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2746 	}
2747 }
2748 
2749 static void qib_setup_dca(struct qib_devdata *dd)
2750 {
2751 	struct qib_chip_specific *cspec = dd->cspec;
2752 	int i;
2753 
2754 	for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
2755 		cspec->rhdr_cpu[i] = -1;
2756 	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2757 		cspec->sdma_cpu[i] = -1;
2758 	cspec->dca_rcvhdr_ctrl[0] =
2759 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
2760 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
2761 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
2762 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
2763 	cspec->dca_rcvhdr_ctrl[1] =
2764 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
2765 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
2766 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
2767 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
2768 	cspec->dca_rcvhdr_ctrl[2] =
2769 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
2770 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
2771 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
2772 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
2773 	cspec->dca_rcvhdr_ctrl[3] =
2774 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
2775 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
2776 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
2777 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
2778 	cspec->dca_rcvhdr_ctrl[4] =
2779 		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
2780 		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
2781 	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2782 		qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
2783 			       cspec->dca_rcvhdr_ctrl[i]);
2784 	for (i = 0; i < cspec->num_msix_entries; i++)
2785 		setup_dca_notifier(dd, i);
2786 }
2787 
2788 static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
2789 			     const cpumask_t *mask)
2790 {
2791 	struct qib_irq_notify *n =
2792 		container_of(notify, struct qib_irq_notify, notify);
2793 	int cpu = cpumask_first(mask);
2794 
2795 	if (n->rcv) {
2796 		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2797 
2798 		qib_update_rhdrq_dca(rcd, cpu);
2799 	} else {
2800 		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2801 
2802 		qib_update_sdma_dca(ppd, cpu);
2803 	}
2804 }
2805 
2806 static void qib_irq_notifier_release(struct kref *ref)
2807 {
2808 	struct qib_irq_notify *n =
2809 		container_of(ref, struct qib_irq_notify, notify.kref);
2810 	struct qib_devdata *dd;
2811 
2812 	if (n->rcv) {
2813 		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2814 
2815 		dd = rcd->dd;
2816 	} else {
2817 		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2818 
2819 		dd = ppd->dd;
2820 	}
2821 	qib_devinfo(dd->pcidev,
2822 		"release on HCA notify 0x%p n 0x%p\n", ref, n);
2823 	kfree(n);
2824 }
2825 #endif
2826 
2827 static void qib_7322_free_irq(struct qib_devdata *dd)
2828 {
2829 	u64 intgranted;
2830 	int i;
2831 
2832 	dd->cspec->main_int_mask = ~0ULL;
2833 
2834 	for (i = 0; i < dd->cspec->num_msix_entries; i++) {
2835 		/* only free IRQs that were allocated */
2836 		if (dd->cspec->msix_entries[i].arg) {
2837 #ifdef CONFIG_INFINIBAND_QIB_DCA
2838 			reset_dca_notifier(dd, i);
2839 #endif
2840 			irq_set_affinity_hint(pci_irq_vector(dd->pcidev, i),
2841 					      NULL);
2842 			free_cpumask_var(dd->cspec->msix_entries[i].mask);
2843 			pci_free_irq(dd->pcidev, i,
2844 				     dd->cspec->msix_entries[i].arg);
2845 		}
2846 	}
2847 
2848 	/* If num_msix_entries was 0, disable the INTx IRQ */
2849 	if (!dd->cspec->num_msix_entries)
2850 		pci_free_irq(dd->pcidev, 0, dd);
2851 	else
2852 		dd->cspec->num_msix_entries = 0;
2853 
2854 	pci_free_irq_vectors(dd->pcidev);
2855 
2856 	/* make sure no MSIx interrupts are left pending */
2857 	intgranted = qib_read_kreg64(dd, kr_intgranted);
2858 	if (intgranted)
2859 		qib_write_kreg(dd, kr_intgranted, intgranted);
2860 }
2861 
2862 static void qib_setup_7322_cleanup(struct qib_devdata *dd)
2863 {
2864 	int i;
2865 
2866 #ifdef CONFIG_INFINIBAND_QIB_DCA
2867 	if (dd->flags & QIB_DCA_ENABLED) {
2868 		dca_remove_requester(&dd->pcidev->dev);
2869 		dd->flags &= ~QIB_DCA_ENABLED;
2870 		dd->cspec->dca_ctrl = 0;
2871 		qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
2872 	}
2873 #endif
2874 
2875 	qib_7322_free_irq(dd);
2876 	kfree(dd->cspec->cntrs);
2877 	kfree(dd->cspec->sendchkenable);
2878 	kfree(dd->cspec->sendgrhchk);
2879 	kfree(dd->cspec->sendibchk);
2880 	kfree(dd->cspec->msix_entries);
2881 	for (i = 0; i < dd->num_pports; i++) {
2882 		unsigned long flags;
2883 		u32 mask = QSFP_GPIO_MOD_PRS_N |
2884 			(QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
2885 
2886 		kfree(dd->pport[i].cpspec->portcntrs);
2887 		if (dd->flags & QIB_HAS_QSFP) {
2888 			spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2889 			dd->cspec->gpio_mask &= ~mask;
2890 			qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2891 			spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2892 		}
2893 	}
2894 }
2895 
2896 /* handle SDMA interrupts */
2897 static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
2898 {
2899 	struct qib_pportdata *ppd0 = &dd->pport[0];
2900 	struct qib_pportdata *ppd1 = &dd->pport[1];
2901 	u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
2902 		INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
2903 	u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
2904 		INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
2905 
2906 	if (intr0)
2907 		qib_sdma_intr(ppd0);
2908 	if (intr1)
2909 		qib_sdma_intr(ppd1);
2910 
2911 	if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
2912 		qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
2913 	if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
2914 		qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
2915 }
2916 
2917 /*
2918  * Set or clear the Send buffer available interrupt enable bit.
2919  */
2920 static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
2921 {
2922 	unsigned long flags;
2923 
2924 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
2925 	if (needint)
2926 		dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
2927 	else
2928 		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
2929 	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
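	/* Flush with a scratch write; the chip does not allow back-to-back sendctrl writes. */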
2930 	qib_write_kreg(dd, kr_scratch, 0ULL);
2931 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2932 }
2933 
2934 /*
2935  * Somehow got an interrupt with reserved bits set in interrupt status.
2936  * Print a message so we know it happened, then clear them.
2937  * Kept out of line to keep the mainline interrupt handler cache-friendly.
2938  */
2939 static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
2940 {
2941 	u64 kills;
2942 
2943 	kills = istat & ~QIB_I_BITSEXTANT;
2944 	qib_dev_err(dd,
2945 		"Clearing reserved interrupt(s) 0x%016llx\n",
2946 		(unsigned long long) kills);
2948 	qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
2949 }
2950 
2951 /* keep mainline interrupt handler cache-friendly */
2952 static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2953 {
2954 	u32 gpiostatus;
2955 	int handled = 0;
2956 	int pidx;
2957 
2958 	/*
2959 	 * Boards for this chip currently don't use GPIO interrupts,
2960 	 * so clear by writing GPIOstatus to GPIOclear, and complain
2961 	 * to developer.  To avoid endless repeats, clear
2962 	 * the bits in the mask, since there is some kind of
2963 	 * programming error or chip problem.
2964 	 */
2965 	gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
2966 	/*
2967 	 * In theory, writing GPIOstatus to GPIOclear could
2968 	 * have a bad side-effect on some diagnostic that wanted
2969 	 * to poll for a status-change, but the various shadows
2970 	 * make that problematic at best. Diags will just suppress
2971 	 * all GPIO interrupts during such tests.
2972 	 */
2973 	qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
2974 	/*
2975 	 * Check for QSFP MOD_PRS changes
2976 	 * only works for single port if IB1 != pidx1
2977 	 */
2978 	for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
2979 	     ++pidx) {
2980 		struct qib_pportdata *ppd;
2981 		struct qib_qsfp_data *qd;
2982 		u32 mask;
2983 
2984 		if (!dd->pport[pidx].link_speed_supported)
2985 			continue;
2986 		mask = QSFP_GPIO_MOD_PRS_N;
2987 		ppd = dd->pport + pidx;
2988 		mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
2989 		if (gpiostatus & dd->cspec->gpio_mask & mask) {
2990 			u64 pins;
2991 
2992 			qd = &ppd->cpspec->qsfp_data;
2993 			gpiostatus &= ~mask;
2994 			pins = qib_read_kreg64(dd, kr_extstatus);
2995 			pins >>= SYM_LSB(EXTStatus, GPIOIn);
2996 			if (!(pins & mask)) {
2997 				++handled;
2998 				qd->t_insert = jiffies;
2999 				queue_work(ib_wq, &qd->work);
3000 			}
3001 		}
3002 	}
3003 
3004 	if (gpiostatus && !handled) {
3005 		const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
3006 		u32 gpio_irq = mask & gpiostatus;
3007 
3008 		/*
3009 		 * Clear any troublemakers, and update chip from shadow
3010 		 */
3011 		dd->cspec->gpio_mask &= ~gpio_irq;
3012 		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
3013 	}
3014 }
3015 
3016 /*
3017  * Handle errors and unusual events first, separate function
3018  * to improve cache hits for fast path interrupt handling.
3019  */
3020 static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
3021 {
3022 	if (istat & ~QIB_I_BITSEXTANT)
3023 		unknown_7322_ibits(dd, istat);
3024 	if (istat & QIB_I_GPIO)
3025 		unknown_7322_gpio_intr(dd);
3026 	if (istat & QIB_I_C_ERROR) {
3027 		qib_write_kreg(dd, kr_errmask, 0ULL);
3028 		tasklet_schedule(&dd->error_tasklet);
3029 	}
3030 	if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
3031 		handle_7322_p_errors(dd->rcd[0]->ppd);
3032 	if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
3033 		handle_7322_p_errors(dd->rcd[1]->ppd);
3034 }
3035 
3036 /*
3037  * Dynamically adjust the rcv int timeout for a context based on incoming
3038  * packet rate.
3039  */
3040 static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
3041 {
3042 	struct qib_devdata *dd = rcd->dd;
3043 	u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
3044 
3045 	/*
3046 	 * Dynamically adjust idle timeout on chip
3047 	 * based on number of packets processed.
3048 	 */
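	/*
	 * Fewer than rcv_int_count packets since the last interrupt: halve
	 * the timeout (while it is above 2).  rcv_int_count or more: double
	 * it, capped at rcv_int_timeout.  Otherwise leave it unchanged and
	 * skip the register write.
	 */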
3049 	if (npkts < rcv_int_count && timeout > 2)
3050 		timeout >>= 1;
3051 	else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
3052 		timeout = min(timeout << 1, rcv_int_timeout);
3053 	else
3054 		return;
3055 
3056 	dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
3057 	qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
3058 }
3059 
3060 /*
3061  * This is the main interrupt handler.
3062  * It will normally only be used for low frequency interrupts but may
3063  * have to handle all interrupts if INTx is enabled or fewer than normal
3064  * MSIx interrupts were allocated.
3065  * This routine should ignore the interrupt bits for any of the
3066  * dedicated MSIx handlers.
3067  */
3068 static irqreturn_t qib_7322intr(int irq, void *data)
3069 {
3070 	struct qib_devdata *dd = data;
3071 	irqreturn_t ret;
3072 	u64 istat;
3073 	u64 ctxtrbits;
3074 	u64 rmask;
3075 	unsigned i;
3076 	u32 npkts;
3077 
3078 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
3079 		/*
3080 		 * This return value is not great, but we do not want the
3081 		 * interrupt core code to remove our interrupt handler
3082 		 * because we don't appear to be handling an interrupt
3083 		 * during a chip reset.
3084 		 */
3085 		ret = IRQ_HANDLED;
3086 		goto bail;
3087 	}
3088 
3089 	istat = qib_read_kreg64(dd, kr_intstatus);
3090 
3091 	if (unlikely(istat == ~0ULL)) {
3092 		qib_bad_intrstatus(dd);
3093 		qib_dev_err(dd, "Interrupt status all f's, skipping\n");
3094 		/* don't know if it was our interrupt or not */
3095 		ret = IRQ_NONE;
3096 		goto bail;
3097 	}
3098 
3099 	istat &= dd->cspec->main_int_mask;
3100 	if (unlikely(!istat)) {
3101 		/* already handled, or shared and not us */
3102 		ret = IRQ_NONE;
3103 		goto bail;
3104 	}
3105 
3106 	this_cpu_inc(*dd->int_counter);
3107 
3108 	/* handle "errors" of various kinds first, device ahead of port */
3109 	if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
3110 			      QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
3111 			      INT_MASK_P(Err, 1))))
3112 		unlikely_7322_intr(dd, istat);
3113 
3114 	/*
3115 	 * Clear the interrupt bits we found set, relatively early, so we
3116 	 * "know" the chip will have seen this by the time we process
3117 	 * the queue, and will re-interrupt if necessary.  The processor
3118 	 * itself won't take the interrupt again until we return.
3119 	 */
3120 	qib_write_kreg(dd, kr_intclear, istat);
3121 
3122 	/*
3123 	 * Handle kernel receive queues before checking for pio buffers
3124 	 * available since receives can overflow; piobuf waiters can afford
3125 	 * a few extra cycles, since they were waiting anyway.
3126 	 */
3127 	ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
3128 	if (ctxtrbits) {
3129 		rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
3130 			(1ULL << QIB_I_RCVURG_LSB);
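		/*
		 * rmask starts as the RcvAvail | RcvUrg bit pair for context
		 * 0 and is shifted up one bit per kernel context below; any
		 * bits still set afterwards belong to user contexts.
		 */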
3131 		for (i = 0; i < dd->first_user_ctxt; i++) {
3132 			if (ctxtrbits & rmask) {
3133 				ctxtrbits &= ~rmask;
3134 				if (dd->rcd[i])
3135 					qib_kreceive(dd->rcd[i], NULL, &npkts);
3136 			}
3137 			rmask <<= 1;
3138 		}
3139 		if (ctxtrbits) {
3140 			ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
3141 				(ctxtrbits >> QIB_I_RCVURG_LSB);
3142 			qib_handle_urcv(dd, ctxtrbits);
3143 		}
3144 	}
3145 
3146 	if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
3147 		sdma_7322_intr(dd, istat);
3148 
3149 	if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
3150 		qib_ib_piobufavail(dd);
3151 
3152 	ret = IRQ_HANDLED;
3153 bail:
3154 	return ret;
3155 }
3156 
3157 /*
3158  * Dedicated receive packet available interrupt handler.
3159  */
3160 static irqreturn_t qib_7322pintr(int irq, void *data)
3161 {
3162 	struct qib_ctxtdata *rcd = data;
3163 	struct qib_devdata *dd = rcd->dd;
3164 	u32 npkts;
3165 
3166 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3167 		/*
3168 		 * This return value is not great, but we do not want the
3169 		 * interrupt core code to remove our interrupt handler
3170 		 * because we don't appear to be handling an interrupt
3171 		 * during a chip reset.
3172 		 */
3173 		return IRQ_HANDLED;
3174 
3175 	this_cpu_inc(*dd->int_counter);
3176 
3177 	/* Clear the interrupt bit we expect to be set. */
3178 	qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
3179 		       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
3180 
3181 	qib_kreceive(rcd, NULL, &npkts);
3182 
3183 	return IRQ_HANDLED;
3184 }
3185 
3186 /*
3187  * Dedicated Send buffer available interrupt handler.
3188  */
3189 static irqreturn_t qib_7322bufavail(int irq, void *data)
3190 {
3191 	struct qib_devdata *dd = data;
3192 
3193 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3194 		/*
3195 		 * This return value is not great, but we do not want the
3196 		 * interrupt core code to remove our interrupt handler
3197 		 * because we don't appear to be handling an interrupt
3198 		 * during a chip reset.
3199 		 */
3200 		return IRQ_HANDLED;
3201 
3202 	this_cpu_inc(*dd->int_counter);
3203 
3204 	/* Clear the interrupt bit we expect to be set. */
3205 	qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
3206 
3207 	/* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
3208 	if (dd->flags & QIB_INITTED)
3209 		qib_ib_piobufavail(dd);
3210 	else
3211 		qib_wantpiobuf_7322_intr(dd, 0);
3212 
3213 	return IRQ_HANDLED;
3214 }
3215 
3216 /*
3217  * Dedicated Send DMA interrupt handler.
3218  */
3219 static irqreturn_t sdma_intr(int irq, void *data)
3220 {
3221 	struct qib_pportdata *ppd = data;
3222 	struct qib_devdata *dd = ppd->dd;
3223 
3224 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3225 		/*
3226 		 * This return value is not great, but we do not want the
3227 		 * interrupt core code to remove our interrupt handler
3228 		 * because we don't appear to be handling an interrupt
3229 		 * during a chip reset.
3230 		 */
3231 		return IRQ_HANDLED;
3232 
3233 	this_cpu_inc(*dd->int_counter);
3234 
3235 	/* Clear the interrupt bit we expect to be set. */
3236 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3237 		       INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
3238 	qib_sdma_intr(ppd);
3239 
3240 	return IRQ_HANDLED;
3241 }
3242 
3243 /*
3244  * Dedicated Send DMA idle interrupt handler.
3245  */
3246 static irqreturn_t sdma_idle_intr(int irq, void *data)
3247 {
3248 	struct qib_pportdata *ppd = data;
3249 	struct qib_devdata *dd = ppd->dd;
3250 
3251 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3252 		/*
3253 		 * This return value is not great, but we do not want the
3254 		 * interrupt core code to remove our interrupt handler
3255 		 * because we don't appear to be handling an interrupt
3256 		 * during a chip reset.
3257 		 */
3258 		return IRQ_HANDLED;
3259 
3260 	this_cpu_inc(*dd->int_counter);
3261 
3262 	/* Clear the interrupt bit we expect to be set. */
3263 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3264 		       INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
3265 	qib_sdma_intr(ppd);
3266 
3267 	return IRQ_HANDLED;
3268 }
3269 
3270 /*
3271  * Dedicated Send DMA progress interrupt handler.
3272  */
3273 static irqreturn_t sdma_progress_intr(int irq, void *data)
3274 {
3275 	struct qib_pportdata *ppd = data;
3276 	struct qib_devdata *dd = ppd->dd;
3277 
3278 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3279 		/*
3280 		 * This return value is not great, but we do not want the
3281 		 * interrupt core code to remove our interrupt handler
3282 		 * because we don't appear to be handling an interrupt
3283 		 * during a chip reset.
3284 		 */
3285 		return IRQ_HANDLED;
3286 
3287 	this_cpu_inc(*dd->int_counter);
3288 
3289 	/* Clear the interrupt bit we expect to be set. */
3290 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3291 		       INT_MASK_P(SDmaProgress, 1) :
3292 		       INT_MASK_P(SDmaProgress, 0));
3293 	qib_sdma_intr(ppd);
3294 
3295 	return IRQ_HANDLED;
3296 }
3297 
3298 /*
3299  * Dedicated Send DMA cleanup interrupt handler.
3300  */
3301 static irqreturn_t sdma_cleanup_intr(int irq, void *data)
3302 {
3303 	struct qib_pportdata *ppd = data;
3304 	struct qib_devdata *dd = ppd->dd;
3305 
3306 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3307 		/*
3308 		 * This return value is not great, but we do not want the
3309 		 * interrupt core code to remove our interrupt handler
3310 		 * because we don't appear to be handling an interrupt
3311 		 * during a chip reset.
3312 		 */
3313 		return IRQ_HANDLED;
3314 
3315 	this_cpu_inc(*dd->int_counter);
3316 
3317 	/* Clear the interrupt bit we expect to be set. */
3318 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3319 		       INT_MASK_PM(SDmaCleanupDone, 1) :
3320 		       INT_MASK_PM(SDmaCleanupDone, 0));
3321 	qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
3322 
3323 	return IRQ_HANDLED;
3324 }
3325 
3326 #ifdef CONFIG_INFINIBAND_QIB_DCA
3327 
3328 static void reset_dca_notifier(struct qib_devdata *dd, int msixnum)
3329 {
3330 	if (!dd->cspec->msix_entries[msixnum].dca)
3331 		return;
3332 
3333 	qib_devinfo(dd->pcidev, "Disabling notifier on HCA %d irq %d\n",
3334 		    dd->unit, pci_irq_vector(dd->pcidev, msixnum));
3335 	irq_set_affinity_notifier(pci_irq_vector(dd->pcidev, msixnum), NULL);
3336 	dd->cspec->msix_entries[msixnum].notifier = NULL;
3337 }
3338 
3339 static void setup_dca_notifier(struct qib_devdata *dd, int msixnum)
3340 {
3341 	struct qib_msix_entry *m = &dd->cspec->msix_entries[msixnum];
3342 	struct qib_irq_notify *n;
3343 
3344 	if (!m->dca)
3345 		return;
3346 	n = kzalloc(sizeof(*n), GFP_KERNEL);
3347 	if (n) {
3348 		int ret;
3349 
3350 		m->notifier = n;
3351 		n->notify.irq = pci_irq_vector(dd->pcidev, msixnum);
3352 		n->notify.notify = qib_irq_notifier_notify;
3353 		n->notify.release = qib_irq_notifier_release;
3354 		n->arg = m->arg;
3355 		n->rcv = m->rcv;
3356 		qib_devinfo(dd->pcidev,
3357 			"set notifier irq %d rcv %d notify %p\n",
3358 			n->notify.irq, n->rcv, &n->notify);
3359 		ret = irq_set_affinity_notifier(
3360 				n->notify.irq,
3361 				&n->notify);
3362 		if (ret) {
3363 			m->notifier = NULL;
3364 			kfree(n);
3365 		}
3366 	}
3367 }
3368 
3369 #endif
3370 
3371 /*
3372  * Set up our chip-specific interrupt handler.
3373  * The interrupt type has already been setup, so
3374  * we just need to do the registration and error checking.
3375  * If we are using MSIx interrupts, we may fall back to
3376  * INTx later, if the interrupt handler doesn't get called
3377  * within 1/2 second (see verify_interrupt()).
3378  */
3379 static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
3380 {
3381 	int ret, i, msixnum;
3382 	u64 redirect[6];
3383 	u64 mask;
3384 	const struct cpumask *local_mask;
3385 	int firstcpu, secondcpu = 0, currrcvcpu = 0;
3386 
3387 	if (!dd->num_pports)
3388 		return;
3389 
3390 	if (clearpend) {
3391 		/*
3392 		 * if not switching interrupt types, be sure interrupts are
3393 		 * disabled, and then clear anything pending at this point,
3394 		 * because we are starting clean.
3395 		 */
3396 		qib_7322_set_intr_state(dd, 0);
3397 
3398 		/* clear the reset error, init error/hwerror mask */
3399 		qib_7322_init_hwerrors(dd);
3400 
3401 		/* clear any interrupt bits that might be set */
3402 		qib_write_kreg(dd, kr_intclear, ~0ULL);
3403 
3404 		/* make sure no pending MSIx intr, and clear diag reg */
3405 		qib_write_kreg(dd, kr_intgranted, ~0ULL);
3406 		qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
3407 	}
3408 
3409 	if (!dd->cspec->num_msix_entries) {
3410 		/* Try to get INTx interrupt */
3411 try_intx:
3412 		ret = pci_request_irq(dd->pcidev, 0, qib_7322intr, NULL, dd,
3413 				      QIB_DRV_NAME);
3414 		if (ret) {
3415 			qib_dev_err(
3416 				dd,
3417 				"Couldn't setup INTx interrupt (irq=%d): %d\n",
3418 				pci_irq_vector(dd->pcidev, 0), ret);
3419 			return;
3420 		}
3421 		dd->cspec->main_int_mask = ~0ULL;
3422 		return;
3423 	}
3424 
3425 	/* Try to get MSIx interrupts */
3426 	memset(redirect, 0, sizeof(redirect));
3427 	mask = ~0ULL;
3428 	msixnum = 0;
3429 	local_mask = cpumask_of_pcibus(dd->pcidev->bus);
3430 	firstcpu = cpumask_first(local_mask);
3431 	if (firstcpu >= nr_cpu_ids ||
3432 			cpumask_weight(local_mask) == num_online_cpus()) {
3433 		local_mask = topology_core_cpumask(0);
3434 		firstcpu = cpumask_first(local_mask);
3435 	}
3436 	if (firstcpu < nr_cpu_ids) {
3437 		secondcpu = cpumask_next(firstcpu, local_mask);
3438 		if (secondcpu >= nr_cpu_ids)
3439 			secondcpu = firstcpu;
3440 		currrcvcpu = secondcpu;
3441 	}
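	/*
	 * Walk the static irq_table (device- and port-level handlers) first,
	 * then one entry per kernel receive context; msixnum counts MSI-X
	 * vectors actually requested.  Receive vectors are spread across the
	 * CPUs local to the HCA starting at secondcpu, everything else is
	 * pinned to firstcpu.
	 */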
3442 	for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
3443 		irq_handler_t handler;
3444 		void *arg;
3445 		int lsb, reg, sh;
3446 #ifdef CONFIG_INFINIBAND_QIB_DCA
3447 		int dca = 0;
3448 #endif
3449 		if (i < ARRAY_SIZE(irq_table)) {
3450 			if (irq_table[i].port) {
3451 				/* skip if for a non-configured port */
3452 				if (irq_table[i].port > dd->num_pports)
3453 					continue;
3454 				arg = dd->pport + irq_table[i].port - 1;
3455 			} else
3456 				arg = dd;
3457 #ifdef CONFIG_INFINIBAND_QIB_DCA
3458 			dca = irq_table[i].dca;
3459 #endif
3460 			lsb = irq_table[i].lsb;
3461 			handler = irq_table[i].handler;
3462 			ret = pci_request_irq(dd->pcidev, msixnum, handler,
3463 					      NULL, arg, QIB_DRV_NAME "%d%s",
3464 					      dd->unit,
3465 					      irq_table[i].name);
3466 		} else {
3467 			unsigned ctxt;
3468 
3469 			ctxt = i - ARRAY_SIZE(irq_table);
3470 			/* per krcvq context receive interrupt */
3471 			arg = dd->rcd[ctxt];
3472 			if (!arg)
3473 				continue;
3474 			if (qib_krcvq01_no_msi && ctxt < 2)
3475 				continue;
3476 #ifdef CONFIG_INFINIBAND_QIB_DCA
3477 			dca = 1;
3478 #endif
3479 			lsb = QIB_I_RCVAVAIL_LSB + ctxt;
3480 			handler = qib_7322pintr;
3481 			ret = pci_request_irq(dd->pcidev, msixnum, handler,
3482 					      NULL, arg,
3483 					      QIB_DRV_NAME "%d (kctx)",
3484 					      dd->unit);
3485 		}
3486 
3487 		if (ret) {
3488 			/*
3489 			 * Shouldn't happen since the enable said we could
3490 			 * have as many as we are trying to setup here.
3491 			 */
3492 			qib_dev_err(dd,
3493 				    "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
3494 				    msixnum,
3495 				    pci_irq_vector(dd->pcidev, msixnum),
3496 				    ret);
3497 			qib_7322_free_irq(dd);
3498 			pci_alloc_irq_vectors(dd->pcidev, 1, 1,
3499 					      PCI_IRQ_LEGACY);
3500 			goto try_intx;
3501 		}
3502 		dd->cspec->msix_entries[msixnum].arg = arg;
3503 #ifdef CONFIG_INFINIBAND_QIB_DCA
3504 		dd->cspec->msix_entries[msixnum].dca = dca;
3505 		dd->cspec->msix_entries[msixnum].rcv =
3506 			handler == qib_7322pintr;
3507 #endif
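		/*
		 * Route this interrupt source to its MSI-X vector via the
		 * IntRedirect registers, and drop it from the general
		 * handler's mask.
		 */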
3508 		if (lsb >= 0) {
3509 			reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
3510 			sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
3511 				SYM_LSB(IntRedirect0, vec1);
3512 			mask &= ~(1ULL << lsb);
3513 			redirect[reg] |= ((u64) msixnum) << sh;
3514 		}
3515 		qib_read_kreg64(dd, 2 * msixnum + 1 +
3516 				(QIB_7322_MsixTable_OFFS / sizeof(u64)));
3517 		if (firstcpu < nr_cpu_ids &&
3518 			zalloc_cpumask_var(
3519 				&dd->cspec->msix_entries[msixnum].mask,
3520 				GFP_KERNEL)) {
3521 			if (handler == qib_7322pintr) {
3522 				cpumask_set_cpu(currrcvcpu,
3523 					dd->cspec->msix_entries[msixnum].mask);
3524 				currrcvcpu = cpumask_next(currrcvcpu,
3525 					local_mask);
3526 				if (currrcvcpu >= nr_cpu_ids)
3527 					currrcvcpu = secondcpu;
3528 			} else {
3529 				cpumask_set_cpu(firstcpu,
3530 					dd->cspec->msix_entries[msixnum].mask);
3531 			}
3532 			irq_set_affinity_hint(
3533 				pci_irq_vector(dd->pcidev, msixnum),
3534 				dd->cspec->msix_entries[msixnum].mask);
3535 		}
3536 		msixnum++;
3537 	}
3538 	/* Initialize the vector mapping */
3539 	for (i = 0; i < ARRAY_SIZE(redirect); i++)
3540 		qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
3541 	dd->cspec->main_int_mask = mask;
3542 	tasklet_init(&dd->error_tasklet, qib_error_tasklet,
3543 		(unsigned long)dd);
3544 }
3545 
3546 /**
3547  * qib_7322_boardname - fill in the board name and note features
3548  * @dd: the qlogic_ib device
3549  *
3550  * info will be based on the board revision register
3551  */
3552 static unsigned qib_7322_boardname(struct qib_devdata *dd)
3553 {
3554 	/* Will need enumeration of board-types here */
3555 	u32 boardid;
3556 	unsigned int features = DUAL_PORT_CAP;
3557 
3558 	boardid = SYM_FIELD(dd->revision, Revision, BoardID);
3559 
3560 	switch (boardid) {
3561 	case 0:
3562 		dd->boardname = "InfiniPath_QLE7342_Emulation";
3563 		break;
3564 	case 1:
3565 		dd->boardname = "InfiniPath_QLE7340";
3566 		dd->flags |= QIB_HAS_QSFP;
3567 		features = PORT_SPD_CAP;
3568 		break;
3569 	case 2:
3570 		dd->boardname = "InfiniPath_QLE7342";
3571 		dd->flags |= QIB_HAS_QSFP;
3572 		break;
3573 	case 3:
3574 		dd->boardname = "InfiniPath_QMI7342";
3575 		break;
3576 	case 4:
3577 		dd->boardname = "InfiniPath_Unsupported7342";
3578 		qib_dev_err(dd, "Unsupported version of QMH7342\n");
3579 		features = 0;
3580 		break;
3581 	case BOARD_QMH7342:
3582 		dd->boardname = "InfiniPath_QMH7342";
3583 		features = 0x24;
3584 		break;
3585 	case BOARD_QME7342:
3586 		dd->boardname = "InfiniPath_QME7342";
3587 		break;
3588 	case 8:
3589 		dd->boardname = "InfiniPath_QME7362";
3590 		dd->flags |= QIB_HAS_QSFP;
3591 		break;
3592 	case BOARD_QMH7360:
3593 		dd->boardname = "Intel IB QDR 1P FLR-QSFP Adptr";
3594 		dd->flags |= QIB_HAS_QSFP;
3595 		break;
3596 	case 15:
3597 		dd->boardname = "InfiniPath_QLE7342_TEST";
3598 		dd->flags |= QIB_HAS_QSFP;
3599 		break;
3600 	default:
3601 		dd->boardname = "InfiniPath_QLE73xy_UNKNOWN";
3602 		qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
3603 		break;
3604 	}
3605 	dd->board_atten = 1; /* index into txdds_Xdr */
3606 
3607 	snprintf(dd->boardversion, sizeof(dd->boardversion),
3608 		 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
3609 		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
3610 		 (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
3611 		 dd->majrev, dd->minrev,
3612 		 (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
3613 
3614 	if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
3615 		qib_devinfo(dd->pcidev,
3616 			    "IB%u: Forced to single port mode by module parameter\n",
3617 			    dd->unit);
3618 		features &= PORT_SPD_CAP;
3619 	}
3620 
3621 	return features;
3622 }
3623 
3624 /*
3625  * This routine sleeps, so it can only be called from user context, not
3626  * from interrupt context.
3627  */
3628 static int qib_do_7322_reset(struct qib_devdata *dd)
3629 {
3630 	u64 val;
3631 	u64 *msix_vecsave = NULL;
3632 	int i, msix_entries, ret = 1;
3633 	u16 cmdval;
3634 	u8 int_line, clinesz;
3635 	unsigned long flags;
3636 
3637 	/* Use dev_err so it shows up in logs, etc. */
3638 	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
3639 
3640 	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
3641 
3642 	msix_entries = dd->cspec->num_msix_entries;
3643 
3644 	/* no interrupts till re-initted */
3645 	qib_7322_set_intr_state(dd, 0);
3646 
3647 	qib_7322_free_irq(dd);
3648 
3649 	if (msix_entries) {
3650 		/* can be up to 512 bytes, too big for stack */
3651 		msix_vecsave = kmalloc_array(2 * dd->cspec->num_msix_entries,
3652 					     sizeof(u64),
3653 					     GFP_KERNEL);
3654 	}
3655 
3656 	/*
3657 	 * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
3658 	 * info that is set up by the BIOS, so we have to save and restore
3659 	 * it ourselves.   There is some risk something could change it,
3660 	 * after we save it, but since we have disabled the MSIx, it
3661 	 * shouldn't be touched...
3662 	 */
3663 	for (i = 0; i < msix_entries; i++) {
3664 		u64 vecaddr, vecdata;
3665 
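		/*
		 * The MSI-X table is read as pairs of 64-bit registers:
		 * message address at index 2*i, message data at 2*i + 1,
		 * with the per-vector mask bit at bit 32 of the data word.
		 */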
3666 		vecaddr = qib_read_kreg64(dd, 2 * i +
3667 				  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3668 		vecdata = qib_read_kreg64(dd, 1 + 2 * i +
3669 				  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3670 		if (msix_vecsave) {
3671 			msix_vecsave[2 * i] = vecaddr;
3672 			/* save it without the masked bit set */
3673 			msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
3674 		}
3675 	}
3676 
3677 	dd->pport->cpspec->ibdeltainprog = 0;
3678 	dd->pport->cpspec->ibsymdelta = 0;
3679 	dd->pport->cpspec->iblnkerrdelta = 0;
3680 	dd->pport->cpspec->ibmalfdelta = 0;
3681 	/* so we check interrupts work again */
3682 	dd->z_int_counter = qib_int_counter(dd);
3683 
3684 	/*
3685 	 * Keep chip from being accessed until we are ready.  Use
3686 	 * writeq() directly, to allow the write even though QIB_PRESENT
3687 	 * isn't set.
3688 	 */
3689 	dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
3690 	dd->flags |= QIB_DOING_RESET;
3691 	val = dd->control | QLOGIC_IB_C_RESET;
3692 	writeq(val, &dd->kregbase[kr_control]);
3693 
3694 	for (i = 1; i <= 5; i++) {
3695 		/*
3696 		 * Allow MBIST, etc. to complete; longer on each retry.
3697 		 * We sometimes get machine checks from bus timeout if no
3698 		 * response, so for now, make it *really* long.
3699 		 */
3700 		msleep(1000 + (1 + i) * 3000);
3701 
3702 		qib_pcie_reenable(dd, cmdval, int_line, clinesz);
3703 
3704 		/*
3705 		 * Use readq directly, so we don't need to mark it as PRESENT
3706 		 * until we get a successful indication that all is well.
3707 		 */
3708 		val = readq(&dd->kregbase[kr_revision]);
3709 		if (val == dd->revision)
3710 			break;
3711 		if (i == 5) {
3712 			qib_dev_err(dd,
3713 				"Failed to initialize after reset, unusable\n");
3714 			ret = 0;
3715 			goto  bail;
3716 		}
3717 	}
3718 
3719 	dd->flags |= QIB_PRESENT; /* it's back */
3720 
3721 	if (msix_entries) {
3722 		/* restore the MSIx vector address and data if saved above */
3723 		for (i = 0; i < msix_entries; i++) {
3724 			if (!msix_vecsave || !msix_vecsave[2 * i])
3725 				continue;
3726 			qib_write_kreg(dd, 2 * i +
3727 				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
3728 				msix_vecsave[2 * i]);
3729 			qib_write_kreg(dd, 1 + 2 * i +
3730 				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
3731 				msix_vecsave[1 + 2 * i]);
3732 		}
3733 	}
3734 
3735 	/* initialize the remaining registers.  */
3736 	for (i = 0; i < dd->num_pports; ++i)
3737 		write_7322_init_portregs(&dd->pport[i]);
3738 	write_7322_initregs(dd);
3739 
3740 	if (qib_pcie_params(dd, dd->lbus_width, &msix_entries))
3741 		qib_dev_err(dd,
3742 			"Reset failed to setup PCIe or interrupts; continuing anyway\n");
3743 
3744 	dd->cspec->num_msix_entries = msix_entries;
3745 	qib_setup_7322_interrupt(dd, 1);
3746 
3747 	for (i = 0; i < dd->num_pports; ++i) {
3748 		struct qib_pportdata *ppd = &dd->pport[i];
3749 
3750 		spin_lock_irqsave(&ppd->lflags_lock, flags);
3751 		ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
3752 		ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3753 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3754 	}
3755 
3756 bail:
3757 	dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
3758 	kfree(msix_vecsave);
3759 	return ret;
3760 }
3761 
3762 /**
3763  * qib_7322_put_tid - write a TID to the chip
3764  * @dd: the qlogic_ib device
3765  * @tidptr: pointer to the expected TID (in chip) to update
3766  * @type: RCVHQ_RCV_TYPE_EAGER or RCVHQ_RCV_TYPE_EXPECTED
3767  * @pa: physical address of in memory buffer; tidinvalid if freeing
3768  */
3769 static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
3770 			     u32 type, unsigned long pa)
3771 {
3772 	if (!(dd->flags & QIB_PRESENT))
3773 		return;
3774 	if (pa != dd->tidinvalid) {
3775 		u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
3776 
3777 		/* paranoia checks */
3778 		if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
3779 			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
3780 				    pa);
3781 			return;
3782 		}
3783 		if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
3784 			qib_dev_err(dd,
3785 				"Physical page address 0x%lx larger than supported\n",
3786 				pa);
3787 			return;
3788 		}
3789 
3790 		if (type == RCVHQ_RCV_TYPE_EAGER)
3791 			chippa |= dd->tidtemplate;
3792 		else /* for now, always full 4KB page */
3793 			chippa |= IBA7322_TID_SZ_4K;
3794 		pa = chippa;
3795 	}
3796 	writeq(pa, tidptr);
3797 	mmiowb();
3798 }
3799 
3800 /**
3801  * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
3802  * @dd: the qlogic_ib device
3803  * @rcd: the context data
3804  *
3805  * clear all TID entries for a ctxt, expected and eager.
3806  * Used from qib_close().
3807  */
3808 static void qib_7322_clear_tids(struct qib_devdata *dd,
3809 				struct qib_ctxtdata *rcd)
3810 {
3811 	u64 __iomem *tidbase;
3812 	unsigned long tidinv;
3813 	u32 ctxt;
3814 	int i;
3815 
3816 	if (!dd->kregbase || !rcd)
3817 		return;
3818 
3819 	ctxt = rcd->ctxt;
3820 
3821 	tidinv = dd->tidinvalid;
3822 	tidbase = (u64 __iomem *)
3823 		((char __iomem *) dd->kregbase +
3824 		 dd->rcvtidbase +
3825 		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
3826 
3827 	for (i = 0; i < dd->rcvtidcnt; i++)
3828 		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
3829 				 tidinv);
3830 
3831 	tidbase = (u64 __iomem *)
3832 		((char __iomem *) dd->kregbase +
3833 		 dd->rcvegrbase +
3834 		 rcd->rcvegr_tid_base * sizeof(*tidbase));
3835 
3836 	for (i = 0; i < rcd->rcvegrcnt; i++)
3837 		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
3838 				 tidinv);
3839 }
3840 
3841 /**
3842  * qib_7322_tidtemplate - setup constants for TID updates
3843  * @dd: the qlogic_ib device
3844  *
3845  * We setup stuff that we use a lot, to avoid calculating each time
3846  */
3847 static void qib_7322_tidtemplate(struct qib_devdata *dd)
3848 {
3849 	/*
3850 	 * For now, we always allocate 4KB buffers (at init) so we can
3851 	 * receive max size packets.  We may want a module parameter to
3852 	 * specify 2KB or 4KB and/or make it per port instead of per device
3853 	 * for those who want to reduce memory footprint.  Note that the
3854 	 * rcvhdrentsize size must be large enough to hold the largest
3855 	 * rcvhdrentsize must be large enough to hold the largest
3856 	 * course the 2 dwords of RHF).
3857 	 */
3858 	if (dd->rcvegrbufsize == 2048)
3859 		dd->tidtemplate = IBA7322_TID_SZ_2K;
3860 	else if (dd->rcvegrbufsize == 4096)
3861 		dd->tidtemplate = IBA7322_TID_SZ_4K;
3862 	dd->tidinvalid = 0;
3863 }
3864 
3865 /**
3866  * qib_init_7322_get_base_info - set chip-specific flags for user code
3867  * @rcd: the qlogic_ib ctxt
3868  * @kinfo: qib_base_info pointer
3869  *
3870  * We set the PCIE flag because the lower bandwidth on PCIe vs
3871  * HyperTransport can affect some user packet algorithms.
3872  */
3873 
3874 static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
3875 				  struct qib_base_info *kinfo)
3876 {
3877 	kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
3878 		QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
3879 		QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
3880 	if (rcd->dd->cspec->r1)
3881 		kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
3882 	if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
3883 		kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
3884 
3885 	return 0;
3886 }
3887 
3888 static struct qib_message_header *
3889 qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
3890 {
3891 	u32 offset = qib_hdrget_offset(rhf_addr);
3892 
3893 	return (struct qib_message_header *)
3894 		(rhf_addr - dd->rhf_offset + offset);
3895 }
3896 
3897 /*
3898  * Configure number of contexts.
3899  */
3900 static void qib_7322_config_ctxts(struct qib_devdata *dd)
3901 {
3902 	unsigned long flags;
3903 	u32 nchipctxts;
3904 
3905 	nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
3906 	dd->cspec->numctxts = nchipctxts;
3907 	if (qib_n_krcv_queues > 1 && dd->num_pports) {
3908 		dd->first_user_ctxt = NUM_IB_PORTS +
3909 			(qib_n_krcv_queues - 1) * dd->num_pports;
3910 		if (dd->first_user_ctxt > nchipctxts)
3911 			dd->first_user_ctxt = nchipctxts;
3912 		dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
3913 	} else {
3914 		dd->first_user_ctxt = NUM_IB_PORTS;
3915 		dd->n_krcv_queues = 1;
3916 	}
3917 
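	/*
	 * If the user did not specify a context count, size for the kernel
	 * contexts plus one user context per online CPU, rounded up to a
	 * supported configuration (6, 10, or all chip contexts).
	 */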
3918 	if (!qib_cfgctxts) {
3919 		int nctxts = dd->first_user_ctxt + num_online_cpus();
3920 
3921 		if (nctxts <= 6)
3922 			dd->ctxtcnt = 6;
3923 		else if (nctxts <= 10)
3924 			dd->ctxtcnt = 10;
3925 		else if (nctxts <= nchipctxts)
3926 			dd->ctxtcnt = nchipctxts;
3927 	} else if (qib_cfgctxts < dd->num_pports)
3928 		dd->ctxtcnt = dd->num_pports;
3929 	else if (qib_cfgctxts <= nchipctxts)
3930 		dd->ctxtcnt = qib_cfgctxts;
3931 	if (!dd->ctxtcnt) /* none of the above, set to max */
3932 		dd->ctxtcnt = nchipctxts;
3933 
3934 	/*
3935 	 * Chip can be configured for 6, 10, or 18 ctxts, and choice
3936 	 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
3937 	 * Lock to be paranoid about later motion, etc.
3938 	 */
3939 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
3940 	if (dd->ctxtcnt > 10)
3941 		dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
3942 	else if (dd->ctxtcnt > 6)
3943 		dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
3944 	/* else configure for default 6 receive ctxts */
3945 
3946 	/* The XRC opcode is 5. */
3947 	dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
3948 
3949 	/*
3950 	 * RcvCtrl *must* be written here so that the
3951 	 * chip understands how to change rcvegrcnt below.
3952 	 */
3953 	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
3954 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
3955 
3956 	/* kr_rcvegrcnt changes based on the number of contexts enabled */
3957 	dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3958 	if (qib_rcvhdrcnt)
3959 		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
3960 	else
3961 		dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
3962 				    dd->num_pports > 1 ? 1024U : 2048U);
3963 }
3964 
3965 static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
3966 {
3967 
3968 	int lsb, ret = 0;
3969 	u64 maskr; /* right-justified mask */
3970 
3971 	switch (which) {
3972 
3973 	case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
3974 		ret = ppd->link_width_enabled;
3975 		goto done;
3976 
3977 	case QIB_IB_CFG_LWID: /* Get currently active Link-width */
3978 		ret = ppd->link_width_active;
3979 		goto done;
3980 
3981 	case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
3982 		ret = ppd->link_speed_enabled;
3983 		goto done;
3984 
3985 	case QIB_IB_CFG_SPD: /* Get current Link spd */
3986 		ret = ppd->link_speed_active;
3987 		goto done;
3988 
3989 	case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
3990 		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3991 		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3992 		break;
3993 
3994 	case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
3995 		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3996 		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3997 		break;
3998 
3999 	case QIB_IB_CFG_LINKLATENCY:
4000 		ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
4001 			SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
4002 		goto done;
4003 
4004 	case QIB_IB_CFG_OP_VLS:
4005 		ret = ppd->vls_operational;
4006 		goto done;
4007 
4008 	case QIB_IB_CFG_VL_HIGH_CAP:
4009 		ret = 16;
4010 		goto done;
4011 
4012 	case QIB_IB_CFG_VL_LOW_CAP:
4013 		ret = 16;
4014 		goto done;
4015 
4016 	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
4017 		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4018 				OverrunThreshold);
4019 		goto done;
4020 
4021 	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
4022 		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4023 				PhyerrThreshold);
4024 		goto done;
4025 
4026 	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4027 		/* will only take effect when the link state changes */
4028 		ret = (ppd->cpspec->ibcctrl_a &
4029 		       SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
4030 			IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
4031 		goto done;
4032 
4033 	case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
4034 		lsb = IBA7322_IBC_HRTBT_LSB;
4035 		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4036 		break;
4037 
4038 	case QIB_IB_CFG_PMA_TICKS:
4039 		/*
4040 		 * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
4041 		 * Since the clock is always 250MHz, the value is 3, 1 or 0.
4042 		 */
4043 		if (ppd->link_speed_active == QIB_IB_QDR)
4044 			ret = 3;
4045 		else if (ppd->link_speed_active == QIB_IB_DDR)
4046 			ret = 1;
4047 		else
4048 			ret = 0;
4049 		goto done;
4050 
4051 	default:
4052 		ret = -EINVAL;
4053 		goto done;
4054 	}
4055 	ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
4056 done:
4057 	return ret;
4058 }
4059 
4060 /*
4061  * Below again cribbed liberally from older version. Do not lean
4062  * heavily on it.
4063  */
4064 #define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
4065 #define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
4066 	| (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
4067 
4068 static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
4069 {
4070 	struct qib_devdata *dd = ppd->dd;
4071 	u64 maskr; /* right-justified mask */
4072 	int lsb, ret = 0;
4073 	u16 lcmd, licmd;
4074 	unsigned long flags;
4075 
4076 	switch (which) {
4077 	case QIB_IB_CFG_LIDLMC:
4078 		/*
4079 		 * Set LID and LMC. Combined to avoid possible hazard
4080 		 * caller puts LMC in 16MSbits, DLID in 16LSbits of val
4081 		 */
4082 		lsb = IBA7322_IBC_DLIDLMC_SHIFT;
4083 		maskr = IBA7322_IBC_DLIDLMC_MASK;
4084 		/*
4085 		 * For header-checking, the SLID in the packet will
4086 		 * be masked with SendIBSLMCMask, and compared
4087 		 * with SendIBSLIDAssignMask. Make sure we do not
4088 		 * set any bits not covered by the mask, or we get
4089 		 * false-positives.
4090 		 */
4091 		qib_write_kreg_port(ppd, krp_sendslid,
4092 				    val & (val >> 16) & SendIBSLIDAssignMask);
4093 		qib_write_kreg_port(ppd, krp_sendslidmask,
4094 				    (val >> 16) & SendIBSLMCMask);
4095 		break;
4096 
4097 	case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
4098 		ppd->link_width_enabled = val;
4099 		/* convert IB value to chip register value */
4100 		if (val == IB_WIDTH_1X)
4101 			val = 0;
4102 		else if (val == IB_WIDTH_4X)
4103 			val = 1;
4104 		else
4105 			val = 3;
4106 		maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
4107 		lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
4108 		break;
4109 
4110 	case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
4111 		/*
4112 		 * As with width, only write the actual register if the
4113 		 * link is currently down, otherwise takes effect on next
4114 		 * link change.  Since setting is being explicitly requested
4115 		 * (via MAD or sysfs), clear autoneg failure status if speed
4116 		 * autoneg is enabled.
4117 		 */
4118 		ppd->link_speed_enabled = val;
4119 		val <<= IBA7322_IBC_SPEED_LSB;
4120 		maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
4121 			IBA7322_IBC_MAX_SPEED_MASK;
4122 		if (val & (val - 1)) {
4123 			/* Multiple speeds enabled */
4124 			val |= IBA7322_IBC_IBTA_1_2_MASK |
4125 				IBA7322_IBC_MAX_SPEED_MASK;
4126 			spin_lock_irqsave(&ppd->lflags_lock, flags);
4127 			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
4128 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
4129 		} else if (val & IBA7322_IBC_SPEED_QDR)
4130 			val |= IBA7322_IBC_IBTA_1_2_MASK;
4131 		/* IBTA 1.2 mode + min/max + speed bits are contiguous */
4132 		lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
4133 		break;
4134 
4135 	case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
4136 		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4137 		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4138 		break;
4139 
4140 	case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
4141 		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4142 		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4143 		break;
4144 
4145 	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
4146 		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4147 				  OverrunThreshold);
4148 		if (maskr != val) {
4149 			ppd->cpspec->ibcctrl_a &=
4150 				~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
4151 			ppd->cpspec->ibcctrl_a |= (u64) val <<
4152 				SYM_LSB(IBCCtrlA_0, OverrunThreshold);
4153 			qib_write_kreg_port(ppd, krp_ibcctrl_a,
4154 					    ppd->cpspec->ibcctrl_a);
4155 			qib_write_kreg(dd, kr_scratch, 0ULL);
4156 		}
4157 		goto bail;
4158 
4159 	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
4160 		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4161 				  PhyerrThreshold);
4162 		if (maskr != val) {
4163 			ppd->cpspec->ibcctrl_a &=
4164 				~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
4165 			ppd->cpspec->ibcctrl_a |= (u64) val <<
4166 				SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
4167 			qib_write_kreg_port(ppd, krp_ibcctrl_a,
4168 					    ppd->cpspec->ibcctrl_a);
4169 			qib_write_kreg(dd, kr_scratch, 0ULL);
4170 		}
4171 		goto bail;
4172 
4173 	case QIB_IB_CFG_PKEYS: /* update pkeys */
4174 		maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
4175 			((u64) ppd->pkeys[2] << 32) |
4176 			((u64) ppd->pkeys[3] << 48);
4177 		qib_write_kreg_port(ppd, krp_partitionkey, maskr);
4178 		goto bail;
4179 
4180 	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4181 		/* will only take effect when the link state changes */
4182 		if (val == IB_LINKINITCMD_POLL)
4183 			ppd->cpspec->ibcctrl_a &=
4184 				~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4185 		else /* SLEEP */
4186 			ppd->cpspec->ibcctrl_a |=
4187 				SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4188 		qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
4189 		qib_write_kreg(dd, kr_scratch, 0ULL);
4190 		goto bail;
4191 
4192 	case QIB_IB_CFG_MTU: /* update the MTU in IBC */
4193 		/*
4194 		 * Update our housekeeping variables, and set IBC max
4195 		 * size, same as init code; max IBC is max we allow in
4196 		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
4197 		 * Set even if it's unchanged, print debug message only
4198 		 * on changes.
4199 		 */
4200 		val = (ppd->ibmaxlen >> 2) + 1;
4201 		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
4202 		ppd->cpspec->ibcctrl_a |= (u64)val <<
4203 			SYM_LSB(IBCCtrlA_0, MaxPktLen);
4204 		qib_write_kreg_port(ppd, krp_ibcctrl_a,
4205 				    ppd->cpspec->ibcctrl_a);
4206 		qib_write_kreg(dd, kr_scratch, 0ULL);
4207 		goto bail;
4208 
4209 	case QIB_IB_CFG_LSTATE: /* set the IB link state */
4210 		switch (val & 0xffff0000) {
4211 		case IB_LINKCMD_DOWN:
4212 			lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
4213 			ppd->cpspec->ibmalfusesnap = 1;
4214 			ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
4215 				crp_errlink);
4216 			if (!ppd->cpspec->ibdeltainprog &&
4217 			    qib_compat_ddr_negotiate) {
4218 				ppd->cpspec->ibdeltainprog = 1;
4219 				ppd->cpspec->ibsymsnap =
4220 					read_7322_creg32_port(ppd,
4221 							      crp_ibsymbolerr);
4222 				ppd->cpspec->iblnkerrsnap =
4223 					read_7322_creg32_port(ppd,
4224 						      crp_iblinkerrrecov);
4225 			}
4226 			break;
4227 
4228 		case IB_LINKCMD_ARMED:
4229 			lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
4230 			if (ppd->cpspec->ibmalfusesnap) {
4231 				ppd->cpspec->ibmalfusesnap = 0;
4232 				ppd->cpspec->ibmalfdelta +=
4233 					read_7322_creg32_port(ppd,
4234 							      crp_errlink) -
4235 					ppd->cpspec->ibmalfsnap;
4236 			}
4237 			break;
4238 
4239 		case IB_LINKCMD_ACTIVE:
4240 			lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
4241 			break;
4242 
4243 		default:
4244 			ret = -EINVAL;
4245 			qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
4246 			goto bail;
4247 		}
4248 		switch (val & 0xffff) {
4249 		case IB_LINKINITCMD_NOP:
4250 			licmd = 0;
4251 			break;
4252 
4253 		case IB_LINKINITCMD_POLL:
4254 			licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
4255 			break;
4256 
4257 		case IB_LINKINITCMD_SLEEP:
4258 			licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
4259 			break;
4260 
4261 		case IB_LINKINITCMD_DISABLE:
4262 			licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
4263 			ppd->cpspec->chase_end = 0;
4264 			/*
4265 			 * stop state chase counter and timer, if running.
4266 			 * wait for a pending timer, but don't clear .data (ppd)!
4267 			 */
4268 			if (ppd->cpspec->chase_timer.expires) {
4269 				del_timer_sync(&ppd->cpspec->chase_timer);
4270 				ppd->cpspec->chase_timer.expires = 0;
4271 			}
4272 			break;
4273 
4274 		default:
4275 			ret = -EINVAL;
4276 			qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
4277 				    val & 0xffff);
4278 			goto bail;
4279 		}
4280 		qib_set_ib_7322_lstate(ppd, lcmd, licmd);
4281 		goto bail;
4282 
4283 	case QIB_IB_CFG_OP_VLS:
4284 		if (ppd->vls_operational != val) {
4285 			ppd->vls_operational = val;
4286 			set_vls(ppd);
4287 		}
4288 		goto bail;
4289 
4290 	case QIB_IB_CFG_VL_HIGH_LIMIT:
4291 		qib_write_kreg_port(ppd, krp_highprio_limit, val);
4292 		goto bail;
4293 
4294 	case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
4295 		if (val > 3) {
4296 			ret = -EINVAL;
4297 			goto bail;
4298 		}
4299 		lsb = IBA7322_IBC_HRTBT_LSB;
4300 		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4301 		break;
4302 
4303 	case QIB_IB_CFG_PORT:
4304 		/* val is the port number of the switch we are connected to. */
4305 		if (ppd->dd->cspec->r1) {
4306 			cancel_delayed_work(&ppd->cpspec->ipg_work);
4307 			ppd->cpspec->ipg_tries = 0;
4308 		}
4309 		goto bail;
4310 
4311 	default:
4312 		ret = -EINVAL;
4313 		goto bail;
4314 	}
4315 	ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
4316 	ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
4317 	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
4318 	qib_write_kreg(dd, kr_scratch, 0);
4319 bail:
4320 	return ret;
4321 }
4322 
4323 static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
4324 {
4325 	int ret = 0;
4326 	u64 val, ctrlb;
4327 
4328 	/* only IBC loopback, may add serdes and xgxs loopbacks later */
4329 	if (!strncmp(what, "ibc", 3)) {
4330 		ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
4331 						       Loopback);
4332 		val = 0; /* disable heart beat, so link will come up */
4333 		qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
4334 			 ppd->dd->unit, ppd->port);
4335 	} else if (!strncmp(what, "off", 3)) {
4336 		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
4337 							Loopback);
4338 		/* enable heart beat again */
4339 		val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
4340 		qib_devinfo(ppd->dd->pcidev,
4341 			"Disabling IB%u:%u IBC loopback (normal)\n",
4342 			ppd->dd->unit, ppd->port);
4343 	} else
4344 		ret = -EINVAL;
4345 	if (!ret) {
4346 		qib_write_kreg_port(ppd, krp_ibcctrl_a,
4347 				    ppd->cpspec->ibcctrl_a);
4348 		ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
4349 					     << IBA7322_IBC_HRTBT_LSB);
4350 		ppd->cpspec->ibcctrl_b = ctrlb | val;
4351 		qib_write_kreg_port(ppd, krp_ibcctrl_b,
4352 				    ppd->cpspec->ibcctrl_b);
4353 		qib_write_kreg(ppd->dd, kr_scratch, 0);
4354 	}
4355 	return ret;
4356 }
4357 
4358 static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4359 			   struct ib_vl_weight_elem *vl)
4360 {
4361 	unsigned i;
4362 
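	/* Each of the 16 arbitration table entries packs a VL number and a weight. */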
4363 	for (i = 0; i < 16; i++, regno++, vl++) {
4364 		u32 val = qib_read_kreg_port(ppd, regno);
4365 
4366 		vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
4367 			SYM_RMASK(LowPriority0_0, VirtualLane);
4368 		vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
4369 			SYM_RMASK(LowPriority0_0, Weight);
4370 	}
4371 }
4372 
4373 static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4374 			   struct ib_vl_weight_elem *vl)
4375 {
4376 	unsigned i;
4377 
4378 	for (i = 0; i < 16; i++, regno++, vl++) {
4379 		u64 val;
4380 
4381 		val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
4382 			SYM_LSB(LowPriority0_0, VirtualLane)) |
4383 		      ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
4384 			SYM_LSB(LowPriority0_0, Weight));
4385 		qib_write_kreg_port(ppd, regno, val);
4386 	}
4387 	if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
4388 		struct qib_devdata *dd = ppd->dd;
4389 		unsigned long flags;
4390 
4391 		spin_lock_irqsave(&dd->sendctrl_lock, flags);
4392 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
4393 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4394 		qib_write_kreg(dd, kr_scratch, 0);
4395 		spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4396 	}
4397 }
4398 
4399 static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
4400 {
4401 	switch (which) {
4402 	case QIB_IB_TBL_VL_HIGH_ARB:
4403 		get_vl_weights(ppd, krp_highprio_0, t);
4404 		break;
4405 
4406 	case QIB_IB_TBL_VL_LOW_ARB:
4407 		get_vl_weights(ppd, krp_lowprio_0, t);
4408 		break;
4409 
4410 	default:
4411 		return -EINVAL;
4412 	}
4413 	return 0;
4414 }
4415 
4416 static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
4417 {
4418 	switch (which) {
4419 	case QIB_IB_TBL_VL_HIGH_ARB:
4420 		set_vl_weights(ppd, krp_highprio_0, t);
4421 		break;
4422 
4423 	case QIB_IB_TBL_VL_LOW_ARB:
4424 		set_vl_weights(ppd, krp_lowprio_0, t);
4425 		break;
4426 
4427 	default:
4428 		return -EINVAL;
4429 	}
4430 	return 0;
4431 }
4432 
4433 static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
4434 				    u32 updegr, u32 egrhd, u32 npkts)
4435 {
4436 	/*
4437 	 * Need to write timeout register before updating rcvhdrhead to ensure
4438 	 * that the timer is enabled on reception of a packet.
4439 	 */
4440 	if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
4441 		adjust_rcv_timeout(rcd, npkts);
4442 	if (updegr)
4443 		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
4444 	mmiowb();
4445 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4446 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4447 	mmiowb();
4448 }
4449 
4450 static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
4451 {
4452 	u32 head, tail;
4453 
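	/* Prefer the DMA'd tail copy when present; otherwise read the chip register. */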
4454 	head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
4455 	if (rcd->rcvhdrtail_kvaddr)
4456 		tail = qib_get_rcvhdrtail(rcd);
4457 	else
4458 		tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
4459 	return head == tail;
4460 }
4461 
4462 #define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
4463 	QIB_RCVCTRL_CTXT_DIS | \
4464 	QIB_RCVCTRL_TIDFLOW_ENB | \
4465 	QIB_RCVCTRL_TIDFLOW_DIS | \
4466 	QIB_RCVCTRL_TAILUPD_ENB | \
4467 	QIB_RCVCTRL_TAILUPD_DIS | \
4468 	QIB_RCVCTRL_INTRAVAIL_ENB | \
4469 	QIB_RCVCTRL_INTRAVAIL_DIS | \
4470 	QIB_RCVCTRL_BP_ENB | \
4471 	QIB_RCVCTRL_BP_DIS)
4472 
4473 #define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
4474 	QIB_RCVCTRL_CTXT_DIS | \
4475 	QIB_RCVCTRL_PKEY_DIS | \
4476 	QIB_RCVCTRL_PKEY_ENB)
4477 
4478 /*
4479  * Modify the RCVCTRL register in chip-specific way. This
4480  * is a function because bit positions and (future) register
4481  * location is chip-specific, but the needed operations are
4482  * generic. <op> is a bit-mask because we often want to
4483  * do multiple modifications.
4484  */
4485 static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
4486 			     int ctxt)
4487 {
4488 	struct qib_devdata *dd = ppd->dd;
4489 	struct qib_ctxtdata *rcd;
4490 	u64 mask, val;
4491 	unsigned long flags;
4492 
4493 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
4494 
4495 	if (op & QIB_RCVCTRL_TIDFLOW_ENB)
4496 		dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
4497 	if (op & QIB_RCVCTRL_TIDFLOW_DIS)
4498 		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
4499 	if (op & QIB_RCVCTRL_TAILUPD_ENB)
4500 		dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4501 	if (op & QIB_RCVCTRL_TAILUPD_DIS)
4502 		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
4503 	if (op & QIB_RCVCTRL_PKEY_ENB)
4504 		ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4505 	if (op & QIB_RCVCTRL_PKEY_DIS)
4506 		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
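	/* A negative ctxt means "all contexts": build a mask covering every one. */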
4507 	if (ctxt < 0) {
4508 		mask = (1ULL << dd->ctxtcnt) - 1;
4509 		rcd = NULL;
4510 	} else {
4511 		mask = (1ULL << ctxt);
4512 		rcd = dd->rcd[ctxt];
4513 	}
4514 	if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
4515 		ppd->p_rcvctrl |=
4516 			(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4517 		if (!(dd->flags & QIB_NODMA_RTAIL)) {
4518 			op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
4519 			dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4520 		}
4521 		/* Write these registers before the context is enabled. */
4522 		qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
4523 				    rcd->rcvhdrqtailaddr_phys);
4524 		qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
4525 				    rcd->rcvhdrq_phys);
4526 		rcd->seq_cnt = 1;
4527 	}
4528 	if (op & QIB_RCVCTRL_CTXT_DIS)
4529 		ppd->p_rcvctrl &=
4530 			~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4531 	if (op & QIB_RCVCTRL_BP_ENB)
4532 		dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
4533 	if (op & QIB_RCVCTRL_BP_DIS)
4534 		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
4535 	if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
4536 		dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
4537 	if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
4538 		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
4539 	/*
4540 	 * Decide which registers to write depending on the ops enabled.
4541 	 * Special case is "flush" (no bits set at all)
4542 	 * which needs to write both.
4543 	 */
4544 	if (op == 0 || (op & RCVCTRL_COMMON_MODS))
4545 		qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
4546 	if (op == 0 || (op & RCVCTRL_PORT_MODS))
4547 		qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
4548 	if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
4549 		/*
4550 		 * Init the context registers also; if we were
4551 		 * disabled, tail and head should both be zero
4552 		 * already from the enable, but since we don't
4553 		 * know, we have to do it explicitly.
4554 		 */
4555 		val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
4556 		qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
4557 
4558 		/* be sure enabling write seen; hd/tl should be 0 */
4559 		(void) qib_read_kreg32(dd, kr_scratch);
4560 		val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
4561 		dd->rcd[ctxt]->head = val;
4562 		/* If kctxt, interrupt on next receive. */
4563 		if (ctxt < dd->first_user_ctxt)
4564 			val |= dd->rhdrhead_intr_off;
4565 		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4566 	} else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
4567 		dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
4568 		/* arm rcv interrupt */
4569 		val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
4570 		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4571 	}
4572 	if (op & QIB_RCVCTRL_CTXT_DIS) {
4573 		unsigned f;
4574 
4575 		/* Now that the context is disabled, clear these registers. */
4576 		if (ctxt >= 0) {
4577 			qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
4578 			qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
4579 			for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4580 				qib_write_ureg(dd, ur_rcvflowtable + f,
4581 					       TIDFLOW_ERRBITS, ctxt);
4582 		} else {
4583 			unsigned i;
4584 
4585 			for (i = 0; i < dd->cfgctxts; i++) {
4586 				qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
4587 						    i, 0);
4588 				qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
4589 				for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4590 					qib_write_ureg(dd, ur_rcvflowtable + f,
4591 						       TIDFLOW_ERRBITS, i);
4592 			}
4593 		}
4594 	}
4595 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
4596 }
4597 
4598 /*
 * Modify the SENDCTRL register in a chip-specific way. This
 * is a function because there are multiple such registers with
4601  * slightly different layouts.
4602  * The chip doesn't allow back-to-back sendctrl writes, so write
4603  * the scratch register after writing sendctrl.
4604  *
4605  * Which register is written depends on the operation.
4606  * Most operate on the common register, while
4607  * SEND_ENB and SEND_DIS operate on the per-port ones.
4608  * SEND_ENB is included in common because it can change SPCL_TRIG
4609  */
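/*
 * Illustrative only: QIB_SENDCTRL_SEND_ENB appears in both mod lists below,
 * so a call such as
 *	sendctrl_7322_mod(ppd, QIB_SENDCTRL_SEND_ENB);
 * updates the per-port SendCtrl shadow and also rewrites the common
 * register, each write followed by the required scratch write.
 */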
4610 #define SENDCTRL_COMMON_MODS (\
4611 	QIB_SENDCTRL_CLEAR | \
4612 	QIB_SENDCTRL_AVAIL_DIS | \
4613 	QIB_SENDCTRL_AVAIL_ENB | \
4614 	QIB_SENDCTRL_AVAIL_BLIP | \
4615 	QIB_SENDCTRL_DISARM | \
4616 	QIB_SENDCTRL_DISARM_ALL | \
4617 	QIB_SENDCTRL_SEND_ENB)
4618 
4619 #define SENDCTRL_PORT_MODS (\
4620 	QIB_SENDCTRL_CLEAR | \
4621 	QIB_SENDCTRL_SEND_ENB | \
4622 	QIB_SENDCTRL_SEND_DIS | \
4623 	QIB_SENDCTRL_FLUSH)
4624 
static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
4626 {
4627 	struct qib_devdata *dd = ppd->dd;
4628 	u64 tmp_dd_sendctrl;
4629 	unsigned long flags;
4630 
4631 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
4632 
4633 	/* First the dd ones that are "sticky", saved in shadow */
4634 	if (op & QIB_SENDCTRL_CLEAR)
4635 		dd->sendctrl = 0;
4636 	if (op & QIB_SENDCTRL_AVAIL_DIS)
4637 		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4638 	else if (op & QIB_SENDCTRL_AVAIL_ENB) {
4639 		dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
4640 		if (dd->flags & QIB_USE_SPCL_TRIG)
4641 			dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
4642 	}
4643 
4644 	/* Then the ppd ones that are "sticky", saved in shadow */
4645 	if (op & QIB_SENDCTRL_SEND_DIS)
4646 		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
4647 	else if (op & QIB_SENDCTRL_SEND_ENB)
4648 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
4649 
4650 	if (op & QIB_SENDCTRL_DISARM_ALL) {
4651 		u32 i, last;
4652 
4653 		tmp_dd_sendctrl = dd->sendctrl;
4654 		last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
4655 		/*
4656 		 * Disarm any buffers that are not yet launched,
4657 		 * disabling updates until done.
4658 		 */
4659 		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4660 		for (i = 0; i < last; i++) {
4661 			qib_write_kreg(dd, kr_sendctrl,
4662 				       tmp_dd_sendctrl |
4663 				       SYM_MASK(SendCtrl, Disarm) | i);
4664 			qib_write_kreg(dd, kr_scratch, 0);
4665 		}
4666 	}
4667 
4668 	if (op & QIB_SENDCTRL_FLUSH) {
4669 		u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
4670 
4671 		/*
4672 		 * Now drain all the fifos.  The Abort bit should never be
4673 		 * needed, so for now, at least, we don't use it.
4674 		 */
4675 		tmp_ppd_sendctrl |=
4676 			SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
4677 			SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
4678 			SYM_MASK(SendCtrl_0, TxeBypassIbc);
4679 		qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
4680 		qib_write_kreg(dd, kr_scratch, 0);
4681 	}
4682 
4683 	tmp_dd_sendctrl = dd->sendctrl;
4684 
4685 	if (op & QIB_SENDCTRL_DISARM)
4686 		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
4687 			((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
4688 			 SYM_LSB(SendCtrl, DisarmSendBuf));
4689 	if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
4690 	    (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
4691 		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4692 
4693 	if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
4694 		qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
4695 		qib_write_kreg(dd, kr_scratch, 0);
4696 	}
4697 
4698 	if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
4699 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4700 		qib_write_kreg(dd, kr_scratch, 0);
4701 	}
4702 
4703 	if (op & QIB_SENDCTRL_AVAIL_BLIP) {
4704 		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
4705 		qib_write_kreg(dd, kr_scratch, 0);
4706 	}
4707 
4708 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4709 
4710 	if (op & QIB_SENDCTRL_FLUSH) {
4711 		u32 v;
4712 		/*
4713 		 * ensure writes have hit chip, then do a few
4714 		 * more reads, to allow DMA of pioavail registers
4715 		 * to occur, so in-memory copy is in sync with
4716 		 * the chip.  Not always safe to sleep.
4717 		 */
4718 		v = qib_read_kreg32(dd, kr_scratch);
4719 		qib_write_kreg(dd, kr_scratch, v);
4720 		v = qib_read_kreg32(dd, kr_scratch);
4721 		qib_write_kreg(dd, kr_scratch, v);
4722 		qib_read_kreg32(dd, kr_scratch);
4723 	}
4724 }
4725 
4726 #define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
4727 #define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
4728 #define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
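
/*
 * Illustrative decode of the encoding above: a table entry such as
 * (crp_pktsend | _PORT_64BIT_FLAG) carries the counter index in the low
 * bits and its width in the flag, so readers first do
 *	creg = entry & _PORT_CNTR_IDXMASK;
 * and then use a 64-bit read if (entry & _PORT_64BIT_FLAG), else 32-bit.
 */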
4729 
4730 /**
4731  * qib_portcntr_7322 - read a per-port chip counter
4732  * @ppd: the qlogic_ib pport
 * @reg: the counter to read (not a chip offset)
4734  */
static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
4736 {
4737 	struct qib_devdata *dd = ppd->dd;
4738 	u64 ret = 0ULL;
4739 	u16 creg;
4740 	/* 0xffff for unimplemented or synthesized counters */
4741 	static const u32 xlator[] = {
4742 		[QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
4743 		[QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
4744 		[QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
4745 		[QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
4746 		[QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
4747 		[QIBPORTCNTR_SENDSTALL] = crp_sendstall,
4748 		[QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
4749 		[QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
4750 		[QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
4751 		[QIBPORTCNTR_RCVEBP] = crp_rcvebp,
4752 		[QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
4753 		[QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
4754 		[QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed  for 7322 */
4755 		[QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
4756 		[QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
4757 		[QIBPORTCNTR_ERRICRC] = crp_erricrc,
4758 		[QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
4759 		[QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
4760 		[QIBPORTCNTR_BADFORMAT] = crp_badformat,
4761 		[QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
4762 		[QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
4763 		[QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
4764 		[QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
4765 		[QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
4766 		[QIBPORTCNTR_ERRLINK] = crp_errlink,
4767 		[QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
4768 		[QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
4769 		[QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
4770 		[QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
4771 		[QIBPORTCNTR_ERRPKEY] = crp_errpkey,
4772 		/*
4773 		 * the next 3 aren't really counters, but were implemented
4774 		 * as counters in older chips, so still get accessed as
4775 		 * though they were counters from this code.
4776 		 */
4777 		[QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
4778 		[QIBPORTCNTR_PSSTART] = krp_psstart,
4779 		[QIBPORTCNTR_PSSTAT] = krp_psstat,
4780 		/* pseudo-counter, summed for all ports */
4781 		[QIBPORTCNTR_KHDROVFL] = 0xffff,
4782 	};
4783 
4784 	if (reg >= ARRAY_SIZE(xlator)) {
4785 		qib_devinfo(ppd->dd->pcidev,
4786 			 "Unimplemented portcounter %u\n", reg);
4787 		goto done;
4788 	}
4789 	creg = xlator[reg] & _PORT_CNTR_IDXMASK;
4790 
4791 	/* handle non-counters and special cases first */
4792 	if (reg == QIBPORTCNTR_KHDROVFL) {
4793 		int i;
4794 
4795 		/* sum over all kernel contexts (skip if mini_init) */
4796 		for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
4797 			struct qib_ctxtdata *rcd = dd->rcd[i];
4798 
4799 			if (!rcd || rcd->ppd != ppd)
4800 				continue;
4801 			ret += read_7322_creg32(dd, cr_base_egrovfl + i);
4802 		}
4803 		goto done;
4804 	} else if (reg == QIBPORTCNTR_RXDROPPKT) {
4805 		/*
4806 		 * Used as part of the synthesis of port_rcv_errors
4807 		 * in the verbs code for IBTA counters.  Not needed for 7322,
4808 		 * because all the errors are already counted by other cntrs.
4809 		 */
4810 		goto done;
4811 	} else if (reg == QIBPORTCNTR_PSINTERVAL ||
4812 		   reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
4813 		/* were counters in older chips, now per-port kernel regs */
4814 		ret = qib_read_kreg_port(ppd, creg);
4815 		goto done;
4816 	}
4817 
4818 	/*
4819 	 * Only fast increment counters are 64 bits; use 32 bit reads to
4820 	 * avoid two independent reads when on Opteron.
4821 	 */
4822 	if (xlator[reg] & _PORT_64BIT_FLAG)
4823 		ret = read_7322_creg_port(ppd, creg);
4824 	else
4825 		ret = read_7322_creg32_port(ppd, creg);
4826 	if (creg == crp_ibsymbolerr) {
4827 		if (ppd->cpspec->ibdeltainprog)
4828 			ret -= ret - ppd->cpspec->ibsymsnap;
4829 		ret -= ppd->cpspec->ibsymdelta;
4830 	} else if (creg == crp_iblinkerrrecov) {
4831 		if (ppd->cpspec->ibdeltainprog)
4832 			ret -= ret - ppd->cpspec->iblnkerrsnap;
4833 		ret -= ppd->cpspec->iblnkerrdelta;
4834 	} else if (creg == crp_errlink)
4835 		ret -= ppd->cpspec->ibmalfdelta;
4836 	else if (creg == crp_iblinkdown)
4837 		ret += ppd->cpspec->iblnkdowndelta;
4838 done:
4839 	return ret;
4840 }
4841 
4842 /*
4843  * Device counter names (not port-specific), one line per stat,
4844  * single string.  Used by utilities like ipathstats to print the stats
4845  * in a way which works for different versions of drivers, without changing
4846  * the utility.  Names need to be 12 chars or less (w/o newline), for proper
4847  * display by utility.
4848  * Non-error counters are first.
 * Start of "error" counters is indicated by a leading "E " on the first
4850  * "error" counter, and doesn't count in label length.
4851  * The EgrOvfl list needs to be last so we truncate them at the configured
4852  * context count for the device.
4853  * cntr7322indices contains the corresponding register indices.
4854  */
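/*
 * Illustrative pairing: the first name, "Interrupts", corresponds to
 * cntr7322indices[0] == (cr_lbint | _PORT_64BIT_FLAG), so it is read with
 * read_7322_creg() rather than read_7322_creg32() in qib_read_7322cntrs().
 */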
4855 static const char cntr7322names[] =
4856 	"Interrupts\n"
4857 	"HostBusStall\n"
4858 	"E RxTIDFull\n"
4859 	"RxTIDInvalid\n"
4860 	"RxTIDFloDrop\n" /* 7322 only */
4861 	"Ctxt0EgrOvfl\n"
4862 	"Ctxt1EgrOvfl\n"
4863 	"Ctxt2EgrOvfl\n"
4864 	"Ctxt3EgrOvfl\n"
4865 	"Ctxt4EgrOvfl\n"
4866 	"Ctxt5EgrOvfl\n"
4867 	"Ctxt6EgrOvfl\n"
4868 	"Ctxt7EgrOvfl\n"
4869 	"Ctxt8EgrOvfl\n"
4870 	"Ctxt9EgrOvfl\n"
4871 	"Ctx10EgrOvfl\n"
4872 	"Ctx11EgrOvfl\n"
4873 	"Ctx12EgrOvfl\n"
4874 	"Ctx13EgrOvfl\n"
4875 	"Ctx14EgrOvfl\n"
4876 	"Ctx15EgrOvfl\n"
4877 	"Ctx16EgrOvfl\n"
4878 	"Ctx17EgrOvfl\n"
4879 	;
4880 
4881 static const u32 cntr7322indices[] = {
4882 	cr_lbint | _PORT_64BIT_FLAG,
4883 	cr_lbstall | _PORT_64BIT_FLAG,
4884 	cr_tidfull,
4885 	cr_tidinvalid,
4886 	cr_rxtidflowdrop,
4887 	cr_base_egrovfl + 0,
4888 	cr_base_egrovfl + 1,
4889 	cr_base_egrovfl + 2,
4890 	cr_base_egrovfl + 3,
4891 	cr_base_egrovfl + 4,
4892 	cr_base_egrovfl + 5,
4893 	cr_base_egrovfl + 6,
4894 	cr_base_egrovfl + 7,
4895 	cr_base_egrovfl + 8,
4896 	cr_base_egrovfl + 9,
4897 	cr_base_egrovfl + 10,
4898 	cr_base_egrovfl + 11,
4899 	cr_base_egrovfl + 12,
4900 	cr_base_egrovfl + 13,
4901 	cr_base_egrovfl + 14,
4902 	cr_base_egrovfl + 15,
4903 	cr_base_egrovfl + 16,
4904 	cr_base_egrovfl + 17,
4905 };
4906 
4907 /*
4908  * same as cntr7322names and cntr7322indices, but for port-specific counters.
4909  * portcntr7322indices is somewhat complicated by some registers needing
4910  * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
4911  */
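/*
 * Illustrative pairing: "TxPkt" corresponds to
 * (QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG), so qib_read_7322portcntrs()
 * routes it through qib_portcntr_7322() rather than reading a creg directly.
 */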
4912 static const char portcntr7322names[] =
4913 	"TxPkt\n"
4914 	"TxFlowPkt\n"
4915 	"TxWords\n"
4916 	"RxPkt\n"
4917 	"RxFlowPkt\n"
4918 	"RxWords\n"
4919 	"TxFlowStall\n"
4920 	"TxDmaDesc\n"  /* 7220 and 7322-only */
4921 	"E RxDlidFltr\n"  /* 7220 and 7322-only */
4922 	"IBStatusChng\n"
4923 	"IBLinkDown\n"
4924 	"IBLnkRecov\n"
4925 	"IBRxLinkErr\n"
4926 	"IBSymbolErr\n"
4927 	"RxLLIErr\n"
4928 	"RxBadFormat\n"
4929 	"RxBadLen\n"
4930 	"RxBufOvrfl\n"
4931 	"RxEBP\n"
4932 	"RxFlowCtlErr\n"
4933 	"RxICRCerr\n"
4934 	"RxLPCRCerr\n"
4935 	"RxVCRCerr\n"
4936 	"RxInvalLen\n"
4937 	"RxInvalPKey\n"
4938 	"RxPktDropped\n"
4939 	"TxBadLength\n"
4940 	"TxDropped\n"
4941 	"TxInvalLen\n"
4942 	"TxUnderrun\n"
4943 	"TxUnsupVL\n"
4944 	"RxLclPhyErr\n" /* 7220 and 7322-only from here down */
4945 	"RxVL15Drop\n"
4946 	"RxVlErr\n"
4947 	"XcessBufOvfl\n"
4948 	"RxQPBadCtxt\n" /* 7322-only from here down */
4949 	"TXBadHeader\n"
4950 	;
4951 
4952 static const u32 portcntr7322indices[] = {
4953 	QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
4954 	crp_pktsendflow,
4955 	QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
4956 	QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
4957 	crp_pktrcvflowctrl,
4958 	QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
4959 	QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
4960 	crp_txsdmadesc | _PORT_64BIT_FLAG,
4961 	crp_rxdlidfltr,
4962 	crp_ibstatuschange,
4963 	QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
4964 	QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
4965 	QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
4966 	QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
4967 	QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
4968 	QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
4969 	QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
4970 	QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
4971 	QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
4972 	crp_rcvflowctrlviol,
4973 	QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
4974 	QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
4975 	QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
4976 	QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
4977 	QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
4978 	QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
4979 	crp_txminmaxlenerr,
4980 	crp_txdroppedpkt,
4981 	crp_txlenerr,
4982 	crp_txunderrun,
4983 	crp_txunsupvl,
4984 	QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
4985 	QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
4986 	QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
4987 	QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
4988 	crp_rxqpinvalidctxt,
4989 	crp_txhdrerr,
4990 };
4991 
4992 /* do all the setup to make the counter reads efficient later */
static void init_7322_cntrnames(struct qib_devdata *dd)
4994 {
4995 	int i, j = 0;
4996 	char *s;
4997 
4998 	for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
4999 	     i++) {
5000 		/* we always have at least one counter before the egrovfl */
5001 		if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
5002 			j = 1;
5003 		s = strchr(s + 1, '\n');
5004 		if (s && j)
5005 			j++;
5006 	}
5007 	dd->cspec->ncntrs = i;
5008 	if (!s)
5009 		/* full list; size is without terminating null */
5010 		dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
5011 	else
5012 		dd->cspec->cntrnamelen = 1 + s - cntr7322names;
5013 	dd->cspec->cntrs = kmalloc_array(dd->cspec->ncntrs, sizeof(u64),
5014 					 GFP_KERNEL);
5015 
5016 	for (i = 0, s = (char *)portcntr7322names; s; i++)
5017 		s = strchr(s + 1, '\n');
5018 	dd->cspec->nportcntrs = i - 1;
5019 	dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
5020 	for (i = 0; i < dd->num_pports; ++i) {
5021 		dd->pport[i].cpspec->portcntrs =
5022 			kmalloc_array(dd->cspec->nportcntrs, sizeof(u64),
5023 				      GFP_KERNEL);
5024 	}
5025 }
5026 
static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
5028 			      u64 **cntrp)
5029 {
5030 	u32 ret;
5031 
5032 	if (namep) {
5033 		ret = dd->cspec->cntrnamelen;
5034 		if (pos >= ret)
5035 			ret = 0; /* final read after getting everything */
5036 		else
5037 			*namep = (char *) cntr7322names;
5038 	} else {
5039 		u64 *cntr = dd->cspec->cntrs;
5040 		int i;
5041 
5042 		ret = dd->cspec->ncntrs * sizeof(u64);
5043 		if (!cntr || pos >= ret) {
5044 			/* everything read, or couldn't get memory */
5045 			ret = 0;
5046 			goto done;
5047 		}
5048 		*cntrp = cntr;
5049 		for (i = 0; i < dd->cspec->ncntrs; i++)
5050 			if (cntr7322indices[i] & _PORT_64BIT_FLAG)
5051 				*cntr++ = read_7322_creg(dd,
5052 							 cntr7322indices[i] &
5053 							 _PORT_CNTR_IDXMASK);
5054 			else
5055 				*cntr++ = read_7322_creg32(dd,
5056 							   cntr7322indices[i]);
5057 	}
5058 done:
5059 	return ret;
5060 }
5061 
static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
5063 				  char **namep, u64 **cntrp)
5064 {
5065 	u32 ret;
5066 
5067 	if (namep) {
5068 		ret = dd->cspec->portcntrnamelen;
5069 		if (pos >= ret)
5070 			ret = 0; /* final read after getting everything */
5071 		else
5072 			*namep = (char *)portcntr7322names;
5073 	} else {
5074 		struct qib_pportdata *ppd = &dd->pport[port];
5075 		u64 *cntr = ppd->cpspec->portcntrs;
5076 		int i;
5077 
5078 		ret = dd->cspec->nportcntrs * sizeof(u64);
5079 		if (!cntr || pos >= ret) {
5080 			/* everything read, or couldn't get memory */
5081 			ret = 0;
5082 			goto done;
5083 		}
5084 		*cntrp = cntr;
5085 		for (i = 0; i < dd->cspec->nportcntrs; i++) {
5086 			if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
5087 				*cntr++ = qib_portcntr_7322(ppd,
5088 					portcntr7322indices[i] &
5089 					_PORT_CNTR_IDXMASK);
5090 			else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
5091 				*cntr++ = read_7322_creg_port(ppd,
5092 					   portcntr7322indices[i] &
5093 					    _PORT_CNTR_IDXMASK);
5094 			else
5095 				*cntr++ = read_7322_creg32_port(ppd,
5096 					   portcntr7322indices[i]);
5097 		}
5098 	}
5099 done:
5100 	return ret;
5101 }
5102 
5103 /**
5104  * qib_get_7322_faststats - get word counters from chip before they overflow
 * @t: pointer to the stats_timer embedded in the qlogic_ib device qib_devdata
5106  *
 * VESTIGIAL: the IBA7322 has no "small fast counters", so the only
 * real purpose of this function is to maintain the notion of
 * "active time", which in turn is only logged into the eeprom,
 * which we don't have yet for 7322-based boards.
5111  *
5112  * called from add_timer
5113  */
static void qib_get_7322_faststats(struct timer_list *t)
5115 {
5116 	struct qib_devdata *dd = from_timer(dd, t, stats_timer);
5117 	struct qib_pportdata *ppd;
5118 	unsigned long flags;
5119 	u64 traffic_wds;
5120 	int pidx;
5121 
5122 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5123 		ppd = dd->pport + pidx;
5124 
5125 		/*
		 * If the port isn't enabled or isn't operational, or
		 * diags are running (which can cause memory diags to fail),
		 * skip this port this time.
5129 		 */
5130 		if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
5131 		    || dd->diag_client)
5132 			continue;
5133 
5134 		/*
5135 		 * Maintain an activity timer, based on traffic
5136 		 * exceeding a threshold, so we need to check the word-counts
5137 		 * even if they are 64-bit.
5138 		 */
5139 		traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
5140 			qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
5141 		spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
5142 		traffic_wds -= ppd->dd->traffic_wds;
5143 		ppd->dd->traffic_wds += traffic_wds;
5144 		spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
5145 		if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
5146 						QIB_IB_QDR) &&
5147 		    (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
5148 				    QIBL_LINKACTIVE)) &&
5149 		    ppd->cpspec->qdr_dfe_time &&
5150 		    time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
5151 			ppd->cpspec->qdr_dfe_on = 0;
5152 
5153 			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
5154 					    ppd->dd->cspec->r1 ?
5155 					    QDR_STATIC_ADAPT_INIT_R1 :
5156 					    QDR_STATIC_ADAPT_INIT);
5157 			force_h1(ppd);
5158 		}
5159 	}
5160 	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
5161 }
5162 
5163 /*
 * If we were using MSIx, try to fall back to INTx.
5165  */
static int qib_7322_intr_fallback(struct qib_devdata *dd)
5167 {
5168 	if (!dd->cspec->num_msix_entries)
5169 		return 0; /* already using INTx */
5170 
5171 	qib_devinfo(dd->pcidev,
5172 		"MSIx interrupt not detected, trying INTx interrupts\n");
5173 	qib_7322_free_irq(dd);
5174 	if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_LEGACY) < 0)
5175 		qib_dev_err(dd, "Failed to enable INTx\n");
5176 	qib_setup_7322_interrupt(dd, 0);
5177 	return 1;
5178 }
5179 
5180 /*
5181  * Reset the XGXS (between serdes and IBC).  Slightly less intrusive
5182  * than resetting the IBC or external link state, and useful in some
5183  * cases to cause some retraining.  To do this right, we reset IBC
5184  * as well, then return to previous state (which may be still in reset)
5185  * NOTE: some callers of this "know" this writes the current value
 * of cpspec->ibcctrl_a as part of its operation, so if that changes,
5187  * check all callers.
5188  */
static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
5190 {
5191 	u64 val;
5192 	struct qib_devdata *dd = ppd->dd;
5193 	const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
5194 		SYM_MASK(IBPCSConfig_0, xcv_treset) |
5195 		SYM_MASK(IBPCSConfig_0, tx_rx_reset);
5196 
5197 	val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
5198 	qib_write_kreg(dd, kr_hwerrmask,
5199 		       dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
5200 	qib_write_kreg_port(ppd, krp_ibcctrl_a,
5201 			    ppd->cpspec->ibcctrl_a &
5202 			    ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
5203 
5204 	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
5205 	qib_read_kreg32(dd, kr_scratch);
5206 	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
5207 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
5208 	qib_write_kreg(dd, kr_scratch, 0ULL);
5209 	qib_write_kreg(dd, kr_hwerrclear,
5210 		       SYM_MASK(HwErrClear, statusValidNoEopClear));
5211 	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
5212 }
5213 
5214 /*
5215  * This code for non-IBTA-compliant IB speed negotiation is only known to
5216  * work for the SDR to DDR transition, and only between an HCA and a switch
5217  * with recent firmware.  It is based on observed heuristics, rather than
5218  * actual knowledge of the non-compliant speed negotiation.
5219  * It has a number of hard-coded fields, since the hope is to rewrite this
 * when a spec is available on how the negotiation is intended to work.
5221  */
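/*
 * Rough sequence, summarized here from the code below: try_7322_autoneg()
 * sends the "start" MADs, switches the IBC to DDR via
 * set_7322_ibspeed_fast(), resets the PCS, and schedules autoneg_7322_work()
 * to wait for the link to retrain (or to give up and restore the enabled
 * speeds).
 */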
static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
5223 				 u32 dcnt, u32 *data)
5224 {
5225 	int i;
5226 	u64 pbc;
5227 	u32 __iomem *piobuf;
5228 	u32 pnum, control, len;
5229 	struct qib_devdata *dd = ppd->dd;
5230 
5231 	i = 0;
5232 	len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
5233 	control = qib_7322_setpbc_control(ppd, len, 0, 15);
5234 	pbc = ((u64) control << 32) | len;
5235 	while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
5236 		if (i++ > 15)
5237 			return;
5238 		udelay(2);
5239 	}
5240 	/* disable header check on this packet, since it can't be valid */
5241 	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
5242 	writeq(pbc, piobuf);
5243 	qib_flush_wc();
5244 	qib_pio_copy(piobuf + 2, hdr, 7);
5245 	qib_pio_copy(piobuf + 9, data, dcnt);
5246 	if (dd->flags & QIB_USE_SPCL_TRIG) {
5247 		u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
5248 
5249 		qib_flush_wc();
5250 		__raw_writel(0xaebecede, piobuf + spcl_off);
5251 	}
5252 	qib_flush_wc();
5253 	qib_sendbuf_done(dd, pnum);
5254 	/* and re-enable hdr check */
5255 	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
5256 }
5257 
5258 /*
5259  * _start packet gets sent twice at start, _done gets sent twice at end
5260  */
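/*
 * Illustrative use of "which", matching the callers later in this file:
 *	qib_autoneg_7322_send(ppd, 0);	sends madpayload_start twice
 *	qib_autoneg_7322_send(ppd, 1);	sends madpayload_done twice
 */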
static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
5262 {
5263 	struct qib_devdata *dd = ppd->dd;
5264 	static u32 swapped;
5265 	u32 dw, i, hcnt, dcnt, *data;
5266 	static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
5267 	static u32 madpayload_start[0x40] = {
5268 		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5269 		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5270 		0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
5271 		};
5272 	static u32 madpayload_done[0x40] = {
5273 		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5274 		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5275 		0x40000001, 0x1388, 0x15e, /* rest 0's */
5276 		};
5277 
5278 	dcnt = ARRAY_SIZE(madpayload_start);
5279 	hcnt = ARRAY_SIZE(hdr);
5280 	if (!swapped) {
5281 		/* for maintainability, do it at runtime */
5282 		for (i = 0; i < hcnt; i++) {
5283 			dw = (__force u32) cpu_to_be32(hdr[i]);
5284 			hdr[i] = dw;
5285 		}
5286 		for (i = 0; i < dcnt; i++) {
5287 			dw = (__force u32) cpu_to_be32(madpayload_start[i]);
5288 			madpayload_start[i] = dw;
5289 			dw = (__force u32) cpu_to_be32(madpayload_done[i]);
5290 			madpayload_done[i] = dw;
5291 		}
5292 		swapped = 1;
5293 	}
5294 
5295 	data = which ? madpayload_done : madpayload_start;
5296 
5297 	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5298 	qib_read_kreg64(dd, kr_scratch);
5299 	udelay(2);
5300 	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5301 	qib_read_kreg64(dd, kr_scratch);
5302 	udelay(2);
5303 }
5304 
5305 /*
5306  * Do the absolute minimum to cause an IB speed change, and make it
5307  * ready, but don't actually trigger the change.   The caller will
5308  * do that when ready (if link is in Polling training state, it will
5309  * happen immediately, otherwise when link next goes down)
5310  *
 * This routine should only be used as part of the DDR autonegotiation
5312  * code for devices that are not compliant with IB 1.2 (or code that
5313  * fixes things up for same).
5314  *
 * When the link has gone down and autoneg is enabled, or autoneg has
 * failed and we give up until the next time, we set both speeds, and
 * then we want IBTA enabled as well as "use max enabled speed".
5318  */
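/*
 * Illustrative: a single speed such as QIB_IB_DDR programs just that rate,
 * while an OR of several speeds (speed & (speed - 1) is nonzero) also sets
 * the IBTA 1.2 and "max enabled speed" bits, as the code below shows.
 */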
static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
5320 {
5321 	u64 newctrlb;
5322 
5323 	newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
5324 				    IBA7322_IBC_IBTA_1_2_MASK |
5325 				    IBA7322_IBC_MAX_SPEED_MASK);
5326 
5327 	if (speed & (speed - 1)) /* multiple speeds */
5328 		newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
5329 				    IBA7322_IBC_IBTA_1_2_MASK |
5330 				    IBA7322_IBC_MAX_SPEED_MASK;
5331 	else
5332 		newctrlb |= speed == QIB_IB_QDR ?
5333 			IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
5334 			((speed == QIB_IB_DDR ?
5335 			  IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
5336 
5337 	if (newctrlb == ppd->cpspec->ibcctrl_b)
5338 		return;
5339 
5340 	ppd->cpspec->ibcctrl_b = newctrlb;
5341 	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
5342 	qib_write_kreg(ppd->dd, kr_scratch, 0);
5343 }
5344 
5345 /*
5346  * This routine is only used when we are not talking to another
5347  * IB 1.2-compliant device that we think can do DDR.
5348  * (This includes all existing switch chips as of Oct 2007.)
5349  * 1.2-compliant devices go directly to DDR prior to reaching INIT
5350  */
static void try_7322_autoneg(struct qib_pportdata *ppd)
5352 {
5353 	unsigned long flags;
5354 
5355 	spin_lock_irqsave(&ppd->lflags_lock, flags);
5356 	ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
5357 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5358 	qib_autoneg_7322_send(ppd, 0);
5359 	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5360 	qib_7322_mini_pcs_reset(ppd);
5361 	/* 2 msec is minimum length of a poll cycle */
5362 	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
5363 			   msecs_to_jiffies(2));
5364 }
5365 
5366 /*
5367  * Handle the empirically determined mechanism for auto-negotiation
5368  * of DDR speed with switches.
5369  */
static void autoneg_7322_work(struct work_struct *work)
5371 {
5372 	struct qib_pportdata *ppd;
5373 	u32 i;
5374 	unsigned long flags;
5375 
5376 	ppd = container_of(work, struct qib_chippport_specific,
5377 			    autoneg_work.work)->ppd;
5378 
5379 	/*
	 * Busy-wait for this first part; it should take at most a
	 * few hundred usec, since we scheduled ourselves for 2 msec.
5382 	 */
5383 	for (i = 0; i < 25; i++) {
5384 		if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
5385 		     == IB_7322_LT_STATE_POLLQUIET) {
5386 			qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
5387 			break;
5388 		}
5389 		udelay(100);
5390 	}
5391 
5392 	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
5393 		goto done; /* we got there early or told to stop */
5394 
5395 	/* we expect this to timeout */
5396 	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5397 			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5398 			       msecs_to_jiffies(90)))
5399 		goto done;
5400 	qib_7322_mini_pcs_reset(ppd);
5401 
5402 	/* we expect this to timeout */
5403 	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5404 			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5405 			       msecs_to_jiffies(1700)))
5406 		goto done;
5407 	qib_7322_mini_pcs_reset(ppd);
5408 
5409 	set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
5410 
5411 	/*
5412 	 * Wait up to 250 msec for link to train and get to INIT at DDR;
5413 	 * this should terminate early.
5414 	 */
5415 	wait_event_timeout(ppd->cpspec->autoneg_wait,
5416 		!(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5417 		msecs_to_jiffies(250));
5418 done:
5419 	if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
5420 		spin_lock_irqsave(&ppd->lflags_lock, flags);
5421 		ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
5422 		if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
5423 			ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
5424 			ppd->cpspec->autoneg_tries = 0;
5425 		}
5426 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5427 		set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5428 	}
5429 }
5430 
5431 /*
 * This routine is used to request that the IPG be set in the QLogic switch.
5433  * Only called if r1.
5434  */
static void try_7322_ipg(struct qib_pportdata *ppd)
5436 {
5437 	struct qib_ibport *ibp = &ppd->ibport_data;
5438 	struct ib_mad_send_buf *send_buf;
5439 	struct ib_mad_agent *agent;
5440 	struct ib_smp *smp;
5441 	unsigned delay;
5442 	int ret;
5443 
5444 	agent = ibp->rvp.send_agent;
5445 	if (!agent)
5446 		goto retry;
5447 
5448 	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
5449 				      IB_MGMT_MAD_DATA, GFP_ATOMIC,
5450 				      IB_MGMT_BASE_VERSION);
5451 	if (IS_ERR(send_buf))
5452 		goto retry;
5453 
5454 	if (!ibp->smi_ah) {
5455 		struct ib_ah *ah;
5456 
5457 		ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
5458 		if (IS_ERR(ah))
5459 			ret = PTR_ERR(ah);
5460 		else {
5461 			send_buf->ah = ah;
5462 			ibp->smi_ah = ibah_to_rvtah(ah);
5463 			ret = 0;
5464 		}
5465 	} else {
5466 		send_buf->ah = &ibp->smi_ah->ibah;
5467 		ret = 0;
5468 	}
5469 
5470 	smp = send_buf->mad;
5471 	smp->base_version = IB_MGMT_BASE_VERSION;
5472 	smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
5473 	smp->class_version = 1;
5474 	smp->method = IB_MGMT_METHOD_SEND;
5475 	smp->hop_cnt = 1;
5476 	smp->attr_id = QIB_VENDOR_IPG;
5477 	smp->attr_mod = 0;
5478 
5479 	if (!ret)
5480 		ret = ib_post_send_mad(send_buf, NULL);
5481 	if (ret)
5482 		ib_free_send_mad(send_buf);
5483 retry:
5484 	delay = 2 << ppd->cpspec->ipg_tries;
5485 	queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
5486 			   msecs_to_jiffies(delay));
5487 }
5488 
5489 /*
5490  * Timeout handler for setting IPG.
5491  * Only called if r1.
5492  */
static void ipg_7322_work(struct work_struct *work)
5494 {
5495 	struct qib_pportdata *ppd;
5496 
5497 	ppd = container_of(work, struct qib_chippport_specific,
5498 			   ipg_work.work)->ppd;
5499 	if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
5500 	    && ++ppd->cpspec->ipg_tries <= 10)
5501 		try_7322_ipg(ppd);
5502 }
5503 
static u32 qib_7322_iblink_state(u64 ibcs)
5505 {
5506 	u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
5507 
5508 	switch (state) {
5509 	case IB_7322_L_STATE_INIT:
5510 		state = IB_PORT_INIT;
5511 		break;
5512 	case IB_7322_L_STATE_ARM:
5513 		state = IB_PORT_ARMED;
5514 		break;
5515 	case IB_7322_L_STATE_ACTIVE:
5516 		/* fall through */
5517 	case IB_7322_L_STATE_ACT_DEFER:
5518 		state = IB_PORT_ACTIVE;
5519 		break;
5520 	default: /* fall through */
5521 	case IB_7322_L_STATE_DOWN:
5522 		state = IB_PORT_DOWN;
5523 		break;
5524 	}
5525 	return state;
5526 }
5527 
5528 /* returns the IBTA port state, rather than the IBC link training state */
static u8 qib_7322_phys_portstate(u64 ibcs)
5530 {
5531 	u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
5532 	return qib_7322_physportstate[state];
5533 }
5534 
static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5536 {
5537 	int ret = 0, symadj = 0;
5538 	unsigned long flags;
5539 	int mult;
5540 
5541 	spin_lock_irqsave(&ppd->lflags_lock, flags);
5542 	ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
5543 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5544 
5545 	/* Update our picture of width and speed from chip */
5546 	if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
5547 		ppd->link_speed_active = QIB_IB_QDR;
5548 		mult = 4;
5549 	} else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
5550 		ppd->link_speed_active = QIB_IB_DDR;
5551 		mult = 2;
5552 	} else {
5553 		ppd->link_speed_active = QIB_IB_SDR;
5554 		mult = 1;
5555 	}
5556 	if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
5557 		ppd->link_width_active = IB_WIDTH_4X;
5558 		mult *= 4;
5559 	} else
5560 		ppd->link_width_active = IB_WIDTH_1X;
5561 	ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
5562 
5563 	if (!ibup) {
5564 		u64 clr;
5565 
5566 		/* Link went down. */
5567 		/* do IPG MAD again after linkdown, even if last time failed */
5568 		ppd->cpspec->ipg_tries = 0;
5569 		clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
5570 			(SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
5571 			 SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
5572 		if (clr)
5573 			qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
5574 		if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5575 				     QIBL_IB_AUTONEG_INPROG)))
5576 			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5577 		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5578 			struct qib_qsfp_data *qd =
5579 				&ppd->cpspec->qsfp_data;
5580 			/* unlock the Tx settings, speed may change */
5581 			qib_write_kreg_port(ppd, krp_tx_deemph_override,
5582 				SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
5583 				reset_tx_deemphasis_override));
5584 			qib_cancel_sends(ppd);
5585 			/* on link down, ensure sane pcs state */
5586 			qib_7322_mini_pcs_reset(ppd);
5587 			/* schedule the qsfp refresh which should turn the link
5588 			   off */
5589 			if (ppd->dd->flags & QIB_HAS_QSFP) {
5590 				qd->t_insert = jiffies;
5591 				queue_work(ib_wq, &qd->work);
5592 			}
5593 			spin_lock_irqsave(&ppd->sdma_lock, flags);
5594 			if (__qib_sdma_running(ppd))
5595 				__qib_sdma_process_event(ppd,
5596 					qib_sdma_event_e70_go_idle);
5597 			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
5598 		}
5599 		clr = read_7322_creg32_port(ppd, crp_iblinkdown);
5600 		if (clr == ppd->cpspec->iblnkdownsnap)
5601 			ppd->cpspec->iblnkdowndelta++;
5602 	} else {
5603 		if (qib_compat_ddr_negotiate &&
5604 		    !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5605 				     QIBL_IB_AUTONEG_INPROG)) &&
5606 		    ppd->link_speed_active == QIB_IB_SDR &&
5607 		    (ppd->link_speed_enabled & QIB_IB_DDR)
5608 		    && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
5609 			/* we are SDR, and auto-negotiation enabled */
5610 			++ppd->cpspec->autoneg_tries;
5611 			if (!ppd->cpspec->ibdeltainprog) {
5612 				ppd->cpspec->ibdeltainprog = 1;
5613 				ppd->cpspec->ibsymdelta +=
5614 					read_7322_creg32_port(ppd,
5615 						crp_ibsymbolerr) -
5616 						ppd->cpspec->ibsymsnap;
5617 				ppd->cpspec->iblnkerrdelta +=
5618 					read_7322_creg32_port(ppd,
5619 						crp_iblinkerrrecov) -
5620 						ppd->cpspec->iblnkerrsnap;
5621 			}
5622 			try_7322_autoneg(ppd);
5623 			ret = 1; /* no other IB status change processing */
5624 		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5625 			   ppd->link_speed_active == QIB_IB_SDR) {
5626 			qib_autoneg_7322_send(ppd, 1);
5627 			set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5628 			qib_7322_mini_pcs_reset(ppd);
5629 			udelay(2);
5630 			ret = 1; /* no other IB status change processing */
5631 		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5632 			   (ppd->link_speed_active & QIB_IB_DDR)) {
5633 			spin_lock_irqsave(&ppd->lflags_lock, flags);
5634 			ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
5635 					 QIBL_IB_AUTONEG_FAILED);
5636 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5637 			ppd->cpspec->autoneg_tries = 0;
5638 			/* re-enable SDR, for next link down */
5639 			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5640 			wake_up(&ppd->cpspec->autoneg_wait);
5641 			symadj = 1;
5642 		} else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
5643 			/*
5644 			 * Clear autoneg failure flag, and do setup
5645 			 * so we'll try next time link goes down and
5646 			 * back to INIT (possibly connected to a
5647 			 * different device).
5648 			 */
5649 			spin_lock_irqsave(&ppd->lflags_lock, flags);
5650 			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
5651 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5652 			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
5653 			symadj = 1;
5654 		}
5655 		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5656 			symadj = 1;
5657 			if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
5658 				try_7322_ipg(ppd);
5659 			if (!ppd->cpspec->recovery_init)
5660 				setup_7322_link_recovery(ppd, 0);
5661 			ppd->cpspec->qdr_dfe_time = jiffies +
5662 				msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
5663 		}
5664 		ppd->cpspec->ibmalfusesnap = 0;
5665 		ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
5666 			crp_errlink);
5667 	}
5668 	if (symadj) {
5669 		ppd->cpspec->iblnkdownsnap =
5670 			read_7322_creg32_port(ppd, crp_iblinkdown);
5671 		if (ppd->cpspec->ibdeltainprog) {
5672 			ppd->cpspec->ibdeltainprog = 0;
5673 			ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
5674 				crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
5675 			ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
5676 				crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
5677 		}
5678 	} else if (!ibup && qib_compat_ddr_negotiate &&
5679 		   !ppd->cpspec->ibdeltainprog &&
5680 			!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5681 		ppd->cpspec->ibdeltainprog = 1;
5682 		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
5683 			crp_ibsymbolerr);
5684 		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
5685 			crp_iblinkerrrecov);
5686 	}
5687 
5688 	if (!ret)
5689 		qib_setup_7322_setextled(ppd, ibup);
5690 	return ret;
5691 }
5692 
5693 /*
5694  * Does read/modify/write to appropriate registers to
5695  * set output and direction bits selected by mask.
 * These are in their canonical positions (e.g. the lsb of
 * dir will end up in D48 of extctrl on existing chips).
 * Returns contents of GP Inputs.
5699  */
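/*
 * Usage examples from this file: gpio_7322_mod(dd, 0, 0, 0) with an empty
 * mask just samples the GPIO inputs, while
 * gpio_7322_mod(dd, wen ? 0 : mask, mask, mask) drives the EEPROM
 * write-enable pin (see qib_7322_eeprom_wen() below).
 */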
static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
5701 {
5702 	u64 read_val, new_out;
5703 	unsigned long flags;
5704 
5705 	if (mask) {
5706 		/* some bits being written, lock access to GPIO */
5707 		dir &= mask;
5708 		out &= mask;
5709 		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5710 		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
5711 		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
5712 		new_out = (dd->cspec->gpio_out & ~mask) | out;
5713 
5714 		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5715 		qib_write_kreg(dd, kr_gpio_out, new_out);
5716 		dd->cspec->gpio_out = new_out;
5717 		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5718 	}
5719 	/*
5720 	 * It is unlikely that a read at this time would get valid
5721 	 * data on a pin whose direction line was set in the same
5722 	 * call to this function. We include the read here because
5723 	 * that allows us to potentially combine a change on one pin with
5724 	 * a read on another, and because the old code did something like
5725 	 * this.
5726 	 */
5727 	read_val = qib_read_kreg64(dd, kr_extstatus);
5728 	return SYM_FIELD(read_val, EXTStatus, GPIOIn);
5729 }
5730 
5731 /* Enable writes to config EEPROM, if possible. Returns previous state */
static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
5733 {
5734 	int prev_wen;
5735 	u32 mask;
5736 
5737 	mask = 1 << QIB_EEPROM_WEN_NUM;
5738 	prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
5739 	gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
5740 
5741 	return prev_wen & 1;
5742 }
5743 
5744 /*
5745  * Read fundamental info we need to use the chip.  These are
5746  * the registers that describe chip capabilities, and are
5747  * saved in shadow registers.
5748  */
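/*
 * Several of these registers pack two values into one 64-bit word; for
 * example, kr_sendpiobufcnt holds the 2K-buffer count in the low 32 bits
 * and the 4K-buffer count in the high 32 bits, as unpacked below.
 */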
static void get_7322_chip_params(struct qib_devdata *dd)
5750 {
5751 	u64 val;
5752 	u32 piobufs;
5753 	int mtu;
5754 
5755 	dd->palign = qib_read_kreg32(dd, kr_pagealign);
5756 
5757 	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
5758 
5759 	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
5760 	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
5761 	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
5762 	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
5763 	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
5764 
5765 	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
5766 	dd->piobcnt2k = val & ~0U;
5767 	dd->piobcnt4k = val >> 32;
5768 	val = qib_read_kreg64(dd, kr_sendpiosize);
5769 	dd->piosize2k = val & ~0U;
5770 	dd->piosize4k = val >> 32;
5771 
5772 	mtu = ib_mtu_enum_to_int(qib_ibmtu);
5773 	if (mtu == -1)
5774 		mtu = QIB_DEFAULT_MTU;
5775 	dd->pport[0].ibmtu = (u32)mtu;
5776 	dd->pport[1].ibmtu = (u32)mtu;
5777 
5778 	/* these may be adjusted in init_chip_wc_pat() */
5779 	dd->pio2kbase = (u32 __iomem *)
5780 		((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
5781 	dd->pio4kbase = (u32 __iomem *)
5782 		((char __iomem *) dd->kregbase +
5783 		 (dd->piobufbase >> 32));
5784 	/*
5785 	 * 4K buffers take 2 pages; we use roundup just to be
5786 	 * paranoid; we calculate it once here, rather than on
	 * every buf allocation.
5788 	 */
5789 	dd->align4k = ALIGN(dd->piosize4k, dd->palign);
5790 
5791 	piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
5792 
5793 	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
5794 		(sizeof(u64) * BITS_PER_BYTE / 2);
5795 }
5796 
5797 /*
5798  * The chip base addresses in cspec and cpspec have to be set
5799  * after possible init_chip_wc_pat(), rather than in
5800  * get_7322_chip_params(), so split out as separate function
5801  */
static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
5803 {
5804 	u32 cregbase;
5805 
5806 	cregbase = qib_read_kreg32(dd, kr_counterregbase);
5807 
5808 	dd->cspec->cregbase = (u64 __iomem *)(cregbase +
5809 		(char __iomem *)dd->kregbase);
5810 
5811 	dd->egrtidbase = (u64 __iomem *)
5812 		((char __iomem *) dd->kregbase + dd->rcvegrbase);
5813 
5814 	/* port registers are defined as relative to base of chip */
5815 	dd->pport[0].cpspec->kpregbase =
5816 		(u64 __iomem *)((char __iomem *)dd->kregbase);
5817 	dd->pport[1].cpspec->kpregbase =
5818 		(u64 __iomem *)(dd->palign +
5819 		(char __iomem *)dd->kregbase);
5820 	dd->pport[0].cpspec->cpregbase =
5821 		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
5822 		kr_counterregbase) + (char __iomem *)dd->kregbase);
5823 	dd->pport[1].cpspec->cpregbase =
5824 		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
5825 		kr_counterregbase) + (char __iomem *)dd->kregbase);
5826 }
5827 
5828 /*
5829  * This is a fairly special-purpose observer, so we only support
5830  * the port-specific parts of SendCtrl
5831  */
5832 
5833 #define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) |		\
5834 			   SYM_MASK(SendCtrl_0, SDmaEnable) |		\
5835 			   SYM_MASK(SendCtrl_0, SDmaIntEnable) |	\
5836 			   SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
5837 			   SYM_MASK(SendCtrl_0, SDmaHalt) |		\
5838 			   SYM_MASK(SendCtrl_0, IBVLArbiterEn) |	\
5839 			   SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
5840 
static int sendctrl_hook(struct qib_devdata *dd,
5842 			 const struct diag_observer *op, u32 offs,
5843 			 u64 *data, u64 mask, int only_32)
5844 {
5845 	unsigned long flags;
5846 	unsigned idx;
5847 	unsigned pidx;
5848 	struct qib_pportdata *ppd = NULL;
5849 	u64 local_data, all_bits;
5850 
5851 	/*
5852 	 * The fixed correspondence between Physical ports and pports is
5853 	 * severed. We need to hunt for the ppd that corresponds
5854 	 * to the offset we got. And we have to do that without admitting
5855 	 * we know the stride, apparently.
5856 	 */
5857 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5858 		u64 __iomem *psptr;
5859 		u32 psoffs;
5860 
5861 		ppd = dd->pport + pidx;
5862 		if (!ppd->cpspec->kpregbase)
5863 			continue;
5864 
5865 		psptr = ppd->cpspec->kpregbase + krp_sendctrl;
5866 		psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
5867 		if (psoffs == offs)
5868 			break;
5869 	}
5870 
5871 	/* If pport is not being managed by driver, just avoid shadows. */
5872 	if (pidx >= dd->num_pports)
5873 		ppd = NULL;
5874 
5875 	/* In any case, "idx" is flat index in kreg space */
5876 	idx = offs / sizeof(u64);
5877 
5878 	all_bits = ~0ULL;
5879 	if (only_32)
5880 		all_bits >>= 32;
5881 
5882 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
5883 	if (!ppd || (mask & all_bits) != all_bits) {
5884 		/*
5885 		 * At least some mask bits are zero, so we need
5886 		 * to read. The judgement call is whether from
5887 		 * reg or shadow. First-cut: read reg, and complain
5888 		 * if any bits which should be shadowed are different
5889 		 * from their shadowed value.
5890 		 */
5891 		if (only_32)
5892 			local_data = (u64)qib_read_kreg32(dd, idx);
5893 		else
5894 			local_data = qib_read_kreg64(dd, idx);
5895 		*data = (local_data & ~mask) | (*data & mask);
5896 	}
5897 	if (mask) {
5898 		/*
5899 		 * At least some mask bits are one, so we need
5900 		 * to write, but only shadow some bits.
5901 		 */
5902 		u64 sval, tval; /* Shadowed, transient */
5903 
5904 		/*
5905 		 * New shadow val is bits we don't want to touch,
5906 		 * ORed with bits we do, that are intended for shadow.
5907 		 */
5908 		if (ppd) {
5909 			sval = ppd->p_sendctrl & ~mask;
5910 			sval |= *data & SENDCTRL_SHADOWED & mask;
5911 			ppd->p_sendctrl = sval;
5912 		} else
5913 			sval = *data & SENDCTRL_SHADOWED & mask;
5914 		tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
5915 		qib_write_kreg(dd, idx, tval);
5916 		qib_write_kreg(dd, kr_scratch, 0Ull);
5917 	}
5918 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
5919 	return only_32 ? 4 : 8;
5920 }
5921 
5922 static const struct diag_observer sendctrl_0_observer = {
5923 	sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
5924 	KREG_IDX(SendCtrl_0) * sizeof(u64)
5925 };
5926 
5927 static const struct diag_observer sendctrl_1_observer = {
5928 	sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
5929 	KREG_IDX(SendCtrl_1) * sizeof(u64)
5930 };
5931 
5932 static ushort sdma_fetch_prio = 8;
5933 module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
5934 MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
5935 
5936 /* Besides logging QSFP events, we set appropriate TxDDS values */
5937 static void init_txdds_table(struct qib_pportdata *ppd, int override);
5938 
static void qsfp_7322_event(struct work_struct *work)
5940 {
5941 	struct qib_qsfp_data *qd;
5942 	struct qib_pportdata *ppd;
5943 	unsigned long pwrup;
5944 	unsigned long flags;
5945 	int ret;
5946 	u32 le2;
5947 
5948 	qd = container_of(work, struct qib_qsfp_data, work);
5949 	ppd = qd->ppd;
5950 	pwrup = qd->t_insert +
5951 		msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);
5952 
5953 	/* Delay for 20 msecs to allow ModPrs resistor to setup */
5954 	mdelay(QSFP_MODPRS_LAG_MSEC);
5955 
5956 	if (!qib_qsfp_mod_present(ppd)) {
5957 		ppd->cpspec->qsfp_data.modpresent = 0;
5958 		/* Set the physical link to disabled */
5959 		qib_set_ib_7322_lstate(ppd, 0,
5960 				       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
5961 		spin_lock_irqsave(&ppd->lflags_lock, flags);
5962 		ppd->lflags &= ~QIBL_LINKV;
5963 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5964 	} else {
5965 		/*
		 * Some QSFPs not only do not respond until the full power-up
5967 		 * time, but may behave badly if we try. So hold off responding
5968 		 * to insertion.
5969 		 */
5970 		while (1) {
5971 			if (time_is_before_jiffies(pwrup))
5972 				break;
5973 			msleep(20);
5974 		}
5975 
5976 		ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
5977 
5978 		/*
5979 		 * Need to change LE2 back to defaults if we couldn't
5980 		 * read the cable type (to handle cable swaps), so do this
5981 		 * even on failure to read cable information.  We don't
5982 		 * get here for QME, so IS_QME check not needed here.
5983 		 */
5984 		if (!ret && !ppd->dd->cspec->r1) {
5985 			if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
5986 				le2 = LE2_QME;
5987 			else if (qd->cache.atten[1] >= qib_long_atten &&
5988 				 QSFP_IS_CU(qd->cache.tech))
5989 				le2 = LE2_5m;
5990 			else
5991 				le2 = LE2_DEFAULT;
5992 		} else
5993 			le2 = LE2_DEFAULT;
5994 		ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
5995 		/*
		 * We always change parameters, since we can choose
5997 		 * values for cables without eeproms, and the cable may have
5998 		 * changed from a cable with full or partial eeprom content
5999 		 * to one with partial or no content.
6000 		 */
6001 		init_txdds_table(ppd, 0);
6002 		/* The physical link is being re-enabled only when the
6003 		 * previous state was DISABLED and the VALID bit is not
		 * set. This should only happen when the cable has been
6005 		 * physically pulled. */
6006 		if (!ppd->cpspec->qsfp_data.modpresent &&
6007 		    (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
6008 			ppd->cpspec->qsfp_data.modpresent = 1;
6009 			qib_set_ib_7322_lstate(ppd, 0,
6010 				QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
6011 			spin_lock_irqsave(&ppd->lflags_lock, flags);
6012 			ppd->lflags |= QIBL_LINKV;
6013 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
6014 		}
6015 	}
6016 }
6017 
6018 /*
6019  * There is little we can do but complain to the user if QSFP
6020  * initialization fails.
6021  */
static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
6023 {
6024 	unsigned long flags;
6025 	struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
6026 	struct qib_devdata *dd = ppd->dd;
6027 	u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
6028 
6029 	mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
6030 	qd->ppd = ppd;
6031 	qib_qsfp_init(qd, qsfp_7322_event);
6032 	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
6033 	dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
6034 	dd->cspec->gpio_mask |= mod_prs_bit;
6035 	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
6036 	qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
6037 	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
6038 }
6039 
6040 /*
6041  * called at device initialization time, and also if the txselect
6042  * module parameter is changed.  This is used for cables that don't
6043  * have valid QSFP EEPROMs (not present, or attenuation is zero).
6044  * We initialize to the default, then if there is a specific
6045  * unit,port match, we use that (and set it immediately, for the
6046  * current speed, if the link is at INIT or better).
6047  * String format is "default# unit#,port#=# ... u,p=#", separators must
6048  * be a SPACE character.  A newline terminates.  The u,p=# tuples may
 * optionally have "u,p=#,#", where the final # is the H1 value.
6050  * The last specific match is used (actually, all are used, but last
6051  * one is the one that winds up set); if none at all, fall back on default.
6052  */
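/*
 * Illustrative (made-up values): the string "2 0,1=12 0,2=14,7" selects
 * table entry 2 as the default, entry 12 for unit 0 port 1, and entry 14
 * with an H1 value of 7 for unit 0 port 2.
 */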
static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
6054 {
6055 	char *nxt, *str;
6056 	u32 pidx, unit, port, deflt, h1;
6057 	unsigned long val;
6058 	int any = 0, seth1;
6059 	int txdds_size;
6060 
6061 	str = txselect_list;
6062 
6063 	/* default number is validated in setup_txselect() */
6064 	deflt = simple_strtoul(str, &nxt, 0);
6065 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
6066 		dd->pport[pidx].cpspec->no_eep = deflt;
6067 
6068 	txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
6069 	if (IS_QME(dd) || IS_QMH(dd))
6070 		txdds_size += TXDDS_MFG_SZ;
6071 
6072 	while (*nxt && nxt[1]) {
6073 		str = ++nxt;
6074 		unit = simple_strtoul(str, &nxt, 0);
6075 		if (nxt == str || !*nxt || *nxt != ',') {
6076 			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6077 				;
6078 			continue;
6079 		}
6080 		str = ++nxt;
6081 		port = simple_strtoul(str, &nxt, 0);
6082 		if (nxt == str || *nxt != '=') {
6083 			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6084 				;
6085 			continue;
6086 		}
6087 		str = ++nxt;
6088 		val = simple_strtoul(str, &nxt, 0);
6089 		if (nxt == str) {
6090 			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6091 				;
6092 			continue;
6093 		}
6094 		if (val >= txdds_size)
6095 			continue;
6096 		seth1 = 0;
6097 		h1 = 0; /* gcc thinks it might be used uninitted */
6098 		if (*nxt == ',' && nxt[1]) {
6099 			str = ++nxt;
6100 			h1 = (u32)simple_strtoul(str, &nxt, 0);
6101 			if (nxt == str)
6102 				while (*nxt && *nxt++ != ' ') /* skip */
6103 					;
6104 			else
6105 				seth1 = 1;
6106 		}
6107 		for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
6108 		     ++pidx) {
6109 			struct qib_pportdata *ppd = &dd->pport[pidx];
6110 
6111 			if (ppd->port != port || !ppd->link_speed_supported)
6112 				continue;
6113 			ppd->cpspec->no_eep = val;
6114 			if (seth1)
6115 				ppd->cpspec->h1_val = h1;
6116 			/* now change the IBC and serdes, overriding generic */
6117 			init_txdds_table(ppd, 1);
6118 			/* Re-enable the physical state machine on mezz boards
6119 			 * now that the correct settings have been set.
6120 			 * QSFP boards are handled by the QSFP event handler. */
6121 			if (IS_QMH(dd) || IS_QME(dd))
6122 				qib_set_ib_7322_lstate(ppd, 0,
6123 					    QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
6124 			any++;
6125 		}
6126 		if (*nxt == '\n')
6127 			break; /* done */
6128 	}
6129 	if (change && !any) {
6130 		/* no specific setting, use the default.
6131 		 * Change the IBC and serdes, but since it's
6132 		 * general, don't override specific settings.
6133 		 */
6134 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
6135 			if (dd->pport[pidx].link_speed_supported)
6136 				init_txdds_table(&dd->pport[pidx], 0);
6137 	}
6138 }
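
/*
 * A minimal, illustrative sketch (guarded out, never built): a
 * hypothetical txselect string and how set_no_qsfp_atten() above would
 * interpret it.  The unit/port numbers and indices are made up for the
 * example only.
 */
#if 0
static const char example_txselect[] = "2 0,1=11 0,2=12,7\n";
/*
 *  "2"        - default: every port starts with TxDDS index 2
 *  "0,1=11"   - unit 0, port 1 is overridden to TxDDS index 11
 *  "0,2=12,7" - unit 0, port 2 uses index 12 with an H1 value of 7
 */
#endif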
6139 
6140 /* handle the txselect parameter changing */
6141 static int setup_txselect(const char *str, const struct kernel_param *kp)
6142 {
6143 	struct qib_devdata *dd;
6144 	unsigned long val;
6145 	char *n;
6146 
6147 	if (strlen(str) >= ARRAY_SIZE(txselect_list)) {
6148 		pr_info("txselect_values string too long\n");
6149 		return -ENOSPC;
6150 	}
6151 	val = simple_strtoul(str, &n, 0);
6152 	if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
6153 				TXDDS_MFG_SZ)) {
6154 		pr_info("txselect_values must start with a number < %d\n",
6155 			TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
6156 		return -EINVAL;
6157 	}
6158 	strncpy(txselect_list, str, ARRAY_SIZE(txselect_list) - 1);
6159 
6160 	list_for_each_entry(dd, &qib_dev_list, list)
6161 		if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
6162 			set_no_qsfp_atten(dd, 1);
6163 	return 0;
6164 }
6165 
6166 /*
6167  * Write the final few registers that depend on some of the
6168  * init setup.  Done late in init, just before bringing up
6169  * the serdes.
6170  */
6171 static int qib_late_7322_initreg(struct qib_devdata *dd)
6172 {
6173 	int ret = 0, n;
6174 	u64 val;
6175 
6176 	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
6177 	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
6178 	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
6179 	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
6180 	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
6181 	if (val != dd->pioavailregs_phys) {
6182 		qib_dev_err(dd,
6183 			"Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
6184 			(unsigned long) dd->pioavailregs_phys,
6185 			(unsigned long long) val);
6186 		ret = -EINVAL;
6187 	}
6188 
6189 	n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
6190 	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
6191 	/* driver sends also get pkey, lid, etc. checking, to catch bugs */
6192 	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
6193 
6194 	qib_register_observer(dd, &sendctrl_0_observer);
6195 	qib_register_observer(dd, &sendctrl_1_observer);
6196 
6197 	dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
6198 	qib_write_kreg(dd, kr_control, dd->control);
6199 	/*
6200 	 * Set SendDmaFetchPriority and init Tx params, including
6201 	 * QSFP handler on boards that have QSFP.
6202 	 * First set our default attenuation entry for cables that
6203 	 * don't have valid attenuation.
6204 	 */
6205 	set_no_qsfp_atten(dd, 0);
6206 	for (n = 0; n < dd->num_pports; ++n) {
6207 		struct qib_pportdata *ppd = dd->pport + n;
6208 
6209 		qib_write_kreg_port(ppd, krp_senddmaprioritythld,
6210 				    sdma_fetch_prio & 0xf);
6211 		/* Initialize qsfp if present on board. */
6212 		if (dd->flags & QIB_HAS_QSFP)
6213 			qib_init_7322_qsfp(ppd);
6214 	}
6215 	dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
6216 	qib_write_kreg(dd, kr_control, dd->control);
6217 
6218 	return ret;
6219 }
6220 
6221 /* per IB port errors.  */
6222 #define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
6223 	MASK_ACROSS(8, 15))
6224 #define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
6225 #define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
6226 	MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
6227 	MASK_ACROSS(0, 11))
6228 
6229 /*
6230  * Write the initialization per-port registers that need to be done at
6231  * driver load and after reset completes (i.e., that aren't done as part
6232  * of other init procedures called from qib_init.c).
6233  * Some of these should be redundant on reset, but play safe.
6234  */
6235 static void write_7322_init_portregs(struct qib_pportdata *ppd)
6236 {
6237 	u64 val;
6238 	int i;
6239 
6240 	if (!ppd->link_speed_supported) {
6241 		/* no buffer credits for this port */
6242 		for (i = 1; i < 8; i++)
6243 			qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
6244 		qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
6245 		qib_write_kreg(ppd->dd, kr_scratch, 0);
6246 		return;
6247 	}
6248 
6249 	/*
6250 	 * Set the number of supported virtual lanes in IBC,
6251 	 * for flow control packet handling on unsupported VLs
6252 	 */
6253 	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
6254 	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
6255 	val |= (u64)(ppd->vls_supported - 1) <<
6256 		SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
6257 	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
6258 
6259 	qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
6260 
6261 	/* enable tx header checking */
6262 	qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
6263 			    IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
6264 			    IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
6265 
6266 	qib_write_kreg_port(ppd, krp_ncmodectrl,
6267 		SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
6268 
6269 	/*
6270 	 * Unconditionally clear the bufmask bits.  If SDMA is
6271 	 * enabled, we'll set them appropriately later.
6272 	 */
6273 	qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
6274 	qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
6275 	qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
6276 	if (ppd->dd->cspec->r1)
6277 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
6278 }
6279 
6280 /*
6281  * Write the initialization per-device registers that need to be done at
6282  * driver load and after reset completes (i.e., that aren't done as part
6283  * of other init procedures called from qib_init.c).  Also write per-port
6284  * registers that are affected by overall device config, such as QP mapping.
6285  * Some of these should be redundant on reset, but play safe.
6286  */
6287 static void write_7322_initregs(struct qib_devdata *dd)
6288 {
6289 	struct qib_pportdata *ppd;
6290 	int i, pidx;
6291 	u64 val;
6292 
6293 	/* Set Multicast QPs received by port 2 to map to context one. */
6294 	qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
6295 
6296 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
6297 		unsigned n, regno;
6298 		unsigned long flags;
6299 
6300 		if (dd->n_krcv_queues < 2 ||
6301 			!dd->pport[pidx].link_speed_supported)
6302 			continue;
6303 
6304 		ppd = &dd->pport[pidx];
6305 
6306 		/* be paranoid against later code motion, etc. */
6307 		spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
6308 		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
6309 		spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
6310 
6311 		/* Initialize QP to context mapping */
6312 		regno = krp_rcvqpmaptable;
6313 		val = 0;
6314 		if (dd->num_pports > 1)
6315 			n = dd->first_user_ctxt / dd->num_pports;
6316 		else
6317 			n = dd->first_user_ctxt - 1;
6318 		for (i = 0; i < 32; ) {
6319 			unsigned ctxt;
6320 
6321 			if (dd->num_pports > 1)
6322 				ctxt = (i % n) * dd->num_pports + pidx;
6323 			else if (i % n)
6324 				ctxt = (i % n) + 1;
6325 			else
6326 				ctxt = ppd->hw_pidx;
6327 			val |= ctxt << (5 * (i % 6));
6328 			i++;
6329 			if (i % 6 == 0) {
6330 				qib_write_kreg_port(ppd, regno, val);
6331 				val = 0;
6332 				regno++;
6333 			}
6334 		}
6335 		qib_write_kreg_port(ppd, regno, val);
6336 	}
6337 
6338 	/*
6339 	 * Set up interrupt mitigation for kernel contexts, but
6340 	 * not user contexts (user contexts use interrupts when
6341 	 * stalled waiting for any packet, so want those interrupts
6342 	 * right away).
6343 	 */
6344 	for (i = 0; i < dd->first_user_ctxt; i++) {
6345 		dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
6346 		qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
6347 	}
6348 
6349 	/*
6350 	 * Initialize the (disabled) rcvflow tables.  Application code
6351 	 * will set up each flow as it uses it.
6352 	 * This doesn't clear any of the error bits that might be set.
6353 	 */
6354 	val = TIDFLOW_ERRBITS; /* these are W1C */
6355 	for (i = 0; i < dd->cfgctxts; i++) {
6356 		int flow;
6357 
6358 		for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
6359 			qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
6360 	}
6361 
6362 	/*
6363 	 * Dual-port cards init to dual-port recovery, single-port cards to
6364 	 * the one port.  Dual-port cards may later adjust to one port,
6365 	 * and then back to dual port if both ports are connected.
6366 	 */
6367 	if (dd->num_pports)
6368 		setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
6369 }
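
/*
 * Illustrative sketch (guarded out, never built) of the packing done by
 * the QP-to-context mapping loop in write_7322_initregs() above: each
 * krp_rcvqpmaptable register holds six 5-bit context numbers, so map
 * entry i lands in bits [5*(i%6)+4 : 5*(i%6)] of register
 * krp_rcvqpmaptable + i/6.
 */
#if 0
static u64 pack_qpmap_word(const unsigned ctxt[6])
{
	u64 val = 0;
	int i;

	for (i = 0; i < 6; i++)
		val |= (u64)(ctxt[i] & 0x1f) << (5 * i);
	return val;
}
#endif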
6370 
6371 static int qib_init_7322_variables(struct qib_devdata *dd)
6372 {
6373 	struct qib_pportdata *ppd;
6374 	unsigned features, pidx, sbufcnt;
6375 	int ret, mtu;
6376 	u32 sbufs, updthresh;
6377 	resource_size_t vl15off;
6378 
6379 	/* pport structs are contiguous, allocated after devdata */
6380 	ppd = (struct qib_pportdata *)(dd + 1);
6381 	dd->pport = ppd;
6382 	ppd[0].dd = dd;
6383 	ppd[1].dd = dd;
6384 
6385 	dd->cspec = (struct qib_chip_specific *)(ppd + 2);
6386 
6387 	ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
6388 	ppd[1].cpspec = &ppd[0].cpspec[1];
6389 	ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
6390 	ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
6391 
6392 	spin_lock_init(&dd->cspec->rcvmod_lock);
6393 	spin_lock_init(&dd->cspec->gpio_lock);
6394 
6395 	/* we haven't yet set QIB_PRESENT, so use read directly */
6396 	dd->revision = readq(&dd->kregbase[kr_revision]);
6397 
6398 	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
6399 		qib_dev_err(dd,
6400 			"Revision register read failure, giving up initialization\n");
6401 		ret = -ENODEV;
6402 		goto bail;
6403 	}
6404 	dd->flags |= QIB_PRESENT;  /* now register routines work */
6405 
6406 	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
6407 	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
6408 	dd->cspec->r1 = dd->minrev == 1;
6409 
6410 	get_7322_chip_params(dd);
6411 	features = qib_7322_boardname(dd);
6412 
6413 	/* now that piobcnt2k and 4k are set, we can allocate these */
6414 	sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
6415 		NUM_VL15_BUFS + BITS_PER_LONG - 1;
6416 	sbufcnt /= BITS_PER_LONG;
6417 	dd->cspec->sendchkenable =
6418 		kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendchkenable),
6419 			      GFP_KERNEL);
6420 	dd->cspec->sendgrhchk =
6421 		kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendgrhchk),
6422 			      GFP_KERNEL);
6423 	dd->cspec->sendibchk =
6424 		kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendibchk),
6425 			      GFP_KERNEL);
6426 	if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
6427 		!dd->cspec->sendibchk) {
6428 		ret = -ENOMEM;
6429 		goto bail;
6430 	}
6431 
6432 	ppd = dd->pport;
6433 
6434 	/*
6435 	 * GPIO bits for TWSI data and clock,
6436 	 * used for serial EEPROM.
6437 	 */
6438 	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
6439 	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
6440 	dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
6441 
6442 	dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
6443 		QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
6444 		QIB_HAS_THRESH_UPDATE |
6445 		(sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
6446 	dd->flags |= qib_special_trigger ?
6447 		QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
6448 
6449 	/*
6450 	 * Setup initial values.  These may change when PAT is enabled, but
6451 	 * we need these to do initial chip register accesses.
6452 	 */
6453 	qib_7322_set_baseaddrs(dd);
6454 
6455 	mtu = ib_mtu_enum_to_int(qib_ibmtu);
6456 	if (mtu == -1)
6457 		mtu = QIB_DEFAULT_MTU;
6458 
6459 	dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
6460 	/* all hwerrors become interrupts, unless special purposed */
6461 	dd->cspec->hwerrmask = ~0ULL;
6462 	/*  link_recovery setup causes these errors, so ignore them,
6463 	 *  other than clearing them when they occur */
6464 	dd->cspec->hwerrmask &=
6465 		~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
6466 		  SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
6467 		  HWE_MASK(LATriggered));
6468 
6469 	for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
6470 		struct qib_chippport_specific *cp = ppd->cpspec;
6471 
6472 		ppd->link_speed_supported = features & PORT_SPD_CAP;
6473 		features >>=  PORT_SPD_CAP_SHIFT;
6474 		if (!ppd->link_speed_supported) {
6475 			/* single port mode (7340, or configured) */
6476 			dd->skip_kctxt_mask |= 1 << pidx;
6477 			if (pidx == 0) {
6478 				/* Make sure port is disabled. */
6479 				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6480 				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6481 				ppd[0] = ppd[1];
6482 				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6483 						  IBSerdesPClkNotDetectMask_0)
6484 						  | SYM_MASK(HwErrMask,
6485 						  SDmaMemReadErrMask_0));
6486 				dd->cspec->int_enable_mask &= ~(
6487 				     SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
6488 				     SYM_MASK(IntMask, SDmaIdleIntMask_0) |
6489 				     SYM_MASK(IntMask, SDmaProgressIntMask_0) |
6490 				     SYM_MASK(IntMask, SDmaIntMask_0) |
6491 				     SYM_MASK(IntMask, ErrIntMask_0) |
6492 				     SYM_MASK(IntMask, SendDoneIntMask_0));
6493 			} else {
6494 				/* Make sure port is disabled. */
6495 				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6496 				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6497 				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6498 						  IBSerdesPClkNotDetectMask_1)
6499 						  | SYM_MASK(HwErrMask,
6500 						  SDmaMemReadErrMask_1));
6501 				dd->cspec->int_enable_mask &= ~(
6502 				     SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
6503 				     SYM_MASK(IntMask, SDmaIdleIntMask_1) |
6504 				     SYM_MASK(IntMask, SDmaProgressIntMask_1) |
6505 				     SYM_MASK(IntMask, SDmaIntMask_1) |
6506 				     SYM_MASK(IntMask, ErrIntMask_1) |
6507 				     SYM_MASK(IntMask, SendDoneIntMask_1));
6508 			}
6509 			continue;
6510 		}
6511 
6512 		dd->num_pports++;
6513 		ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
6514 		if (ret) {
6515 			dd->num_pports--;
6516 			goto bail;
6517 		}
6518 
6519 		ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
6520 		ppd->link_width_enabled = IB_WIDTH_4X;
6521 		ppd->link_speed_enabled = ppd->link_speed_supported;
6522 		/*
6523 		 * Set the initial values to reasonable default, will be set
6524 		 * for real when link is up.
6525 		 */
6526 		ppd->link_width_active = IB_WIDTH_4X;
6527 		ppd->link_speed_active = QIB_IB_SDR;
6528 		ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
6529 		switch (qib_num_cfg_vls) {
6530 		case 1:
6531 			ppd->vls_supported = IB_VL_VL0;
6532 			break;
6533 		case 2:
6534 			ppd->vls_supported = IB_VL_VL0_1;
6535 			break;
6536 		default:
6537 			qib_devinfo(dd->pcidev,
6538 				    "Invalid num_vls %u, using 4 VLs\n",
6539 				    qib_num_cfg_vls);
6540 			qib_num_cfg_vls = 4;
6541 			/* fall through */
6542 		case 4:
6543 			ppd->vls_supported = IB_VL_VL0_3;
6544 			break;
6545 		case 8:
6546 			if (mtu <= 2048)
6547 				ppd->vls_supported = IB_VL_VL0_7;
6548 			else {
6549 				qib_devinfo(dd->pcidev,
6550 					    "Invalid num_vls %u for MTU %d, using 4 VLs\n",
6551 					    qib_num_cfg_vls, mtu);
6552 				ppd->vls_supported = IB_VL_VL0_3;
6553 				qib_num_cfg_vls = 4;
6554 			}
6555 			break;
6556 		}
6557 		ppd->vls_operational = ppd->vls_supported;
6558 
6559 		init_waitqueue_head(&cp->autoneg_wait);
6560 		INIT_DELAYED_WORK(&cp->autoneg_work,
6561 				  autoneg_7322_work);
6562 		if (ppd->dd->cspec->r1)
6563 			INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
6564 
6565 		/*
6566 		 * For Mez and similar cards, no qsfp info, so do
6567 		 * the "cable info" setup here.  Can be overridden
6568 		 * in adapter-specific routines.
6569 		 */
6570 		if (!(dd->flags & QIB_HAS_QSFP)) {
6571 			if (!IS_QMH(dd) && !IS_QME(dd))
6572 				qib_devinfo(dd->pcidev,
6573 					"IB%u:%u: Unknown mezzanine card type\n",
6574 					dd->unit, ppd->port);
6575 			cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
6576 			/*
6577 			 * Choose center value as default tx serdes setting
6578 			 * until changed through module parameter.
6579 			 */
6580 			ppd->cpspec->no_eep = IS_QMH(dd) ?
6581 				TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
6582 		} else
6583 			cp->h1_val = H1_FORCE_VAL;
6584 
6585 		/* Avoid writes to chip for mini_init */
6586 		if (!qib_mini_init)
6587 			write_7322_init_portregs(ppd);
6588 
6589 		timer_setup(&cp->chase_timer, reenable_chase, 0);
6590 
6591 		ppd++;
6592 	}
6593 
6594 	dd->rcvhdrentsize = qib_rcvhdrentsize ?
6595 		qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
6596 	dd->rcvhdrsize = qib_rcvhdrsize ?
6597 		qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
6598 	dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
6599 
6600 	/* we always allocate at least 2048 bytes for eager buffers */
6601 	dd->rcvegrbufsize = max(mtu, 2048);
6602 	BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
6603 	dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
6604 
6605 	qib_7322_tidtemplate(dd);
6606 
6607 	/*
6608 	 * We can request a receive interrupt for 1 or
6609 	 * more packets from current offset.
6610 	 */
6611 	dd->rhdrhead_intr_off =
6612 		(u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
6613 
6614 	/* setup the stats timer; the add_timer is done at end of init */
6615 	timer_setup(&dd->stats_timer, qib_get_7322_faststats, 0);
6616 
6617 	dd->ureg_align = 0x10000;  /* 64KB alignment */
6618 
6619 	dd->piosize2kmax_dwords = dd->piosize2k >> 2;
6620 
6621 	qib_7322_config_ctxts(dd);
6622 	qib_set_ctxtcnt(dd);
6623 
6624 	/*
6625 	 * We do not set WC on the VL15 buffers to avoid
6626 	 * a rare problem with unaligned writes from
6627 	 * interrupt-flushed store buffers, so we need
6628 	 * to map those separately here.  We can't solve
6629 	 * this for the rarely used mtrr case.
6630 	 */
6631 	ret = init_chip_wc_pat(dd, 0);
6632 	if (ret)
6633 		goto bail;
6634 
6635 	/* vl15 buffers start just after the 4k buffers */
6636 	vl15off = dd->physaddr + (dd->piobufbase >> 32) +
6637 		  dd->piobcnt4k * dd->align4k;
6638 	dd->piovl15base	= ioremap_nocache(vl15off,
6639 					  NUM_VL15_BUFS * dd->align4k);
6640 	if (!dd->piovl15base) {
6641 		ret = -ENOMEM;
6642 		goto bail;
6643 	}
6644 
6645 	qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
6646 
6647 	ret = 0;
6648 	if (qib_mini_init)
6649 		goto bail;
6650 	if (!dd->num_pports) {
6651 		qib_dev_err(dd, "No ports enabled, giving up initialization\n");
6652 		goto bail; /* no error, so can still figure out why err */
6653 	}
6654 
6655 	write_7322_initregs(dd);
6656 	ret = qib_create_ctxts(dd);
6657 	init_7322_cntrnames(dd);
6658 
6659 	updthresh = 8U; /* update threshold */
6660 
6661 	/* Use all of the 4 KB buffers for kernel SDMA, or none if !SDMA.
6662 	 * Reserve the update threshold amount of send buffers (or 3,
6663 	 * whichever is greater) for other kernel use, such as sending
6664 	 * SMI, MAD, and ACK packets; if we aren't enabling SDMA, the
6665 	 * kernel instead gets all of the 4 KB buffers.
6666 	 * If the reserve were less than the update threshold, we could
6667 	 * wait a long time for an update.  Coded this way because we
6668 	 * sometimes change the update threshold for various reasons,
6669 	 * and we want this to remain robust.
6670 	 */
6671 	if (dd->flags & QIB_HAS_SEND_DMA) {
6672 		dd->cspec->sdmabufcnt = dd->piobcnt4k;
6673 		sbufs = updthresh > 3 ? updthresh : 3;
6674 	} else {
6675 		dd->cspec->sdmabufcnt = 0;
6676 		sbufs = dd->piobcnt4k;
6677 	}
6678 	dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
6679 		dd->cspec->sdmabufcnt;
6680 	dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
6681 	dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
6682 	dd->last_pio = dd->cspec->lastbuf_for_pio;
6683 	dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
6684 		dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
6685 
6686 	/*
6687 	 * If we have 16 user contexts, we will have 7 sbufs
6688 	 * per context, so reduce the update threshold to match.  We
6689 	 * want to update before we actually run out, at low pbufs/ctxt
6690 	 * so give ourselves some margin.
6691 	 */
6692 	if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
6693 		updthresh = dd->pbufsctxt - 2;
6694 	dd->cspec->updthresh_dflt = updthresh;
6695 	dd->cspec->updthresh = updthresh;
6696 
6697 	/* before full enable, no interrupts, no locking needed */
6698 	dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
6699 			     << SYM_LSB(SendCtrl, AvailUpdThld)) |
6700 			SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
6701 
6702 	dd->psxmitwait_supported = 1;
6703 	dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
6704 bail:
6705 	if (!dd->ctxtcnt)
6706 		dd->ctxtcnt = 1; /* for other initialization code */
6707 
6708 	return ret;
6709 }
6710 
6711 static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
6712 					u32 *pbufnum)
6713 {
6714 	u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
6715 	struct qib_devdata *dd = ppd->dd;
6716 
6717 	/* last is same for 2k and 4k, because we use 4k if all 2k busy */
6718 	if (pbc & PBC_7322_VL15_SEND) {
6719 		first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
6720 		last = first;
6721 	} else {
6722 		if ((plen + 1) > dd->piosize2kmax_dwords)
6723 			first = dd->piobcnt2k;
6724 		else
6725 			first = 0;
6726 		last = dd->cspec->lastbuf_for_pio;
6727 	}
6728 	return qib_getsendbuf_range(dd, pbufnum, first, last);
6729 }
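
/*
 * Example of the ranges chosen above (illustration only): a VL15/SMP
 * send (PBC_7322_VL15_SEND set) uses only the single dedicated buffer
 * at index piobcnt2k + piobcnt4k + hw_pidx.  Any other send whose PBC
 * length exceeds piosize2kmax_dwords starts the search at the first
 * 4 KB buffer (index piobcnt2k), otherwise at buffer 0, and in both
 * cases the search ends at lastbuf_for_pio.
 */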
6730 
6731 static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
6732 				     u32 start)
6733 {
6734 	qib_write_kreg_port(ppd, krp_psinterval, intv);
6735 	qib_write_kreg_port(ppd, krp_psstart, start);
6736 }
6737 
6738 /*
6739  * Must be called with sdma_lock held, or before init finished.
6740  */
6741 static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
6742 {
6743 	qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
6744 }
6745 
6746 /*
6747  * sdma_lock should be acquired before calling this routine
6748  */
6749 static void dump_sdma_7322_state(struct qib_pportdata *ppd)
6750 {
6751 	u64 reg, reg1, reg2;
6752 
6753 	reg = qib_read_kreg_port(ppd, krp_senddmastatus);
6754 	qib_dev_porterr(ppd->dd, ppd->port,
6755 		"SDMA senddmastatus: 0x%016llx\n", reg);
6756 
6757 	reg = qib_read_kreg_port(ppd, krp_sendctrl);
6758 	qib_dev_porterr(ppd->dd, ppd->port,
6759 		"SDMA sendctrl: 0x%016llx\n", reg);
6760 
6761 	reg = qib_read_kreg_port(ppd, krp_senddmabase);
6762 	qib_dev_porterr(ppd->dd, ppd->port,
6763 		"SDMA senddmabase: 0x%016llx\n", reg);
6764 
6765 	reg = qib_read_kreg_port(ppd, krp_senddmabufmask0);
6766 	reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1);
6767 	reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2);
6768 	qib_dev_porterr(ppd->dd, ppd->port,
6769 		"SDMA senddmabufmask 0:%llx  1:%llx  2:%llx\n",
6770 		 reg, reg1, reg2);
6771 
6772 	/* get bufuse bits, clear them, and print them again if non-zero */
6773 	reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6774 	qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg);
6775 	reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6776 	qib_write_kreg_port(ppd, krp_senddmabuf_use1, reg1);
6777 	reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6778 	qib_write_kreg_port(ppd, krp_senddmabuf_use2, reg2);
6779 	/* 0 and 1 should always be zero, so print as short form */
6780 	qib_dev_porterr(ppd->dd, ppd->port,
6781 		 "SDMA current senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6782 		 reg, reg1, reg2);
6783 	reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6784 	reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6785 	reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6786 	/* 0 and 1 should always be zero, so print as short form */
6787 	qib_dev_porterr(ppd->dd, ppd->port,
6788 		 "SDMA cleared senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6789 		 reg, reg1, reg2);
6790 
6791 	reg = qib_read_kreg_port(ppd, krp_senddmatail);
6792 	qib_dev_porterr(ppd->dd, ppd->port,
6793 		"SDMA senddmatail: 0x%016llx\n", reg);
6794 
6795 	reg = qib_read_kreg_port(ppd, krp_senddmahead);
6796 	qib_dev_porterr(ppd->dd, ppd->port,
6797 		"SDMA senddmahead: 0x%016llx\n", reg);
6798 
6799 	reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr);
6800 	qib_dev_porterr(ppd->dd, ppd->port,
6801 		"SDMA senddmaheadaddr: 0x%016llx\n", reg);
6802 
6803 	reg = qib_read_kreg_port(ppd, krp_senddmalengen);
6804 	qib_dev_porterr(ppd->dd, ppd->port,
6805 		"SDMA senddmalengen: 0x%016llx\n", reg);
6806 
6807 	reg = qib_read_kreg_port(ppd, krp_senddmadesccnt);
6808 	qib_dev_porterr(ppd->dd, ppd->port,
6809 		"SDMA senddmadesccnt: 0x%016llx\n", reg);
6810 
6811 	reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt);
6812 	qib_dev_porterr(ppd->dd, ppd->port,
6813 		"SDMA senddmaidlecnt: 0x%016llx\n", reg);
6814 
6815 	reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld);
6816 	qib_dev_porterr(ppd->dd, ppd->port,
6817 		"SDMA senddmaprioritythld: 0x%016llx\n", reg);
6818 
6819 	reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt);
6820 	qib_dev_porterr(ppd->dd, ppd->port,
6821 		"SDMA senddmareloadcnt: 0x%016llx\n", reg);
6822 
6823 	dump_sdma_state(ppd);
6824 }
6825 
6826 static struct sdma_set_state_action sdma_7322_action_table[] = {
6827 	[qib_sdma_state_s00_hw_down] = {
6828 		.go_s99_running_tofalse = 1,
6829 		.op_enable = 0,
6830 		.op_intenable = 0,
6831 		.op_halt = 0,
6832 		.op_drain = 0,
6833 	},
6834 	[qib_sdma_state_s10_hw_start_up_wait] = {
6835 		.op_enable = 0,
6836 		.op_intenable = 1,
6837 		.op_halt = 1,
6838 		.op_drain = 0,
6839 	},
6840 	[qib_sdma_state_s20_idle] = {
6841 		.op_enable = 1,
6842 		.op_intenable = 1,
6843 		.op_halt = 1,
6844 		.op_drain = 0,
6845 	},
6846 	[qib_sdma_state_s30_sw_clean_up_wait] = {
6847 		.op_enable = 0,
6848 		.op_intenable = 1,
6849 		.op_halt = 1,
6850 		.op_drain = 0,
6851 	},
6852 	[qib_sdma_state_s40_hw_clean_up_wait] = {
6853 		.op_enable = 1,
6854 		.op_intenable = 1,
6855 		.op_halt = 1,
6856 		.op_drain = 0,
6857 	},
6858 	[qib_sdma_state_s50_hw_halt_wait] = {
6859 		.op_enable = 1,
6860 		.op_intenable = 1,
6861 		.op_halt = 1,
6862 		.op_drain = 1,
6863 	},
6864 	[qib_sdma_state_s99_running] = {
6865 		.op_enable = 1,
6866 		.op_intenable = 1,
6867 		.op_halt = 0,
6868 		.op_drain = 0,
6869 		.go_s99_running_totrue = 1,
6870 	},
6871 };
6872 
6873 static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
6874 {
6875 	ppd->sdma_state.set_state_action = sdma_7322_action_table;
6876 }
6877 
6878 static int init_sdma_7322_regs(struct qib_pportdata *ppd)
6879 {
6880 	struct qib_devdata *dd = ppd->dd;
6881 	unsigned lastbuf, erstbuf;
6882 	u64 senddmabufmask[3] = { 0 };
6883 	int n, ret = 0;
6884 
6885 	qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
6886 	qib_sdma_7322_setlengen(ppd);
6887 	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
6888 	qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
6889 	qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
6890 	qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
6891 
6892 	if (dd->num_pports)
6893 		n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
6894 	else
6895 		n = dd->cspec->sdmabufcnt; /* failsafe for init */
6896 	erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
6897 		((dd->num_pports == 1 || ppd->port == 2) ? n :
6898 		dd->cspec->sdmabufcnt);
6899 	lastbuf = erstbuf + n;
6900 
6901 	ppd->sdma_state.first_sendbuf = erstbuf;
6902 	ppd->sdma_state.last_sendbuf = lastbuf;
6903 	for (; erstbuf < lastbuf; ++erstbuf) {
6904 		unsigned word = erstbuf / BITS_PER_LONG;
6905 		unsigned bit = erstbuf & (BITS_PER_LONG - 1);
6906 
6907 		BUG_ON(word >= 3);
6908 		senddmabufmask[word] |= 1ULL << bit;
6909 	}
6910 	qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
6911 	qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
6912 	qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
6913 	return ret;
6914 }
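
/*
 * Worked example of the bufmask setup above (illustration only, counts
 * made up): with two ports and sdmabufcnt = 64, each port gets n = 32
 * SDMA buffers at the top of the buffer space.  If piobcnt2k +
 * piobcnt4k = 160, port 1 claims buffers 96..127 and port 2 claims
 * 128..159; the loop then sets the corresponding bits across the three
 * senddmabufmask registers.
 */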
6915 
6916 /* sdma_lock must be held */
6917 static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
6918 {
6919 	struct qib_devdata *dd = ppd->dd;
6920 	int sane;
6921 	int use_dmahead;
6922 	u16 swhead;
6923 	u16 swtail;
6924 	u16 cnt;
6925 	u16 hwhead;
6926 
6927 	use_dmahead = __qib_sdma_running(ppd) &&
6928 		(dd->flags & QIB_HAS_SDMA_TIMEOUT);
6929 retry:
6930 	hwhead = use_dmahead ?
6931 		(u16) le64_to_cpu(*ppd->sdma_head_dma) :
6932 		(u16) qib_read_kreg_port(ppd, krp_senddmahead);
6933 
6934 	swhead = ppd->sdma_descq_head;
6935 	swtail = ppd->sdma_descq_tail;
6936 	cnt = ppd->sdma_descq_cnt;
6937 
6938 	if (swhead < swtail)
6939 		/* not wrapped */
6940 		sane = (hwhead >= swhead) && (hwhead <= swtail);
6941 	else if (swhead > swtail)
6942 		/* wrapped around */
6943 		sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
6944 			(hwhead <= swtail);
6945 	else
6946 		/* empty */
6947 		sane = (hwhead == swhead);
6948 
6949 	if (unlikely(!sane)) {
6950 		if (use_dmahead) {
6951 			/* try one more time, directly from the register */
6952 			use_dmahead = 0;
6953 			goto retry;
6954 		}
6955 		/* proceed as if no progress */
6956 		hwhead = swhead;
6957 	}
6958 
6959 	return hwhead;
6960 }
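
/*
 * Worked example of the sanity check above (illustration only), which
 * treats the descriptor queue as a ring: with sdma_descq_cnt = 256,
 * swhead = 250 and swtail = 10 (wrapped), a hardware head of 253 or 5
 * is sane, while a value such as 100 is not and the code falls back to
 * the software head, i.e. assumes no progress.
 */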
6961 
6962 static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
6963 {
6964 	u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
6965 
6966 	return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
6967 	       (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
6968 	       !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
6969 	       !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
6970 }
6971 
6972 /*
6973  * Compute the amount of delay before sending the next packet if the
6974  * port's send rate differs from the static rate set for the QP.
6975  * The delay affects the next packet and the amount of the delay is
6976  * based on the length of this packet.
6977  */
6978 static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
6979 				   u8 srate, u8 vl)
6980 {
6981 	u8 snd_mult = ppd->delay_mult;
6982 	u8 rcv_mult = ib_rate_to_delay[srate];
6983 	u32 ret;
6984 
6985 	ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
6986 
6987 	/* Indicate VL15, else set the VL in the control word */
6988 	if (vl == 15)
6989 		ret |= PBC_7322_VL15_SEND_CTRL;
6990 	else
6991 		ret |= vl << PBC_VL_NUM_LSB;
6992 	ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
6993 
6994 	return ret;
6995 }
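
/*
 * Worked example (illustration only, multipliers hypothetical): if the
 * port is faster than the QP's static rate, say snd_mult = 1 and
 * rcv_mult = 4, a packet of plen = 64 dwords produces a static-rate
 * delay of ((64 + 1) >> 1) * 1 = 32 in the returned PBC control word;
 * if rcv_mult <= snd_mult, no delay is inserted.  The VL and port
 * select fields are OR'ed in on top of that.
 */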
6996 
6997 /*
6998  * Enable the per-port VL15 send buffers for use.
6999  * They follow the rest of the buffers, without a config parameter.
7000  * This was in initregs, but that is done before the shadow
7001  * is set up, and this has to be done after the shadow is
7002  * set up.
7003  */
7004 static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
7005 {
7006 	unsigned vl15bufs;
7007 
7008 	vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
7009 	qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
7010 			       TXCHK_CHG_TYPE_KERN, NULL);
7011 }
7012 
7013 static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
7014 {
7015 	if (rcd->ctxt < NUM_IB_PORTS) {
7016 		if (rcd->dd->num_pports > 1) {
7017 			rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
7018 			rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
7019 		} else {
7020 			rcd->rcvegrcnt = KCTXT0_EGRCNT;
7021 			rcd->rcvegr_tid_base = 0;
7022 		}
7023 	} else {
7024 		rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
7025 		rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
7026 			(rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
7027 	}
7028 }
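
/*
 * Example of the resulting layout (illustration only): on a dual-port
 * card the two kernel contexts split KCTXT0_EGRCNT between them, so
 * context 1 starts at tid base KCTXT0_EGRCNT / 2, and a later context
 * N (N >= NUM_IB_PORTS) gets
 * rcvegr_tid_base = KCTXT0_EGRCNT + (N - NUM_IB_PORTS) * rcvegrcnt.
 */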
7029 
7030 #define QTXSLEEPS 5000
7031 static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
7032 				  u32 len, u32 which, struct qib_ctxtdata *rcd)
7033 {
7034 	int i;
7035 	const int last = start + len - 1;
7036 	const int lastr = last / BITS_PER_LONG;
7037 	u32 sleeps = 0;
7038 	int wait = rcd != NULL;
7039 	unsigned long flags;
7040 
7041 	while (wait) {
7042 		unsigned long shadow = 0;
7043 		int cstart, previ = -1;
7044 
7045 		/*
7046 		 * When flipping from kernel to user, we can't change
7047 		 * the checking type if the buffer is allocated to the
7048 		 * driver.  It's OK in the other direction, because that
7049 		 * happens at close, and we have just disarmed all the
7050 		 * buffers.  All the kernel-to-kernel changes are also
7051 		 * OK.
7052 		 */
7053 		for (cstart = start; cstart <= last; cstart++) {
7054 			i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7055 				/ BITS_PER_LONG;
7056 			if (i != previ) {
7057 				shadow = (unsigned long)
7058 					le64_to_cpu(dd->pioavailregs_dma[i]);
7059 				previ = i;
7060 			}
7061 			if (test_bit(((2 * cstart) +
7062 				      QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7063 				     % BITS_PER_LONG, &shadow))
7064 				break;
7065 		}
7066 
7067 		if (cstart > last)
7068 			break;
7069 
7070 		if (sleeps == QTXSLEEPS)
7071 			break;
7072 		/* make sure we see an updated copy next time around */
7073 		sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7074 		sleeps++;
7075 		msleep(20);
7076 	}
7077 
7078 	switch (which) {
7079 	case TXCHK_CHG_TYPE_DIS1:
7080 		/*
7081 		 * disable checking on a range; used by diags; just
7082 		 * one buffer, but still written generically
7083 		 */
7084 		for (i = start; i <= last; i++)
7085 			clear_bit(i, dd->cspec->sendchkenable);
7086 		break;
7087 
7088 	case TXCHK_CHG_TYPE_ENAB1:
7089 		/*
7090 		 * (re)enable checking on a range; used by diags; just
7091 		 * one buffer, but still written generically; read
7092 		 * scratch to be sure buffer actually triggered, not
7093 		 * just flushed from processor.
7094 		 */
7095 		qib_read_kreg32(dd, kr_scratch);
7096 		for (i = start; i <= last; i++)
7097 			set_bit(i, dd->cspec->sendchkenable);
7098 		break;
7099 
7100 	case TXCHK_CHG_TYPE_KERN:
7101 		/* usable by kernel */
7102 		for (i = start; i <= last; i++) {
7103 			set_bit(i, dd->cspec->sendibchk);
7104 			clear_bit(i, dd->cspec->sendgrhchk);
7105 		}
7106 		spin_lock_irqsave(&dd->uctxt_lock, flags);
7107 		/* see if we need to raise avail update threshold */
7108 		for (i = dd->first_user_ctxt;
7109 		     dd->cspec->updthresh != dd->cspec->updthresh_dflt
7110 		     && i < dd->cfgctxts; i++)
7111 			if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
7112 			   ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
7113 			   < dd->cspec->updthresh_dflt)
7114 				break;
7115 		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
7116 		if (i == dd->cfgctxts) {
7117 			spin_lock_irqsave(&dd->sendctrl_lock, flags);
7118 			dd->cspec->updthresh = dd->cspec->updthresh_dflt;
7119 			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7120 			dd->sendctrl |= (dd->cspec->updthresh &
7121 					 SYM_RMASK(SendCtrl, AvailUpdThld)) <<
7122 					   SYM_LSB(SendCtrl, AvailUpdThld);
7123 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7124 			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7125 		}
7126 		break;
7127 
7128 	case TXCHK_CHG_TYPE_USER:
7129 		/* for user process */
7130 		for (i = start; i <= last; i++) {
7131 			clear_bit(i, dd->cspec->sendibchk);
7132 			set_bit(i, dd->cspec->sendgrhchk);
7133 		}
7134 		spin_lock_irqsave(&dd->sendctrl_lock, flags);
7135 		if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
7136 			/ rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
7137 			dd->cspec->updthresh = (rcd->piocnt /
7138 						rcd->subctxt_cnt) - 1;
7139 			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7140 			dd->sendctrl |= (dd->cspec->updthresh &
7141 					SYM_RMASK(SendCtrl, AvailUpdThld))
7142 					<< SYM_LSB(SendCtrl, AvailUpdThld);
7143 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7144 			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7145 		} else
7146 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7147 		break;
7148 
7149 	default:
7150 		break;
7151 	}
7152 
7153 	for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
7154 		qib_write_kreg(dd, kr_sendcheckmask + i,
7155 			       dd->cspec->sendchkenable[i]);
7156 
7157 	for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
7158 		qib_write_kreg(dd, kr_sendgrhcheckmask + i,
7159 			       dd->cspec->sendgrhchk[i]);
7160 		qib_write_kreg(dd, kr_sendibpktmask + i,
7161 			       dd->cspec->sendibchk[i]);
7162 	}
7163 
7164 	/*
7165 	 * Be sure whatever we did was seen by the chip and acted upon,
7166 	 * before we return.  Mostly important for which >= 2.
7167 	 */
7168 	qib_read_kreg32(dd, kr_scratch);
7169 }
7170 
7171 
7172 /* useful for trigger analyzers, etc. */
7173 static void writescratch(struct qib_devdata *dd, u32 val)
7174 {
7175 	qib_write_kreg(dd, kr_scratch, val);
7176 }
7177 
7178 /* Dummy for now, use chip regs soon */
7179 static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
7180 {
7181 	return -ENXIO;
7182 }
7183 
7184 /**
7185  * qib_init_iba7322_funcs - set up the chip-specific function pointers
7186  * @pdev: the pci_dev for qlogic_ib device
7187  * @ent: pci_device_id struct for this dev
7188  *
7189  * Also allocates, inits, and returns the devdata struct for this
7190  * device instance
7191  *
7192  * This is global, and is called directly at init to set up the
7193  * chip-specific function pointers for later use.
7194  */
7195 struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
7196 					   const struct pci_device_id *ent)
7197 {
7198 	struct qib_devdata *dd;
7199 	int ret, i;
7200 	u32 tabsize, actual_cnt = 0;
7201 
7202 	dd = qib_alloc_devdata(pdev,
7203 		NUM_IB_PORTS * sizeof(struct qib_pportdata) +
7204 		sizeof(struct qib_chip_specific) +
7205 		NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
7206 	if (IS_ERR(dd))
7207 		goto bail;
7208 
7209 	dd->f_bringup_serdes    = qib_7322_bringup_serdes;
7210 	dd->f_cleanup           = qib_setup_7322_cleanup;
7211 	dd->f_clear_tids        = qib_7322_clear_tids;
7212 	dd->f_free_irq          = qib_7322_free_irq;
7213 	dd->f_get_base_info     = qib_7322_get_base_info;
7214 	dd->f_get_msgheader     = qib_7322_get_msgheader;
7215 	dd->f_getsendbuf        = qib_7322_getsendbuf;
7216 	dd->f_gpio_mod          = gpio_7322_mod;
7217 	dd->f_eeprom_wen        = qib_7322_eeprom_wen;
7218 	dd->f_hdrqempty         = qib_7322_hdrqempty;
7219 	dd->f_ib_updown         = qib_7322_ib_updown;
7220 	dd->f_init_ctxt         = qib_7322_init_ctxt;
7221 	dd->f_initvl15_bufs     = qib_7322_initvl15_bufs;
7222 	dd->f_intr_fallback     = qib_7322_intr_fallback;
7223 	dd->f_late_initreg      = qib_late_7322_initreg;
7224 	dd->f_setpbc_control    = qib_7322_setpbc_control;
7225 	dd->f_portcntr          = qib_portcntr_7322;
7226 	dd->f_put_tid           = qib_7322_put_tid;
7227 	dd->f_quiet_serdes      = qib_7322_mini_quiet_serdes;
7228 	dd->f_rcvctrl           = rcvctrl_7322_mod;
7229 	dd->f_read_cntrs        = qib_read_7322cntrs;
7230 	dd->f_read_portcntrs    = qib_read_7322portcntrs;
7231 	dd->f_reset             = qib_do_7322_reset;
7232 	dd->f_init_sdma_regs    = init_sdma_7322_regs;
7233 	dd->f_sdma_busy         = qib_sdma_7322_busy;
7234 	dd->f_sdma_gethead      = qib_sdma_7322_gethead;
7235 	dd->f_sdma_sendctrl     = qib_7322_sdma_sendctrl;
7236 	dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
7237 	dd->f_sdma_update_tail  = qib_sdma_update_7322_tail;
7238 	dd->f_sendctrl          = sendctrl_7322_mod;
7239 	dd->f_set_armlaunch     = qib_set_7322_armlaunch;
7240 	dd->f_set_cntr_sample   = qib_set_cntr_7322_sample;
7241 	dd->f_iblink_state      = qib_7322_iblink_state;
7242 	dd->f_ibphys_portstate  = qib_7322_phys_portstate;
7243 	dd->f_get_ib_cfg        = qib_7322_get_ib_cfg;
7244 	dd->f_set_ib_cfg        = qib_7322_set_ib_cfg;
7245 	dd->f_set_ib_loopback   = qib_7322_set_loopback;
7246 	dd->f_get_ib_table      = qib_7322_get_ib_table;
7247 	dd->f_set_ib_table      = qib_7322_set_ib_table;
7248 	dd->f_set_intr_state    = qib_7322_set_intr_state;
7249 	dd->f_setextled         = qib_setup_7322_setextled;
7250 	dd->f_txchk_change      = qib_7322_txchk_change;
7251 	dd->f_update_usrhead    = qib_update_7322_usrhead;
7252 	dd->f_wantpiobuf_intr   = qib_wantpiobuf_7322_intr;
7253 	dd->f_xgxs_reset        = qib_7322_mini_pcs_reset;
7254 	dd->f_sdma_hw_clean_up  = qib_7322_sdma_hw_clean_up;
7255 	dd->f_sdma_hw_start_up  = qib_7322_sdma_hw_start_up;
7256 	dd->f_sdma_init_early   = qib_7322_sdma_init_early;
7257 	dd->f_writescratch      = writescratch;
7258 	dd->f_tempsense_rd	= qib_7322_tempsense_rd;
7259 #ifdef CONFIG_INFINIBAND_QIB_DCA
7260 	dd->f_notify_dca	= qib_7322_notify_dca;
7261 #endif
7262 	/*
7263 	 * Do remaining PCIe setup and save PCIe values in dd.
7264 	 * Any error printing is already done by the init code.
7265 	 * On return, we have the chip mapped, but chip registers
7266 	 * are not set up until start of qib_init_7322_variables.
7267 	 */
7268 	ret = qib_pcie_ddinit(dd, pdev, ent);
7269 	if (ret < 0)
7270 		goto bail_free;
7271 
7272 	/* initialize chip-specific variables */
7273 	ret = qib_init_7322_variables(dd);
7274 	if (ret)
7275 		goto bail_cleanup;
7276 
7277 	if (qib_mini_init || !dd->num_pports)
7278 		goto bail;
7279 
7280 	/*
7281 	 * Determine number of vectors we want; depends on port count
7282 	 * and number of configured kernel receive queues actually used.
7283 	 * Should also depend on whether sdma is enabled or not, but
7284 	 * that's such a rare testing case it's not worth worrying about.
7285 	 */
7286 	tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
7287 	for (i = 0; i < tabsize; i++)
7288 		if ((i < ARRAY_SIZE(irq_table) &&
7289 		     irq_table[i].port <= dd->num_pports) ||
7290 		    (i >= ARRAY_SIZE(irq_table) &&
7291 		     dd->rcd[i - ARRAY_SIZE(irq_table)]))
7292 			actual_cnt++;
7293 	/* reduce by ctxt's < 2 */
7294 	if (qib_krcvq01_no_msi)
7295 		actual_cnt -= dd->num_pports;
7296 
7297 	tabsize = actual_cnt;
7298 	dd->cspec->msix_entries = kcalloc(tabsize,
7299 					  sizeof(struct qib_msix_entry),
7300 					  GFP_KERNEL);
7301 	if (!dd->cspec->msix_entries)
7302 		tabsize = 0;
7303 
7304 	if (qib_pcie_params(dd, 8, &tabsize))
7305 		qib_dev_err(dd,
7306 			"Failed to setup PCIe or interrupts; continuing anyway\n");
7307 	/* may be less than we wanted, if not enough available */
7308 	dd->cspec->num_msix_entries = tabsize;
7309 
7310 	/* setup interrupt handler */
7311 	qib_setup_7322_interrupt(dd, 1);
7312 
7313 	/* clear diagctrl register, in case diags were running and crashed */
7314 	qib_write_kreg(dd, kr_hwdiagctrl, 0);
7315 #ifdef CONFIG_INFINIBAND_QIB_DCA
7316 	if (!dca_add_requester(&pdev->dev)) {
7317 		qib_devinfo(dd->pcidev, "DCA enabled\n");
7318 		dd->flags |= QIB_DCA_ENABLED;
7319 		qib_setup_dca(dd);
7320 	}
7321 #endif
7322 	goto bail;
7323 
7324 bail_cleanup:
7325 	qib_pcie_ddcleanup(dd);
7326 bail_free:
7327 	qib_free_devdata(dd);
7328 	dd = ERR_PTR(ret);
7329 bail:
7330 	return dd;
7331 }
7332 
7333 /*
7334  * Set the table entry at the specified index from the specified table.
7335  * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
7336  * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
7337  * 'idx' below addresses the correct entry, while its 4 LSBs select the
7338  * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
7339  */
7340 #define DDS_ENT_AMP_LSB 14
7341 #define DDS_ENT_MAIN_LSB 9
7342 #define DDS_ENT_POST_LSB 5
7343 #define DDS_ENT_PRE_XTRA_LSB 3
7344 #define DDS_ENT_PRE_LSB 0
7345 
7346 /*
7347  * Set one entry in the TxDDS table for the specified port.
7348  * ridx picks one of the entries, while tp points
7349  * to the appropriate table entry.
7350  */
7351 static void set_txdds(struct qib_pportdata *ppd, int ridx,
7352 		      const struct txdds_ent *tp)
7353 {
7354 	struct qib_devdata *dd = ppd->dd;
7355 	u32 pack_ent;
7356 	int regidx;
7357 
7358 	/* Get correct offset in chip-space, and in source table */
7359 	regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
7360 	/*
7361 	 * We do not use qib_write_kreg_port() because it was intended
7362 	 * only for registers in the lower "port specific" pages.
7363 	 * So do the index calculation by hand.
7364 	 */
7365 	if (ppd->hw_pidx)
7366 		regidx += (dd->palign / sizeof(u64));
7367 
7368 	pack_ent = tp->amp << DDS_ENT_AMP_LSB;
7369 	pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
7370 	pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
7371 	pack_ent |= tp->post << DDS_ENT_POST_LSB;
7372 	qib_write_kreg(dd, regidx, pack_ent);
7373 	/* Prevent back-to-back writes by hitting scratch */
7374 	qib_write_kreg(ppd->dd, kr_scratch, 0);
7375 }
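
/*
 * Illustrative sketch (guarded out, never built) of the bit packing
 * performed by set_txdds().  For a txdds_ent of { .amp = 0, .pre = 1,
 * .main = 3, .post = 15 } (the 7 dB QDR entry below), the packed value
 * is (0 << 14) | (3 << 9) | (15 << 5) | (1 << 0) = 0x7e1.
 */
#if 0
static u32 pack_txdds_example(const struct txdds_ent *tp)
{
	return (tp->amp << DDS_ENT_AMP_LSB) |
	       (tp->main << DDS_ENT_MAIN_LSB) |
	       (tp->post << DDS_ENT_POST_LSB) |
	       (tp->pre << DDS_ENT_PRE_LSB);
}
#endif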
7376 
7377 static const struct vendor_txdds_ent vendor_txdds[] = {
7378 	{ /* Amphenol 1m 30awg NoEq */
7379 		{ 0x41, 0x50, 0x48 }, "584470002       ",
7380 		{ 10,  0,  0,  5 }, { 10,  0,  0,  9 }, {  7,  1,  0, 13 },
7381 	},
7382 	{ /* Amphenol 3m 28awg NoEq */
7383 		{ 0x41, 0x50, 0x48 }, "584470004       ",
7384 		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  7, 15 },
7385 	},
7386 	{ /* Finisar 3m OM2 Optical */
7387 		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
7388 		{  0,  0,  0,  3 }, {  0,  0,  0,  4 }, {  0,  0,  0, 13 },
7389 	},
7390 	{ /* Finisar 30m OM2 Optical */
7391 		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
7392 		{  0,  0,  0,  1 }, {  0,  0,  0,  5 }, {  0,  0,  0, 11 },
7393 	},
7394 	{ /* Finisar Default OM2 Optical */
7395 		{ 0x00, 0x90, 0x65 }, NULL,
7396 		{  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  0,  0, 12 },
7397 	},
7398 	{ /* Gore 1m 30awg NoEq */
7399 		{ 0x00, 0x21, 0x77 }, "QSN3300-1       ",
7400 		{  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  0, 15 },
7401 	},
7402 	{ /* Gore 2m 30awg NoEq */
7403 		{ 0x00, 0x21, 0x77 }, "QSN3300-2       ",
7404 		{  0,  0,  0,  8 }, {  0,  0,  0, 10 }, {  0,  1,  7, 15 },
7405 	},
7406 	{ /* Gore 1m 28awg NoEq */
7407 		{ 0x00, 0x21, 0x77 }, "QSN3800-1       ",
7408 		{  0,  0,  0,  6 }, {  0,  0,  0,  8 }, {  0,  1,  0, 15 },
7409 	},
7410 	{ /* Gore 3m 28awg NoEq */
7411 		{ 0x00, 0x21, 0x77 }, "QSN3800-3       ",
7412 		{  0,  0,  0,  9 }, {  0,  0,  0, 13 }, {  0,  1,  7, 15 },
7413 	},
7414 	{ /* Gore 5m 24awg Eq */
7415 		{ 0x00, 0x21, 0x77 }, "QSN7000-5       ",
7416 		{  0,  0,  0,  7 }, {  0,  0,  0,  9 }, {  0,  1,  3, 15 },
7417 	},
7418 	{ /* Gore 7m 24awg Eq */
7419 		{ 0x00, 0x21, 0x77 }, "QSN7000-7       ",
7420 		{  0,  0,  0,  9 }, {  0,  0,  0, 11 }, {  0,  2,  6, 15 },
7421 	},
7422 	{ /* Gore 5m 26awg Eq */
7423 		{ 0x00, 0x21, 0x77 }, "QSN7600-5       ",
7424 		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  9, 13 },
7425 	},
7426 	{ /* Gore 7m 26awg Eq */
7427 		{ 0x00, 0x21, 0x77 }, "QSN7600-7       ",
7428 		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  10,  1,  8, 15 },
7429 	},
7430 	{ /* Intersil 12m 24awg Active */
7431 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
7432 		{  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  3,  0,  9 },
7433 	},
7434 	{ /* Intersil 10m 28awg Active */
7435 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
7436 		{  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  2,  0,  2 },
7437 	},
7438 	{ /* Intersil 7m 30awg Active */
7439 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
7440 		{  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  1,  0,  3 },
7441 	},
7442 	{ /* Intersil 5m 32awg Active */
7443 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
7444 		{  0,  0,  0,  6 }, {  0,  0,  0,  6 }, {  0,  2,  0,  8 },
7445 	},
7446 	{ /* Intersil Default Active */
7447 		{ 0x00, 0x30, 0xB4 }, NULL,
7448 		{  0,  0,  0,  6 }, {  0,  0,  0,  5 }, {  0,  2,  0,  5 },
7449 	},
7450 	{ /* Luxtera 20m Active Optical */
7451 		{ 0x00, 0x25, 0x63 }, NULL,
7452 		{  0,  0,  0,  5 }, {  0,  0,  0,  8 }, {  0,  2,  0,  12 },
7453 	},
7454 	{ /* Molex 1M Cu loopback */
7455 		{ 0x00, 0x09, 0x3A }, "74763-0025      ",
7456 		{  2,  2,  6, 15 }, {  2,  2,  6, 15 }, {  2,  2,  6, 15 },
7457 	},
7458 	{ /* Molex 2m 28awg NoEq */
7459 		{ 0x00, 0x09, 0x3A }, "74757-2201      ",
7460 		{  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  1, 15 },
7461 	},
7462 };
7463 
7464 static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
7465 	/* amp, pre, main, post */
7466 	{  2, 2, 15,  6 },	/* Loopback */
7467 	{  0, 0,  0,  1 },	/*  2 dB */
7468 	{  0, 0,  0,  2 },	/*  3 dB */
7469 	{  0, 0,  0,  3 },	/*  4 dB */
7470 	{  0, 0,  0,  4 },	/*  5 dB */
7471 	{  0, 0,  0,  5 },	/*  6 dB */
7472 	{  0, 0,  0,  6 },	/*  7 dB */
7473 	{  0, 0,  0,  7 },	/*  8 dB */
7474 	{  0, 0,  0,  8 },	/*  9 dB */
7475 	{  0, 0,  0,  9 },	/* 10 dB */
7476 	{  0, 0,  0, 10 },	/* 11 dB */
7477 	{  0, 0,  0, 11 },	/* 12 dB */
7478 	{  0, 0,  0, 12 },	/* 13 dB */
7479 	{  0, 0,  0, 13 },	/* 14 dB */
7480 	{  0, 0,  0, 14 },	/* 15 dB */
7481 	{  0, 0,  0, 15 },	/* 16 dB */
7482 };
7483 
7484 static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
7485 	/* amp, pre, main, post */
7486 	{  2, 2, 15,  6 },	/* Loopback */
7487 	{  0, 0,  0,  8 },	/*  2 dB */
7488 	{  0, 0,  0,  8 },	/*  3 dB */
7489 	{  0, 0,  0,  9 },	/*  4 dB */
7490 	{  0, 0,  0,  9 },	/*  5 dB */
7491 	{  0, 0,  0, 10 },	/*  6 dB */
7492 	{  0, 0,  0, 10 },	/*  7 dB */
7493 	{  0, 0,  0, 11 },	/*  8 dB */
7494 	{  0, 0,  0, 11 },	/*  9 dB */
7495 	{  0, 0,  0, 12 },	/* 10 dB */
7496 	{  0, 0,  0, 12 },	/* 11 dB */
7497 	{  0, 0,  0, 13 },	/* 12 dB */
7498 	{  0, 0,  0, 13 },	/* 13 dB */
7499 	{  0, 0,  0, 14 },	/* 14 dB */
7500 	{  0, 0,  0, 14 },	/* 15 dB */
7501 	{  0, 0,  0, 15 },	/* 16 dB */
7502 };
7503 
7504 static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
7505 	/* amp, pre, main, post */
7506 	{  2, 2, 15,  6 },	/* Loopback */
7507 	{  0, 1,  0,  7 },	/*  2 dB (also QMH7342) */
7508 	{  0, 1,  0,  9 },	/*  3 dB (also QMH7342) */
7509 	{  0, 1,  0, 11 },	/*  4 dB */
7510 	{  0, 1,  0, 13 },	/*  5 dB */
7511 	{  0, 1,  0, 15 },	/*  6 dB */
7512 	{  0, 1,  3, 15 },	/*  7 dB */
7513 	{  0, 1,  7, 15 },	/*  8 dB */
7514 	{  0, 1,  7, 15 },	/*  9 dB */
7515 	{  0, 1,  8, 15 },	/* 10 dB */
7516 	{  0, 1,  9, 15 },	/* 11 dB */
7517 	{  0, 1, 10, 15 },	/* 12 dB */
7518 	{  0, 2,  6, 15 },	/* 13 dB */
7519 	{  0, 2,  7, 15 },	/* 14 dB */
7520 	{  0, 2,  8, 15 },	/* 15 dB */
7521 	{  0, 2,  9, 15 },	/* 16 dB */
7522 };
7523 
7524 /*
7525  * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
7526  * These are mostly used for mez cards going through connectors
7527  * and backplane traces, but can be used to add other "unusual"
7528  * table values as well.
7529  */
7530 static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
7531 	/* amp, pre, main, post */
7532 	{  0, 0, 0,  1 },	/* QMH7342 backplane settings */
7533 	{  0, 0, 0,  1 },	/* QMH7342 backplane settings */
7534 	{  0, 0, 0,  2 },	/* QMH7342 backplane settings */
7535 	{  0, 0, 0,  2 },	/* QMH7342 backplane settings */
7536 	{  0, 0, 0,  3 },	/* QMH7342 backplane settings */
7537 	{  0, 0, 0,  4 },	/* QMH7342 backplane settings */
7538 	{  0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */
7539 	{  0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */
7540 	{  0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */
7541 	{  0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */
7542 	{  0, 1, 0,  9 },	/* QME7342 backplane settings 1.0 */
7543 	{  0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */
7544 	{  0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */
7545 	{  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
7546 	{  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
7547 	{  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
7548 	{  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
7549 	{  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
7550 };
7551 
7552 static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
7553 	/* amp, pre, main, post */
7554 	{  0, 0, 0,  7 },	/* QMH7342 backplane settings */
7555 	{  0, 0, 0,  7 },	/* QMH7342 backplane settings */
7556 	{  0, 0, 0,  8 },	/* QMH7342 backplane settings */
7557 	{  0, 0, 0,  8 },	/* QMH7342 backplane settings */
7558 	{  0, 0, 0,  9 },	/* QMH7342 backplane settings */
7559 	{  0, 0, 0, 10 },	/* QMH7342 backplane settings */
7560 	{  0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */
7561 	{  0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */
7562 	{  0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */
7563 	{  0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */
7564 	{  0, 1, 0,  9 },	/* QME7342 backplane settings 1.0 */
7565 	{  0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */
7566 	{  0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */
7567 	{  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
7568 	{  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
7569 	{  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
7570 	{  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
7571 	{  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
7572 };
7573 
7574 static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
7575 	/* amp, pre, main, post */
7576 	{  0, 1,  0,  4 },	/* QMH7342 backplane settings */
7577 	{  0, 1,  0,  5 },	/* QMH7342 backplane settings */
7578 	{  0, 1,  0,  6 },	/* QMH7342 backplane settings */
7579 	{  0, 1,  0,  8 },	/* QMH7342 backplane settings */
7580 	{  0, 1,  0, 10 },	/* QMH7342 backplane settings */
7581 	{  0, 1,  0, 12 },	/* QMH7342 backplane settings */
7582 	{  0, 1,  4, 15 },	/* QME7342 backplane settings 1.0 */
7583 	{  0, 1,  3, 15 },	/* QME7342 backplane settings 1.0 */
7584 	{  0, 1,  0, 12 },	/* QME7342 backplane settings 1.0 */
7585 	{  0, 1,  0, 11 },	/* QME7342 backplane settings 1.0 */
7586 	{  0, 1,  0,  9 },	/* QME7342 backplane settings 1.0 */
7587 	{  0, 1,  0, 14 },	/* QME7342 backplane settings 1.0 */
7588 	{  0, 1,  2, 15 },	/* QME7342 backplane settings 1.0 */
7589 	{  0, 1,  0, 11 },      /* QME7342 backplane settings 1.1 */
7590 	{  0, 1,  0,  7 },      /* QME7342 backplane settings 1.1 */
7591 	{  0, 1,  0,  9 },      /* QME7342 backplane settings 1.1 */
7592 	{  0, 1,  0,  6 },      /* QME7342 backplane settings 1.1 */
7593 	{  0, 1,  0,  8 },      /* QME7342 backplane settings 1.1 */
7594 };
7595 
7596 static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
7597 	/* amp, pre, main, post */
7598 	{ 0, 0, 0, 0 },         /* QME7342 mfg settings */
7599 	{ 0, 0, 0, 6 },         /* QME7342 P2 mfg settings */
7600 };
7601 
7602 static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
7603 					       unsigned atten)
7604 {
7605 	/*
7606 	 * The attenuation table starts at 2dB for entry 1,
7607 	 * with entry 0 being the loopback entry.
7608 	 */
7609 	if (atten <= 2)
7610 		atten = 1;
7611 	else if (atten > TXDDS_TABLE_SZ)
7612 		atten = TXDDS_TABLE_SZ - 1;
7613 	else
7614 		atten--;
7615 	return txdds + atten;
7616 }
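
/*
 * Worked example (illustration only): get_atten_table() maps a cable
 * attenuation in dB to a TxDDS index, with entry 0 reserved for
 * loopback.  An attenuation of 2 dB or less maps to index 1, 5 dB maps
 * to index 4, and anything beyond TXDDS_TABLE_SZ is clamped to index
 * TXDDS_TABLE_SZ - 1.
 */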
7617 
7618 /*
7619  * if override is set, the module parameter txselect has a value
7620  * for this specific port, so use it, rather than our normal mechanism.
7621  */
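/*
 * Pick the SDR/DDR/QDR Tx DDS entries for this port: first the table of
 * known cables, then active cables (board-trace attenuation only), then
 * the attenuation values from the cable EEPROM, and finally the txselect
 * module parameter index into the normal, "extra" or mfg tables.
 */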
7622 static void find_best_ent(struct qib_pportdata *ppd,
7623 			  const struct txdds_ent **sdr_dds,
7624 			  const struct txdds_ent **ddr_dds,
7625 			  const struct txdds_ent **qdr_dds, int override)
7626 {
7627 	struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
7628 	int idx;
7629 
7630 	/* Search table of known cables */
7631 	for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
7632 		const struct vendor_txdds_ent *v = vendor_txdds + idx;
7633 
7634 		if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
7635 		    (!v->partnum ||
7636 		     !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
7637 			*sdr_dds = &v->sdr;
7638 			*ddr_dds = &v->ddr;
7639 			*qdr_dds = &v->qdr;
7640 			return;
7641 		}
7642 	}
7643 
7644 	/* Active cables don't have attenuation so we only set SERDES
7645 	 * settings to account for the attenuation of the board traces. */
7646 	if (!override && QSFP_IS_ACTIVE(qd->tech)) {
7647 		*sdr_dds = txdds_sdr + ppd->dd->board_atten;
7648 		*ddr_dds = txdds_ddr + ppd->dd->board_atten;
7649 		*qdr_dds = txdds_qdr + ppd->dd->board_atten;
7650 		return;
7651 	}
7652 
7653 	if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
7654 						      qd->atten[1])) {
7655 		*sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
7656 		*ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
7657 		*qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
7658 		return;
7659 	} else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
7660 		/*
7661 		 * If we have no (or incomplete) data from the cable
7662 		 * EEPROM, or no QSFP, or override is set, use the
7663 		 * module parameter value to index into the attenuation
7664 		 * table.
7665 		 */
7666 		idx = ppd->cpspec->no_eep;
7667 		*sdr_dds = &txdds_sdr[idx];
7668 		*ddr_dds = &txdds_ddr[idx];
7669 		*qdr_dds = &txdds_qdr[idx];
7670 	} else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
7671 		/* similar to above, but index into the "extra" table. */
7672 		idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
7673 		*sdr_dds = &txdds_extra_sdr[idx];
7674 		*ddr_dds = &txdds_extra_ddr[idx];
7675 		*qdr_dds = &txdds_extra_qdr[idx];
7676 	} else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
7677 		   ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
7678 					  TXDDS_MFG_SZ)) {
7679 		idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
7680 		pr_info("IB%u:%u use idx %u into txdds_mfg\n",
7681 			ppd->dd->unit, ppd->port, idx);
7682 		*sdr_dds = &txdds_extra_mfg[idx];
7683 		*ddr_dds = &txdds_extra_mfg[idx];
7684 		*qdr_dds = &txdds_extra_mfg[idx];
7685 	} else {
7686 		/* this shouldn't happen, it's range checked */
7687 		*sdr_dds = txdds_sdr + qib_long_atten;
7688 		*ddr_dds = txdds_ddr + qib_long_atten;
7689 		*qdr_dds = txdds_qdr + qib_long_atten;
7690 	}
7691 }
7692 
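/*
 * Program the Tx DDS (drive/de-emphasis) table for this port.  Entry 0 of
 * the SDR, DDR and QDR blocks gets the best match for the attached cable
 * (or the txselect override); the remaining entries are filled from the
 * default tables, except for mez cards or an override, where the selected
 * entry is used for every slot.  If the link is already up, the serdes
 * parameters for the active speed are written immediately as well.
 */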
7693 static void init_txdds_table(struct qib_pportdata *ppd, int override)
7694 {
7695 	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7696 	struct txdds_ent *dds;
7697 	int idx;
7698 	int single_ent = 0;
7699 
7700 	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
7701 
7702 	/* for mez cards or override, use the selected value for all entries */
7703 	if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
7704 		single_ent = 1;
7705 
7706 	/* Fill in the first entry with the best entry found. */
7707 	set_txdds(ppd, 0, sdr_dds);
7708 	set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
7709 	set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
7710 	if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
7711 		QIBL_LINKACTIVE)) {
7712 		dds = (struct txdds_ent *)(ppd->link_speed_active ==
7713 					   QIB_IB_QDR ?  qdr_dds :
7714 					   (ppd->link_speed_active ==
7715 					    QIB_IB_DDR ? ddr_dds : sdr_dds));
7716 		write_tx_serdes_param(ppd, dds);
7717 	}
7718 
7719 	/* Fill in the remaining entries with the default table values. */
7720 	for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
7721 		set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
7722 		set_txdds(ppd, idx + TXDDS_TABLE_SZ,
7723 			  single_ent ? ddr_dds : txdds_ddr + idx);
7724 		set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
7725 			  single_ent ? qdr_dds : txdds_qdr + idx);
7726 	}
7727 }
7728 
7729 #define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
7730 #define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
7731 #define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
7732 #define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
7733 #define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
7734 #define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
7735 #define AHB_TRANS_TRIES 10
7736 
7737 /*
7738  * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3,
7739  * 5=subsystem, which is why most calls use "chan + (chan >> 1)"
7740  * for the channel argument.
7741  */
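/*
 * ahb_mod() does a read-modify-write of the addressed AHB register: only
 * the bits in 'mask' are changed to 'data', other bits are preserved (a
 * zero mask results in a pure read, an all-ones mask in a pure write).
 * Returns the resulting register value, or 0xBAD0BAD if the AHB interface
 * never reported ready.
 */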
7742 static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
7743 		    u32 data, u32 mask)
7744 {
7745 	u32 rd_data, wr_data, sz_mask;
7746 	u64 trans, acc, prev_acc;
7747 	u32 ret = 0xBAD0BAD;
7748 	int tries;
7749 
7750 	prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
7751 	/* From this point on, make sure we restore the prior access setting on return */
7752 	acc = (quad << 1) | 1;
7753 	qib_write_kreg(dd, KR_AHB_ACC, acc);
7754 
7755 	for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7756 		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7757 		if (trans & AHB_TRANS_RDY)
7758 			break;
7759 	}
7760 	if (tries >= AHB_TRANS_TRIES) {
7761 		qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
7762 		goto bail;
7763 	}
7764 
7765 	/* If mask is not all 1s, we need to read, but different SerDes
7766 	 * entities have different sizes
7767 	 */
7768 	sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
7769 	wr_data = data & mask & sz_mask;
7770 	if ((~mask & sz_mask) != 0) {
7771 		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7772 		qib_write_kreg(dd, KR_AHB_TRANS, trans);
7773 
7774 		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7775 			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7776 			if (trans & AHB_TRANS_RDY)
7777 				break;
7778 		}
7779 		if (tries >= AHB_TRANS_TRIES) {
7780 			qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
7781 				    AHB_TRANS_TRIES);
7782 			goto bail;
7783 		}
7784 		/* Re-read in case host split reads and read data first */
7785 		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7786 		rd_data = (u32)(trans >> AHB_DATA_LSB);
7787 		wr_data |= (rd_data & ~mask & sz_mask);
7788 	}
7789 
7790 	/* If mask is not zero, we need to write. */
7791 	if (mask & sz_mask) {
7792 		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7793 		trans |= ((u64)wr_data << AHB_DATA_LSB);
7794 		trans |= AHB_WR;
7795 		qib_write_kreg(dd, KR_AHB_TRANS, trans);
7796 
7797 		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7798 			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7799 			if (trans & AHB_TRANS_RDY)
7800 				break;
7801 		}
7802 		if (tries >= AHB_TRANS_TRIES) {
7803 			qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
7804 				    AHB_TRANS_TRIES);
7805 			goto bail;
7806 		}
7807 	}
7808 	ret = wr_data;
7809 bail:
7810 	qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
7811 	return ret;
7812 }
7813 
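/*
 * Apply the same masked write to the given IB SerDes register address on
 * every channel of this port; each write is followed by a read-back of the
 * register (ahb_mod() with a zero mask), whose result is ignored.
 */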
7814 static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7815 			     unsigned mask)
7816 {
7817 	struct qib_devdata *dd = ppd->dd;
7818 	int chan;
7819 
7820 	for (chan = 0; chan < SERDES_CHANS; ++chan) {
7821 		ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7822 			data, mask);
7823 		ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7824 			0, 0);
7825 	}
7826 }
7827 
7828 static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7829 {
7830 	u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7831 	u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
7832 
7833 	if (enable && !state) {
7834 		pr_info("IB%u:%u Turning LOS on\n",
7835 			ppd->dd->unit, ppd->port);
7836 		data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7837 	} else if (!enable && state) {
7838 		pr_info("IB%u:%u Turning LOS off\n",
7839 			ppd->dd->unit, ppd->port);
7840 		data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7841 	}
7842 	qib_write_kreg_port(ppd, krp_serdesctrl, data);
7843 }
7844 
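/* Dispatch to the rev1 or later-revision SerDes bring-up sequence. */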
7845 static int serdes_7322_init(struct qib_pportdata *ppd)
7846 {
7847 	int ret = 0;
7848 
7849 	if (ppd->dd->cspec->r1)
7850 		ret = serdes_7322_init_old(ppd);
7851 	else
7852 		ret = serdes_7322_init_new(ppd);
7853 	return ret;
7854 }
7855 
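/* SerDes bring-up sequence used on rev1 chips. */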
7856 static int serdes_7322_init_old(struct qib_pportdata *ppd)
7857 {
7858 	u32 le_val;
7859 
7860 	/*
7861 	 * Initialize the Tx DDS tables.  Also done every QSFP event,
7862 	 * for adapters with QSFP
7863 	 */
7864 	init_txdds_table(ppd, 0);
7865 
7866 	/* ensure no tx overrides from earlier driver loads */
7867 	qib_write_kreg_port(ppd, krp_tx_deemph_override,
7868 		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7869 		reset_tx_deemphasis_override));
7870 
7871 	/* Patch some SerDes defaults to "Better for IB" */
7872 	/* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
7873 	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7874 
7875 	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7876 	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7877 	/* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
7878 	ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
7879 
7880 	/* May be overridden in qsfp_7322_event */
7881 	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7882 	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7883 
7884 	/* enable LE1 adaptation for all but QME, which is disabled */
7885 	le_val = IS_QME(ppd->dd) ? 0 : 1;
7886 	ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
7887 
7888 	/* Clear cmode-override, may be set from older driver */
7889 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7890 
7891 	/* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
7892 	ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
7893 
7894 	/* setup LoS params; these are subsystem, so chan == 5 */
7895 	/* LoS filter threshold_count on, ch 0-3, set to 8 */
7896 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7897 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7898 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7899 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7900 
7901 	/* LoS filter threshold_count off, ch 0-3, set to 4 */
7902 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7903 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7904 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7905 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7906 
7907 	/* LoS filter select enabled */
7908 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7909 
7910 	/* LoS target data:  SDR=4, DDR=2, QDR=1 */
7911 	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7912 	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7913 	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7914 
7915 	serdes_7322_los_enable(ppd, 1);
7916 
7917 	/* rxbistena; set to 0 to avoid effects of it switching later */
7918 	ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
7919 
7920 	/* Configure 4 DFE taps, and only they adapt */
7921 	ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
7922 
7923 	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7924 	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7925 	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7926 
7927 	/*
7928 	 * Set receive adaptation mode.  SDR and DDR adaptation are
7929 	 * always on, and QDR is initially enabled; later disabled.
7930 	 */
7931 	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7932 	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7933 	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7934 			    ppd->dd->cspec->r1 ?
7935 			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7936 	ppd->cpspec->qdr_dfe_on = 1;
7937 
7938 	/* FLoop LOS gate: PPM filter  enabled */
7939 	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7940 
7941 	/* rx offset center enabled */
7942 	ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
7943 
7944 	if (!ppd->dd->cspec->r1) {
7945 		ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
7946 		ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
7947 	}
7948 
7949 	/* Set the frequency loop bandwidth to 15 */
7950 	ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
7951 
7952 	return 0;
7953 }
7954 
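/*
 * SerDes bring-up sequence used on non-rev1 chips, following the
 * LSI-suggested order: calibration setup, RX reset, LoS parameter setup,
 * RX latch calibration (polled for up to 500 ms), then RX bring-up and
 * Tx DDS table initialization.
 */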
7955 static int serdes_7322_init_new(struct qib_pportdata *ppd)
7956 {
7957 	unsigned long tend;
7958 	u32 le_val, rxcaldone;
7959 	int chan, chan_done = (1 << SERDES_CHANS) - 1;
7960 
7961 	/* Clear cmode-override, may be set from older driver */
7962 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7963 
7964 	/* ensure no tx overrides from earlier driver loads */
7965 	qib_write_kreg_port(ppd, krp_tx_deemph_override,
7966 		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7967 		reset_tx_deemphasis_override));
7968 
7969 	/* START OF LSI SUGGESTED SERDES BRINGUP */
7970 	/* Reset - Calibration Setup */
7971 	/*       Stop DFE adaptation */
7972 	ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
7973 	/*       Disable LE1 */
7974 	ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
7975 	/*       Disable autoadapt for LE1 */
7976 	ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
7977 	/*       Disable LE2 */
7978 	ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
7979 	/*       Disable VGA */
7980 	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
7981 	/*       Disable AFE Offset Cancel */
7982 	ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
7983 	/*       Disable Timing Loop */
7984 	ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
7985 	/*       Disable Frequency Loop */
7986 	ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
7987 	/*       Disable Baseline Wander Correction */
7988 	ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
7989 	/*       Disable RX Calibration */
7990 	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
7991 	/*       Disable RX Offset Calibration */
7992 	ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
7993 	/*       Select BB CDR */
7994 	ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
7995 	/*       CDR Step Size */
7996 	ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
7997 	/*       Enable phase Calibration */
7998 	ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
7999 	/*       DFE Bandwidth [2:14-12] */
8000 	ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
8001 	/*       DFE Config (4 taps only) */
8002 	ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
8003 	/*       Gain Loop Bandwidth */
8004 	if (!ppd->dd->cspec->r1) {
8005 		ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
8006 		ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
8007 	} else {
8008 		ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
8009 	}
8010 	/*       Baseline Wander Correction Gain [13:4-0] (leave as default) */
8011 	/*       Baseline Wander Correction Gain [3:7-5] (leave as default) */
8012 	/*       Data Rate Select [5:7-6] (leave as default) */
8013 	/*       RX Parallel Word Width [3:10-8] (leave as default) */
8014 
8015 	/* RX RESET */
8016 	/*       Single- or Multi-channel reset */
8017 	/*       RX Analog reset */
8018 	/*       RX Digital reset */
8019 	ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
8020 	msleep(20);
8021 	/*       RX Analog reset */
8022 	ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
8023 	msleep(20);
8024 	/*       RX Digital reset */
8025 	ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
8026 	msleep(20);
8027 
8028 	/* setup LoS params; these are subsystem, so chan == 5 */
8029 	/* LoS filter threshold_count on, ch 0-3, set to 8 */
8030 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
8031 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
8032 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
8033 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
8034 
8035 	/* LoS filter threshold_count off, ch 0-3, set to 4 */
8036 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
8037 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
8038 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
8039 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
8040 
8041 	/* LoS filter select enabled */
8042 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
8043 
8044 	/* LoS target data:  SDR=4, DDR=2, QDR=1 */
8045 	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
8046 	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
8047 	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
8048 
8049 	/* Turn on LOS on initial SERDES init */
8050 	serdes_7322_los_enable(ppd, 1);
8051 	/* FLoop LOS gate: PPM filter  enabled */
8052 	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
8053 
8054 	/* RX LATCH CALIBRATION */
8055 	/*       Enable Eyefinder Phase Calibration latch */
8056 	ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
8057 	/*       Enable RX Offset Calibration latch */
8058 	ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
8059 	msleep(20);
8060 	/*       Start Calibration */
8061 	ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
8062 	tend = jiffies + msecs_to_jiffies(500);
8063 	while (chan_done && !time_is_before_jiffies(tend)) {
8064 		msleep(20);
8065 		for (chan = 0; chan < SERDES_CHANS; ++chan) {
8066 			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8067 					    (chan + (chan >> 1)),
8068 					    25, 0, 0);
8069 			if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
8070 			    (~chan_done & (1 << chan)) == 0)
8071 				chan_done &= ~(1 << chan);
8072 		}
8073 	}
8074 	if (chan_done) {
8075 		pr_info("Serdes %d calibration not done after .5 sec: 0x%x\n",
8076 			 IBSD(ppd->hw_pidx), chan_done);
8077 	} else {
8078 		for (chan = 0; chan < SERDES_CHANS; ++chan) {
8079 			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8080 					    (chan + (chan >> 1)),
8081 					    25, 0, 0);
8082 			if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
8083 				pr_info("Serdes %d chan %d calibration failed\n",
8084 					IBSD(ppd->hw_pidx), chan);
8085 		}
8086 	}
8087 
8088 	/*       Turn off Calibration */
8089 	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
8090 	msleep(20);
8091 
8092 	/* BRING RX UP */
8093 	/*       Set LE2 value (May be overridden in qsfp_7322_event) */
8094 	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
8095 	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
8096 	/*       Set LE2 Loop bandwidth */
8097 	ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
8098 	/*       Enable LE2 */
8099 	ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
8100 	msleep(20);
8101 	/*       Enable H0 only */
8102 	ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
8103 	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
8104 	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
8105 	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
8106 	/*       Enable VGA */
8107 	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
8108 	msleep(20);
8109 	/*       Set Frequency Loop Bandwidth */
8110 	ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
8111 	/*       Enable Frequency Loop */
8112 	ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
8113 	/*       Set Timing Loop Bandwidth */
8114 	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
8115 	/*       Enable Timing Loop */
8116 	ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
8117 	msleep(50);
8118 	/*       Enable DFE
8119 	 *       Set receive adaptation mode.  SDR and DDR adaptation are
8120 	 *       always on, and QDR is initially enabled; later disabled.
8121 	 */
8122 	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
8123 	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
8124 	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
8125 			    ppd->dd->cspec->r1 ?
8126 			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
8127 	ppd->cpspec->qdr_dfe_on = 1;
8128 	/*       Disable LE1  */
8129 	ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
8130 	/*       Disable auto adapt for LE1 */
8131 	ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
8132 	msleep(20);
8133 	/*       Enable AFE Offset Cancel */
8134 	ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
8135 	/*       Enable Baseline Wander Correction */
8136 	ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
8137 	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
8138 	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
8139 	/* VGA output common mode */
8140 	ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
8141 
8142 	/*
8143 	 * Initialize the Tx DDS tables.  Also done every QSFP event,
8144 	 * for adapters with QSFP
8145 	 */
8146 	init_txdds_table(ppd, 0);
8147 
8148 	return 0;
8149 }
8150 
8151 /* start adjust QMH serdes parameters */
8152 
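/* Force the manual H1 code for one channel (register 9, bits [14:9]). */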
8153 static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
8154 {
8155 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8156 		9, code << 9, 0x3f << 9);
8157 }
8158 
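/*
 * Enable or disable manual H1 mode for one channel (register 1,
 * bits [14:10]); the tapenable argument is currently unused.
 */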
8159 static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
8160 	int enable, u32 tapenable)
8161 {
8162 	if (enable)
8163 		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8164 			1, 3 << 10, 0x1f << 10);
8165 	else
8166 		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8167 			1, 0, 0x1f << 10);
8168 }
8169 
8170 /* Set clock to 1, 0, 1, 0 */
8171 static void clock_man(struct qib_pportdata *ppd, int chan)
8172 {
8173 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8174 		4, 0x4000, 0x4000);
8175 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8176 		4, 0, 0x4000);
8177 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8178 		4, 0x4000, 0x4000);
8179 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8180 		4, 0, 0x4000);
8181 }
8182 
8183 /*
8184  * Write the current Tx serdes pre, post, main, amp settings into the serdes.
8185  * The caller must pass the settings appropriate for the current speed,
8186  * or not care if they are correct for the current speed.
8187  */
8188 static void write_tx_serdes_param(struct qib_pportdata *ppd,
8189 				  struct txdds_ent *txdds)
8190 {
8191 	u64 deemph;
8192 
8193 	deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
8194 	/* field names for amp, main, post, pre, respectively */
8195 	deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
8196 		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
8197 		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
8198 		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
8199 
8200 	deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8201 			   tx_override_deemphasis_select);
8202 	deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8203 		    txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8204 				       txampcntl_d2a);
8205 	deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8206 		     txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8207 				   txc0_ena);
8208 	deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8209 		     txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8210 				    txcp1_ena);
8211 	deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8212 		     txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8213 				    txcn1_ena);
8214 	qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
8215 }
8216 
8217 /*
8218  * Set the parameters for mez cards on link bounce, so they are
8219  * always exactly what was requested.  Similar logic to init_txdds
8220  * but does just the serdes.
8221  */
8222 static void adj_tx_serdes(struct qib_pportdata *ppd)
8223 {
8224 	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
8225 	struct txdds_ent *dds;
8226 
8227 	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
8228 	dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
8229 		qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
8230 				ddr_dds : sdr_dds));
8231 	write_tx_serdes_param(ppd, dds);
8232 }
8233 
8234 /* set QDR forced value for H1, if needed */
8235 static void force_h1(struct qib_pportdata *ppd)
8236 {
8237 	int chan;
8238 
8239 	ppd->cpspec->qdr_reforce = 0;
8240 	if (!ppd->dd->cspec->r1)
8241 		return;
8242 
8243 	for (chan = 0; chan < SERDES_CHANS; chan++) {
8244 		set_man_mode_h1(ppd, chan, 1, 0);
8245 		set_man_code(ppd, chan, ppd->cpspec->h1_val);
8246 		clock_man(ppd, chan);
8247 		set_man_mode_h1(ppd, chan, 0, 0);
8248 	}
8249 }
8250 
8251 #define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
8252 #define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
8253 
8254 #define R_OPCODE_LSB 3
8255 #define R_OP_NOP 0
8256 #define R_OP_SHIFT 2
8257 #define R_OP_UPDATE 3
8258 #define R_TDI_LSB 2
8259 #define R_TDO_LSB 1
8260 #define R_RDY 1
8261 
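/* Enable software access to the SPC JTAG scan-chain interface. */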
8262 static int qib_r_grab(struct qib_devdata *dd)
8263 {
8264 	u64 val = SJA_EN;
8265 
8266 	qib_write_kreg(dd, kr_r_access, val);
8267 	qib_read_kreg32(dd, kr_scratch);
8268 	return 0;
8269 }
8270 
8271 /* qib_r_wait_for_rdy() not only waits for the ready bit, it
8272  * returns the current state of R_TDO
8273  */
8274 static int qib_r_wait_for_rdy(struct qib_devdata *dd)
8275 {
8276 	u64 val;
8277 	int timeout;
8278 
8279 	for (timeout = 0; timeout < 100 ; ++timeout) {
8280 		val = qib_read_kreg32(dd, kr_r_access);
8281 		if (val & R_RDY)
8282 			return (val >> R_TDO_LSB) & 1;
8283 	}
8284 	return -1;
8285 }
8286 
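/*
 * Shift 'len' bits through the scan chain selected by 'bisten'.  Bits from
 * 'inp' (when non-NULL) are driven on TDI, and the TDO value sampled before
 * each shift is stored into 'outp' (when non-NULL).  Returns the number of
 * bits processed, or -1 if the interface did not become ready.
 */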
8287 static int qib_r_shift(struct qib_devdata *dd, int bisten,
8288 		       int len, u8 *inp, u8 *outp)
8289 {
8290 	u64 valbase, val;
8291 	int ret, pos;
8292 
8293 	valbase = SJA_EN | (bisten << BISTEN_LSB) |
8294 		(R_OP_SHIFT << R_OPCODE_LSB);
8295 	ret = qib_r_wait_for_rdy(dd);
8296 	if (ret < 0)
8297 		goto bail;
8298 	for (pos = 0; pos < len; ++pos) {
8299 		val = valbase;
8300 		if (outp) {
8301 			outp[pos >> 3] &= ~(1 << (pos & 7));
8302 			outp[pos >> 3] |= (ret << (pos & 7));
8303 		}
8304 		if (inp) {
8305 			int tdi = inp[pos >> 3] >> (pos & 7);
8306 
8307 			val |= ((tdi & 1) << R_TDI_LSB);
8308 		}
8309 		qib_write_kreg(dd, kr_r_access, val);
8310 		qib_read_kreg32(dd, kr_scratch);
8311 		ret = qib_r_wait_for_rdy(dd);
8312 		if (ret < 0)
8313 			break;
8314 	}
8315 	/* Restore to NOP between operations. */
8316 	val =  SJA_EN | (bisten << BISTEN_LSB);
8317 	qib_write_kreg(dd, kr_r_access, val);
8318 	qib_read_kreg32(dd, kr_scratch);
8319 	ret = qib_r_wait_for_rdy(dd);
8320 
8321 	if (ret >= 0)
8322 		ret = pos;
8323 bail:
8324 	return ret;
8325 }
8326 
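/* Issue an UPDATE opcode so previously shifted bits take effect. */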
8327 static int qib_r_update(struct qib_devdata *dd, int bisten)
8328 {
8329 	u64 val;
8330 	int ret;
8331 
8332 	val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
8333 	ret = qib_r_wait_for_rdy(dd);
8334 	if (ret >= 0) {
8335 		qib_write_kreg(dd, kr_r_access, val);
8336 		qib_read_kreg32(dd, kr_scratch);
8337 	}
8338 	return ret;
8339 }
8340 
8341 #define BISTEN_PORT_SEL 15
8342 #define LEN_PORT_SEL 625
8343 #define BISTEN_AT 17
8344 #define LEN_AT 156
8345 #define BISTEN_ETM 16
8346 #define LEN_ETM 632
8347 
8348 #define BIT2BYTE(x) (((x) +  BITS_PER_BYTE - 1) / BITS_PER_BYTE)
8349 
8350 /* these are common for all IB port use cases. */
8351 static u8 reset_at[BIT2BYTE(LEN_AT)] = {
8352 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8353 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8354 };
8355 static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
8356 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8357 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8358 	0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
8359 	0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
8360 	0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
8361 	0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
8362 	0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8363 	0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
8364 };
8365 static u8 at[BIT2BYTE(LEN_AT)] = {
8366 	0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
8367 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8368 };
8369 
8370 /* used for IB1 or IB2, only one in use */
8371 static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
8372 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8373 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8374 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8375 	0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
8376 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8377 	0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
8378 	0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
8379 	0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
8380 };
8381 
8382 /* used when both IB1 and IB2 are in use */
8383 static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
8384 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8385 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
8386 	0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8387 	0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
8388 	0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
8389 	0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
8390 	0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
8391 	0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
8392 };
8393 
8394 /* used when only IB1 is in use */
8395 static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
8396 	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8397 	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8398 	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8399 	0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8400 	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8401 	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8402 	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8403 	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8404 };
8405 
8406 /* used when only IB2 is in use */
8407 static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
8408 	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
8409 	0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
8410 	0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8411 	0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8412 	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
8413 	0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8414 	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8415 	0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
8416 };
8417 
8418 /* used when both IB1 and IB2 are in use */
8419 static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
8420 	0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8421 	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8422 	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8423 	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8424 	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8425 	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
8426 	0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8427 	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8428 };
8429 
8430 /*
8431  * Do setup to properly handle IB link recovery; if 'both' is set, we
8432  * are initializing to cover both ports; otherwise we are initializing
8433  * to cover a single port card, or the port has reached INIT and we may
8434  * need to switch coverage types.
8435  */
8436 static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
8437 {
8438 	u8 *portsel, *etm;
8439 	struct qib_devdata *dd = ppd->dd;
8440 
8441 	if (!ppd->dd->cspec->r1)
8442 		return;
8443 	if (!both) {
8444 		dd->cspec->recovery_ports_initted++;
8445 		ppd->cpspec->recovery_init = 1;
8446 	}
8447 	if (!both && dd->cspec->recovery_ports_initted == 1) {
8448 		portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
8449 		etm = atetm_1port;
8450 	} else {
8451 		portsel = portsel_2port;
8452 		etm = atetm_2port;
8453 	}
8454 
8455 	if (qib_r_grab(dd) < 0 ||
8456 		qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
8457 		qib_r_update(dd, BISTEN_ETM) < 0 ||
8458 		qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
8459 		qib_r_update(dd, BISTEN_AT) < 0 ||
8460 		qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
8461 			    portsel, NULL) < 0 ||
8462 		qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
8463 		qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
8464 		qib_r_update(dd, BISTEN_AT) < 0 ||
8465 		qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
8466 		qib_r_update(dd, BISTEN_ETM) < 0)
8467 		qib_dev_err(dd, "Failed IB link recovery setup\n");
8468 }
8469 
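/*
 * Check RXE state after single-port link recovery: briefly freeze the chip
 * and read kr_act_fmask.  If it reads back as zero, the chip is unusable
 * until a power cycle, so stay frozen with interrupts off; otherwise clear
 * the SerDes PClk-not-detected error, unfreeze, and take the IBC back out
 * of reset.
 */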
8470 static void check_7322_rxe_status(struct qib_pportdata *ppd)
8471 {
8472 	struct qib_devdata *dd = ppd->dd;
8473 	u64 fmask;
8474 
8475 	if (dd->cspec->recovery_ports_initted != 1)
8476 		return; /* rest doesn't apply to dualport */
8477 	qib_write_kreg(dd, kr_control, dd->control |
8478 		       SYM_MASK(Control, FreezeMode));
8479 	(void)qib_read_kreg64(dd, kr_scratch);
8480 	udelay(3); /* ibcreset asserted 400ns, be sure that's over */
8481 	fmask = qib_read_kreg64(dd, kr_act_fmask);
8482 	if (!fmask) {
8483 		/*
8484 		 * require a powercycle before we'll work again, and make
8485 		 * sure we get no more interrupts, and don't turn off
8486 		 * freeze.
8487 		 */
8488 		ppd->dd->cspec->stay_in_freeze = 1;
8489 		qib_7322_set_intr_state(ppd->dd, 0);
8490 		qib_write_kreg(dd, kr_fmask, 0ULL);
8491 		qib_dev_err(dd, "HCA unusable until powercycled\n");
8492 		return; /* eventually reset */
8493 	}
8494 
8495 	qib_write_kreg(ppd->dd, kr_hwerrclear,
8496 	    SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
8497 
8498 	/* don't do the full clear_freeze(), not needed for this */
8499 	qib_write_kreg(dd, kr_control, dd->control);
8500 	qib_read_kreg32(dd, kr_scratch);
8501 	/* take IBC out of reset */
8502 	if (ppd->link_speed_supported) {
8503 		ppd->cpspec->ibcctrl_a &=
8504 			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
8505 		qib_write_kreg_port(ppd, krp_ibcctrl_a,
8506 				    ppd->cpspec->ibcctrl_a);
8507 		qib_read_kreg32(dd, kr_scratch);
8508 		if (ppd->lflags & QIBL_IB_LINK_DISABLED)
8509 			qib_set_ib_7322_lstate(ppd, 0,
8510 				QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
8511 	}
8512 }
8513