1  #ifndef _HFI1_KERNEL_H
2  #define _HFI1_KERNEL_H
3  /*
4   * Copyright(c) 2015-2018 Intel Corporation.
5   *
6   * This file is provided under a dual BSD/GPLv2 license.  When using or
7   * redistributing this file, you may do so under either license.
8   *
9   * GPL LICENSE SUMMARY
10   *
11   * This program is free software; you can redistribute it and/or modify
12   * it under the terms of version 2 of the GNU General Public License as
13   * published by the Free Software Foundation.
14   *
15   * This program is distributed in the hope that it will be useful, but
16   * WITHOUT ANY WARRANTY; without even the implied warranty of
17   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18   * General Public License for more details.
19   *
20   * BSD LICENSE
21   *
22   * Redistribution and use in source and binary forms, with or without
23   * modification, are permitted provided that the following conditions
24   * are met:
25   *
26   *  - Redistributions of source code must retain the above copyright
27   *    notice, this list of conditions and the following disclaimer.
28   *  - Redistributions in binary form must reproduce the above copyright
29   *    notice, this list of conditions and the following disclaimer in
30   *    the documentation and/or other materials provided with the
31   *    distribution.
32   *  - Neither the name of Intel Corporation nor the names of its
33   *    contributors may be used to endorse or promote products derived
34   *    from this software without specific prior written permission.
35   *
36   * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
37   * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
38   * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
39   * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
40   * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
41   * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
42   * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
43   * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
44   * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
45   * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
46   * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
47   *
48   */
49  
50  #include <linux/interrupt.h>
51  #include <linux/pci.h>
52  #include <linux/dma-mapping.h>
53  #include <linux/mutex.h>
54  #include <linux/list.h>
55  #include <linux/scatterlist.h>
56  #include <linux/slab.h>
57  #include <linux/idr.h>
58  #include <linux/io.h>
59  #include <linux/fs.h>
60  #include <linux/completion.h>
61  #include <linux/kref.h>
62  #include <linux/sched.h>
63  #include <linux/cdev.h>
64  #include <linux/delay.h>
65  #include <linux/kthread.h>
66  #include <linux/i2c.h>
67  #include <linux/i2c-algo-bit.h>
68  #include <rdma/ib_hdrs.h>
69  #include <rdma/opa_addr.h>
70  #include <linux/rhashtable.h>
71  #include <linux/netdevice.h>
72  #include <rdma/rdma_vt.h>
73  
74  #include "chip_registers.h"
75  #include "common.h"
76  #include "verbs.h"
77  #include "pio.h"
78  #include "chip.h"
79  #include "mad.h"
80  #include "qsfp.h"
81  #include "platform.h"
82  #include "affinity.h"
83  
84  /* bumped 1 from s/w major version of TrueScale */
85  #define HFI1_CHIP_VERS_MAJ 3U
86  
87  /* don't care about this except printing */
88  #define HFI1_CHIP_VERS_MIN 0U
89  
90  /* The Organization Unique Identifier (Mfg code), and its position in GUID */
91  #define HFI1_OUI 0x001175
92  #define HFI1_OUI_LSB 40
93  
94  #define DROP_PACKET_OFF		0
95  #define DROP_PACKET_ON		1
96  
97  #define NEIGHBOR_TYPE_HFI		0
98  #define NEIGHBOR_TYPE_SWITCH	1
99  
100  extern unsigned long hfi1_cap_mask;
101  #define HFI1_CAP_KGET_MASK(mask, cap) ((mask) & HFI1_CAP_##cap)
102  #define HFI1_CAP_UGET_MASK(mask, cap) \
103  	(((mask) >> HFI1_CAP_USER_SHIFT) & HFI1_CAP_##cap)
104  #define HFI1_CAP_KGET(cap) (HFI1_CAP_KGET_MASK(hfi1_cap_mask, cap))
105  #define HFI1_CAP_UGET(cap) (HFI1_CAP_UGET_MASK(hfi1_cap_mask, cap))
106  #define HFI1_CAP_IS_KSET(cap) (!!HFI1_CAP_KGET(cap))
107  #define HFI1_CAP_IS_USET(cap) (!!HFI1_CAP_UGET(cap))
108  #define HFI1_MISC_GET() ((hfi1_cap_mask >> HFI1_CAP_MISC_SHIFT) & \
109  			HFI1_CAP_MISC_MASK)
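
/*
 * Usage sketch (illustrative only): the macros above take the capability
 * name without the HFI1_CAP_ prefix, e.g. with HFI1_CAP_SDMA from
 * hfi1_user.h:
 *
 *	if (HFI1_CAP_IS_KSET(SDMA))
 *		...	kernel half of the mask has SDMA enabled
 *	if (HFI1_CAP_IS_USET(SDMA))
 *		...	user half of the mask has SDMA enabled
 */
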
110  /* Offline Disabled Reason is 4-bits */
111  #define HFI1_ODR_MASK(rsn) ((rsn) & OPA_PI_MASK_OFFLINE_REASON)
112  
113  /*
114   * Control context is always 0 and handles the error packets.
115   * It also handles the VL15 and multicast packets.
116   */
117  #define HFI1_CTRL_CTXT    0
118  
119  /*
120   * Driver context will store software counters for each of the events
121   * associated with these status registers
122   */
123  #define NUM_CCE_ERR_STATUS_COUNTERS 41
124  #define NUM_RCV_ERR_STATUS_COUNTERS 64
125  #define NUM_MISC_ERR_STATUS_COUNTERS 13
126  #define NUM_SEND_PIO_ERR_STATUS_COUNTERS 36
127  #define NUM_SEND_DMA_ERR_STATUS_COUNTERS 4
128  #define NUM_SEND_EGRESS_ERR_STATUS_COUNTERS 64
129  #define NUM_SEND_ERR_STATUS_COUNTERS 3
130  #define NUM_SEND_CTXT_ERR_STATUS_COUNTERS 5
131  #define NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS 24
132  
133  /*
134   * per-driver stats, either not device- or port-specific, or
135   * summed over all of the devices and ports.
136   * They are described by name via ipathfs filesystem, so layout
137   * and number of elements can change without breaking compatibility.
138   * If members are added or deleted, hfi1_statnames[] in debugfs.c must
139   * change to match.
140   */
141  struct hfi1_ib_stats {
142  	__u64 sps_ints; /* number of interrupts handled */
143  	__u64 sps_errints; /* number of error interrupts */
144  	__u64 sps_txerrs; /* tx-related packet errors */
145  	__u64 sps_rcverrs; /* non-crc rcv packet errors */
146  	__u64 sps_hwerrs; /* hardware errors reported (parity, etc.) */
147  	__u64 sps_nopiobufs; /* no pio bufs avail from kernel */
148  	__u64 sps_ctxts; /* number of contexts currently open */
149  	__u64 sps_lenerrs; /* number of kernel packets where RHF != LRH len */
150  	__u64 sps_buffull;
151  	__u64 sps_hdrfull;
152  };
153  
154  extern struct hfi1_ib_stats hfi1_stats;
155  extern const struct pci_error_handlers hfi1_pci_err_handler;
156  
157  /*
158   * First-cut criterion for "device is active" is
159   * two thousand dwords combined Tx, Rx traffic per
160   * 5-second interval. SMA packets are 64 dwords,
161   * and occur "a few per second", presumably each way.
162   */
163  #define HFI1_TRAFFIC_ACTIVE_THRESHOLD (2000)
164  
165  /*
166   * Below contains all data related to a single context (formerly called port).
167   */
168  
169  struct hfi1_opcode_stats_perctx;
170  
171  struct ctxt_eager_bufs {
172  	struct eager_buffer {
173  		void *addr;
174  		dma_addr_t dma;
175  		ssize_t len;
176  	} *buffers;
177  	struct {
178  		void *addr;
179  		dma_addr_t dma;
180  	} *rcvtids;
181  	u32 size;                /* total size of eager buffers */
182  	u32 rcvtid_size;         /* size of each eager rcv tid */
183  	u16 count;               /* size of buffers array */
184  	u16 numbufs;             /* number of buffers allocated */
185  	u16 alloced;             /* number of rcvarray entries used */
186  	u16 threshold;           /* head update threshold */
187  };
188  
189  struct exp_tid_set {
190  	struct list_head list;
191  	u32 count;
192  };
193  
194  typedef int (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);
195  struct hfi1_ctxtdata {
196  	/* rcvhdrq base, needs mmap before useful */
197  	void *rcvhdrq;
198  	/* kernel virtual address where hdrqtail is updated */
199  	volatile __le64 *rcvhdrtail_kvaddr;
200  	/* so functions that need physical port can get it easily */
201  	struct hfi1_pportdata *ppd;
202  	/* so file ops can get at unit */
203  	struct hfi1_devdata *dd;
204  	/* this receive context's assigned PIO ACK send context */
205  	struct send_context *sc;
206  	/* per context recv functions */
207  	const rhf_rcv_function_ptr *rhf_rcv_function_map;
208  	/*
209  	 * The interrupt handler for a particular receive context can vary
210  	 * throughout its lifetime. This is not a lock-protected data member, so
211  	 * it must be updated atomically and the previous and new values must always
212  	 * be valid. Worst case is we process an extra interrupt and up to 64
213  	 * packets with the wrong interrupt handler.
214  	 */
215  	int (*do_interrupt)(struct hfi1_ctxtdata *rcd, int threaded);
216  	/* verbs rx_stats per rcd */
217  	struct hfi1_opcode_stats_perctx *opstats;
218  	/* clear interrupt mask */
219  	u64 imask;
220  	/* ctxt rcvhdrq head offset */
221  	u32 head;
222  	/* number of rcvhdrq entries */
223  	u16 rcvhdrq_cnt;
224  	u8 ireg;	/* clear interrupt register */
225  	/* receive packet sequence counter */
226  	u8 seq_cnt;
227  	/* size of each of the rcvhdrq entries */
228  	u8 rcvhdrqentsize;
229  	/* offset of RHF within receive header entry */
230  	u8 rhf_offset;
231  	/* dynamic receive available interrupt timeout */
232  	u8 rcvavail_timeout;
233  	/* Indicates that this is a vnic context */
234  	bool is_vnic;
235  	/* vnic queue index this context is mapped to */
236  	u8 vnic_q_idx;
237  	/* Is ASPM interrupt supported for this context */
238  	bool aspm_intr_supported;
239  	/* ASPM state (enabled/disabled) for this context */
240  	bool aspm_enabled;
241  	/* Is ASPM processing enabled for this context (in intr context) */
242  	bool aspm_intr_enable;
243  	struct ctxt_eager_bufs egrbufs;
244  	/* QPs waiting for context processing */
245  	struct list_head qp_wait_list;
246  	/* tid allocation lists */
247  	struct exp_tid_set tid_group_list;
248  	struct exp_tid_set tid_used_list;
249  	struct exp_tid_set tid_full_list;
250  
251  	/* Timer for re-enabling ASPM if interrupt activity quiets down */
252  	struct timer_list aspm_timer;
253  	/* per-context configuration flags */
254  	unsigned long flags;
255  	/* array of tid_groups */
256  	struct tid_group  *groups;
257  	/* mmap of hdrq, must fit in 44 bits */
258  	dma_addr_t rcvhdrq_dma;
259  	dma_addr_t rcvhdrqtailaddr_dma;
260  	/* Last interrupt timestamp */
261  	ktime_t aspm_ts_last_intr;
262  	/* Last timestamp at which we scheduled a timer for this context */
263  	ktime_t aspm_ts_timer_sched;
264  	/* Lock to serialize between intr, timer intr and user threads */
265  	spinlock_t aspm_lock;
266  	/* Reference count the base context usage */
267  	struct kref kref;
268  	/* numa node of this context */
269  	int numa_id;
270  	/* associated msix interrupt. */
271  	s16 msix_intr;
272  	/* job key */
273  	u16 jkey;
274  	/* number of RcvArray groups for this context. */
275  	u16 rcv_array_groups;
276  	/* index of first eager TID entry. */
277  	u16 eager_base;
278  	/* number of expected TID entries */
279  	u16 expected_count;
280  	/* index of first expected TID entry. */
281  	u16 expected_base;
282  	/* Device context index */
283  	u8 ctxt;
284  
285  	/* PSM Specific fields */
286  	/* lock protecting all Expected TID data */
287  	struct mutex exp_mutex;
288  	/* when waiting for rcv or pioavail */
289  	wait_queue_head_t wait;
290  	/* uuid from PSM */
291  	u8 uuid[16];
292  	/* same size as task_struct.comm[], command that opened context */
293  	char comm[TASK_COMM_LEN];
294  	/* Bitmask of in use context(s) */
295  	DECLARE_BITMAP(in_use_ctxts, HFI1_MAX_SHARED_CTXTS);
296  	/* per-context event flags for fileops/intr communication */
297  	unsigned long event_flags;
298  	/* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
299  	void *subctxt_uregbase;
300  	/* An array of pages for the eager receive buffers * N */
301  	void *subctxt_rcvegrbuf;
302  	/* An array of pages for the eager header queue entries * N */
303  	void *subctxt_rcvhdr_base;
304  	/* total number of polled urgent packets */
305  	u32 urgent;
306  	/* saved total number of polled urgent packets for poll edge trigger */
307  	u32 urgent_poll;
308  	/* Type of packets or conditions we want to poll for */
309  	u16 poll_type;
310  	/* non-zero if ctxt is being shared. */
311  	u16 subctxt_id;
312  	/* The version of the library which opened this ctxt */
313  	u32 userversion;
314  	/*
315  	 * non-zero if ctxt can be shared, and defines the maximum number of
316  	 * sub-contexts for this device context.
317  	 */
318  	u8 subctxt_cnt;
319  
320  };
321  
322  /**
323   * rcvhdrq_size - return total size in bytes for header queue
324   * @rcd: the receive context
325   *
326   * rcvhdrqentsize is in DWs, so we have to convert to bytes
327   *
328   */
329  static inline u32 rcvhdrq_size(struct hfi1_ctxtdata *rcd)
330  {
331  	return PAGE_ALIGN(rcd->rcvhdrq_cnt *
332  			  rcd->rcvhdrqentsize * sizeof(u32));
333  }
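
/*
 * Worked example (illustrative values): with rcvhdrq_cnt = 2048 entries
 * and rcvhdrqentsize = 32 DWs, the queue needs 2048 * 32 * 4 = 262144
 * bytes; that is already a multiple of the page size, so rcvhdrq_size()
 * returns 256 KiB.
 */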
334  
335  /*
336   * Represents a single packet at a high level. Put commonly computed things in
337   * here so we do not have to keep doing them over and over. The rule of thumb is
338   * if something is used one time to derive some value, store that something in
339   * here. If it is used multiple times, then store the result of that derivation
340   * in here.
341   */
342  struct hfi1_packet {
343  	void *ebuf;
344  	void *hdr;
345  	void *payload;
346  	struct hfi1_ctxtdata *rcd;
347  	__le32 *rhf_addr;
348  	struct rvt_qp *qp;
349  	struct ib_other_headers *ohdr;
350  	struct ib_grh *grh;
351  	struct opa_16b_mgmt *mgmt;
352  	u64 rhf;
353  	u32 maxcnt;
354  	u32 rhqoff;
355  	u32 dlid;
356  	u32 slid;
357  	u16 tlen;
358  	s16 etail;
359  	u16 pkey;
360  	u8 hlen;
361  	u8 numpkt;
362  	u8 rsize;
363  	u8 updegr;
364  	u8 etype;
365  	u8 extra_byte;
366  	u8 pad;
367  	u8 sc;
368  	u8 sl;
369  	u8 opcode;
370  	bool migrated;
371  };
372  
373  /* Packet types */
374  #define HFI1_PKT_TYPE_9B  0
375  #define HFI1_PKT_TYPE_16B 1
376  
377  /*
378   * OPA 16B Header
379   */
380  #define OPA_16B_L4_MASK		0xFFull
381  #define OPA_16B_SC_MASK		0x1F00000ull
382  #define OPA_16B_SC_SHIFT	20
383  #define OPA_16B_LID_MASK	0xFFFFFull
384  #define OPA_16B_DLID_MASK	0xF000ull
385  #define OPA_16B_DLID_SHIFT	20
386  #define OPA_16B_DLID_HIGH_SHIFT	12
387  #define OPA_16B_SLID_MASK	0xF00ull
388  #define OPA_16B_SLID_SHIFT	20
389  #define OPA_16B_SLID_HIGH_SHIFT	8
390  #define OPA_16B_BECN_MASK       0x80000000ull
391  #define OPA_16B_BECN_SHIFT      31
392  #define OPA_16B_FECN_MASK       0x10000000ull
393  #define OPA_16B_FECN_SHIFT      28
394  #define OPA_16B_L2_MASK		0x60000000ull
395  #define OPA_16B_L2_SHIFT	29
396  #define OPA_16B_PKEY_MASK	0xFFFF0000ull
397  #define OPA_16B_PKEY_SHIFT	16
398  #define OPA_16B_LEN_MASK	0x7FF00000ull
399  #define OPA_16B_LEN_SHIFT	20
400  #define OPA_16B_RC_MASK		0xE000000ull
401  #define OPA_16B_RC_SHIFT	25
402  #define OPA_16B_AGE_MASK	0xFF0000ull
403  #define OPA_16B_AGE_SHIFT	16
404  #define OPA_16B_ENTROPY_MASK	0xFFFFull
405  
406  /*
407   * OPA 16B L2/L4 Encodings
408   */
409  #define OPA_16B_L4_9B		0x00
410  #define OPA_16B_L2_TYPE		0x02
411  #define OPA_16B_L4_FM		0x08
412  #define OPA_16B_L4_IB_LOCAL	0x09
413  #define OPA_16B_L4_IB_GLOBAL	0x0A
414  #define OPA_16B_L4_ETHR		OPA_VNIC_L4_ETHR
415  
416  /*
417   * OPA 16B Management
418   */
419  #define OPA_16B_L4_FM_PAD	3  /* fixed 3B pad */
420  #define OPA_16B_L4_FM_HLEN	24 /* 16B(16) + L4_FM(8) */
421  
422  static inline u8 hfi1_16B_get_l4(struct hfi1_16b_header *hdr)
423  {
424  	return (u8)(hdr->lrh[2] & OPA_16B_L4_MASK);
425  }
426  
427  static inline u8 hfi1_16B_get_sc(struct hfi1_16b_header *hdr)
428  {
429  	return (u8)((hdr->lrh[1] & OPA_16B_SC_MASK) >> OPA_16B_SC_SHIFT);
430  }
431  
432  static inline u32 hfi1_16B_get_dlid(struct hfi1_16b_header *hdr)
433  {
434  	return (u32)((hdr->lrh[1] & OPA_16B_LID_MASK) |
435  		     (((hdr->lrh[2] & OPA_16B_DLID_MASK) >>
436  		     OPA_16B_DLID_HIGH_SHIFT) << OPA_16B_DLID_SHIFT));
437  }
438  
439  static inline u32 hfi1_16B_get_slid(struct hfi1_16b_header *hdr)
440  {
441  	return (u32)((hdr->lrh[0] & OPA_16B_LID_MASK) |
442  		     (((hdr->lrh[2] & OPA_16B_SLID_MASK) >>
443  		     OPA_16B_SLID_HIGH_SHIFT) << OPA_16B_SLID_SHIFT));
444  }
445  
446  static inline u8 hfi1_16B_get_becn(struct hfi1_16b_header *hdr)
447  {
448  	return (u8)((hdr->lrh[0] & OPA_16B_BECN_MASK) >> OPA_16B_BECN_SHIFT);
449  }
450  
451  static inline u8 hfi1_16B_get_fecn(struct hfi1_16b_header *hdr)
452  {
453  	return (u8)((hdr->lrh[1] & OPA_16B_FECN_MASK) >> OPA_16B_FECN_SHIFT);
454  }
455  
456  static inline u8 hfi1_16B_get_l2(struct hfi1_16b_header *hdr)
457  {
458  	return (u8)((hdr->lrh[1] & OPA_16B_L2_MASK) >> OPA_16B_L2_SHIFT);
459  }
460  
461  static inline u16 hfi1_16B_get_pkey(struct hfi1_16b_header *hdr)
462  {
463  	return (u16)((hdr->lrh[2] & OPA_16B_PKEY_MASK) >> OPA_16B_PKEY_SHIFT);
464  }
465  
466  static inline u8 hfi1_16B_get_rc(struct hfi1_16b_header *hdr)
467  {
468  	return (u8)((hdr->lrh[1] & OPA_16B_RC_MASK) >> OPA_16B_RC_SHIFT);
469  }
470  
471  static inline u8 hfi1_16B_get_age(struct hfi1_16b_header *hdr)
472  {
473  	return (u8)((hdr->lrh[3] & OPA_16B_AGE_MASK) >> OPA_16B_AGE_SHIFT);
474  }
475  
476  static inline u16 hfi1_16B_get_len(struct hfi1_16b_header *hdr)
477  {
478  	return (u16)((hdr->lrh[0] & OPA_16B_LEN_MASK) >> OPA_16B_LEN_SHIFT);
479  }
480  
481  static inline u16 hfi1_16B_get_entropy(struct hfi1_16b_header *hdr)
482  {
483  	return (u16)(hdr->lrh[3] & OPA_16B_ENTROPY_MASK);
484  }
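
/*
 * Usage sketch (illustrative only): once a packet is known to be 16B,
 * the routing fields can be pulled straight from the header with the
 * accessors above:
 *
 *	u8  sc   = hfi1_16B_get_sc(hdr);
 *	u32 slid = hfi1_16B_get_slid(hdr);
 *	u32 dlid = hfi1_16B_get_dlid(hdr);
 *	u16 pkey = hfi1_16B_get_pkey(hdr);
 *
 * The 20-bit LIDs are reassembled from the low 20 bits in lrh[0] (SLID)
 * or lrh[1] (DLID) plus the 4 high bits stored in lrh[2].
 */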
485  
486  #define OPA_16B_MAKE_QW(low_dw, high_dw) (((u64)(high_dw) << 32) | (low_dw))
487  
488  /*
489   * BTH
490   */
491  #define OPA_16B_BTH_PAD_MASK	7
492  static inline u8 hfi1_16B_bth_get_pad(struct ib_other_headers *ohdr)
493  {
494  	return (u8)((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_PAD_SHIFT) &
495  		   OPA_16B_BTH_PAD_MASK);
496  }
497  
498  /*
499   * 16B Management
500   */
501  #define OPA_16B_MGMT_QPN_MASK	0xFFFFFF
502  static inline u32 hfi1_16B_get_dest_qpn(struct opa_16b_mgmt *mgmt)
503  {
504  	return be32_to_cpu(mgmt->dest_qpn) & OPA_16B_MGMT_QPN_MASK;
505  }
506  
507  static inline u32 hfi1_16B_get_src_qpn(struct opa_16b_mgmt *mgmt)
508  {
509  	return be32_to_cpu(mgmt->src_qpn) & OPA_16B_MGMT_QPN_MASK;
510  }
511  
512  static inline void hfi1_16B_set_qpn(struct opa_16b_mgmt *mgmt,
513  				    u32 dest_qp, u32 src_qp)
514  {
515  	mgmt->dest_qpn = cpu_to_be32(dest_qp & OPA_16B_MGMT_QPN_MASK);
516  	mgmt->src_qpn = cpu_to_be32(src_qp & OPA_16B_MGMT_QPN_MASK);
517  }
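
/*
 * Example (illustrative only): a round trip through the helpers above,
 * where dest_qp, src_qp and qpn are illustrative locals:
 *
 *	hfi1_16B_set_qpn(mgmt, dest_qp, src_qp);
 *	qpn = hfi1_16B_get_dest_qpn(mgmt);	qpn == (dest_qp & OPA_16B_MGMT_QPN_MASK)
 */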
518  
519  struct rvt_sge_state;
520  
521  /*
522   * Get/Set IB link-level config parameters for f_get/set_ib_cfg()
523   * Mostly for MADs that set or query link parameters, also ipath
524   * config interfaces
525   */
526  #define HFI1_IB_CFG_LIDLMC 0 /* LID (LS16b) and Mask (MS16b) */
527  #define HFI1_IB_CFG_LWID_DG_ENB 1 /* allowed Link-width downgrade */
528  #define HFI1_IB_CFG_LWID_ENB 2 /* allowed Link-width */
529  #define HFI1_IB_CFG_LWID 3 /* currently active Link-width */
530  #define HFI1_IB_CFG_SPD_ENB 4 /* allowed Link speeds */
531  #define HFI1_IB_CFG_SPD 5 /* current Link spd */
532  #define HFI1_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */
533  #define HFI1_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */
534  #define HFI1_IB_CFG_LINKLATENCY 8 /* Link Latency (IB1.2 only) */
535  #define HFI1_IB_CFG_HRTBT 9 /* IB heartbeat off/enable/auto; DDR/QDR only */
536  #define HFI1_IB_CFG_OP_VLS 10 /* operational VLs */
537  #define HFI1_IB_CFG_VL_HIGH_CAP 11 /* num of VL high priority weights */
538  #define HFI1_IB_CFG_VL_LOW_CAP 12 /* num of VL low priority weights */
539  #define HFI1_IB_CFG_OVERRUN_THRESH 13 /* IB overrun threshold */
540  #define HFI1_IB_CFG_PHYERR_THRESH 14 /* IB PHY error threshold */
541  #define HFI1_IB_CFG_LINKDEFAULT 15 /* IB link default (sleep/poll) */
542  #define HFI1_IB_CFG_PKEYS 16 /* update partition keys */
543  #define HFI1_IB_CFG_MTU 17 /* update MTU in IBC */
544  #define HFI1_IB_CFG_VL_HIGH_LIMIT 19
545  #define HFI1_IB_CFG_PMA_TICKS 20 /* PMA sample tick resolution */
546  #define HFI1_IB_CFG_PORT 21 /* switch port we are connected to */
547  
548  /*
549   * HFI or Host Link States
550   *
551   * These describe the states the driver thinks the logical and physical
552   * states are in.  Used as an argument to set_link_state().  Implemented
553   * as bits for easy multi-state checking.  The actual state can only be
554   * one.
555   */
556  #define __HLS_UP_INIT_BP	0
557  #define __HLS_UP_ARMED_BP	1
558  #define __HLS_UP_ACTIVE_BP	2
559  #define __HLS_DN_DOWNDEF_BP	3	/* link down default */
560  #define __HLS_DN_POLL_BP	4
561  #define __HLS_DN_DISABLE_BP	5
562  #define __HLS_DN_OFFLINE_BP	6
563  #define __HLS_VERIFY_CAP_BP	7
564  #define __HLS_GOING_UP_BP	8
565  #define __HLS_GOING_OFFLINE_BP  9
566  #define __HLS_LINK_COOLDOWN_BP 10
567  
568  #define HLS_UP_INIT	  BIT(__HLS_UP_INIT_BP)
569  #define HLS_UP_ARMED	  BIT(__HLS_UP_ARMED_BP)
570  #define HLS_UP_ACTIVE	  BIT(__HLS_UP_ACTIVE_BP)
571  #define HLS_DN_DOWNDEF	  BIT(__HLS_DN_DOWNDEF_BP) /* link down default */
572  #define HLS_DN_POLL	  BIT(__HLS_DN_POLL_BP)
573  #define HLS_DN_DISABLE	  BIT(__HLS_DN_DISABLE_BP)
574  #define HLS_DN_OFFLINE	  BIT(__HLS_DN_OFFLINE_BP)
575  #define HLS_VERIFY_CAP	  BIT(__HLS_VERIFY_CAP_BP)
576  #define HLS_GOING_UP	  BIT(__HLS_GOING_UP_BP)
577  #define HLS_GOING_OFFLINE BIT(__HLS_GOING_OFFLINE_BP)
578  #define HLS_LINK_COOLDOWN BIT(__HLS_LINK_COOLDOWN_BP)
579  
580  #define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE)
581  #define HLS_DOWN ~(HLS_UP)
582  
583  #define HLS_DEFAULT HLS_DN_POLL
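
/*
 * Because each state is a single bit, multi-state checks reduce to one
 * mask test, e.g. (illustrative only):
 *
 *	if (ppd->host_link_state & HLS_UP)
 *		...	link is in INIT, ARMED or ACTIVE
 */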
584  
585  /* use this MTU size if none other is given */
586  #define HFI1_DEFAULT_ACTIVE_MTU 10240
587  /* use this MTU size as the default maximum */
588  #define HFI1_DEFAULT_MAX_MTU 10240
589  /* default partition key */
590  #define DEFAULT_PKEY 0xffff
591  
592  /*
593   * Possible fabric manager config parameters for fm_{get,set}_table()
594   */
595  #define FM_TBL_VL_HIGH_ARB		1 /* Get/set VL high prio weights */
596  #define FM_TBL_VL_LOW_ARB		2 /* Get/set VL low prio weights */
597  #define FM_TBL_BUFFER_CONTROL		3 /* Get/set Buffer Control */
598  #define FM_TBL_SC2VLNT			4 /* Get/set SC->VLnt */
599  #define FM_TBL_VL_PREEMPT_ELEMS		5 /* Get (no set) VL preempt elems */
600  #define FM_TBL_VL_PREEMPT_MATRIX	6 /* Get (no set) VL preempt matrix */
601  
602  /*
603   * Possible "operations" for f_rcvctrl(ppd, op, ctxt)
604   * these are bits so they can be combined, e.g.
605   * HFI1_RCVCTRL_INTRAVAIL_ENB | HFI1_RCVCTRL_CTXT_ENB
606   */
607  #define HFI1_RCVCTRL_TAILUPD_ENB 0x01
608  #define HFI1_RCVCTRL_TAILUPD_DIS 0x02
609  #define HFI1_RCVCTRL_CTXT_ENB 0x04
610  #define HFI1_RCVCTRL_CTXT_DIS 0x08
611  #define HFI1_RCVCTRL_INTRAVAIL_ENB 0x10
612  #define HFI1_RCVCTRL_INTRAVAIL_DIS 0x20
613  #define HFI1_RCVCTRL_PKEY_ENB 0x40  /* Note, default is enabled */
614  #define HFI1_RCVCTRL_PKEY_DIS 0x80
615  #define HFI1_RCVCTRL_TIDFLOW_ENB 0x0400
616  #define HFI1_RCVCTRL_TIDFLOW_DIS 0x0800
617  #define HFI1_RCVCTRL_ONE_PKT_EGR_ENB 0x1000
618  #define HFI1_RCVCTRL_ONE_PKT_EGR_DIS 0x2000
619  #define HFI1_RCVCTRL_NO_RHQ_DROP_ENB 0x4000
620  #define HFI1_RCVCTRL_NO_RHQ_DROP_DIS 0x8000
621  #define HFI1_RCVCTRL_NO_EGR_DROP_ENB 0x10000
622  #define HFI1_RCVCTRL_NO_EGR_DROP_DIS 0x20000
623  
624  /* partition enforcement flags */
625  #define HFI1_PART_ENFORCE_IN	0x1
626  #define HFI1_PART_ENFORCE_OUT	0x2
627  
628  /* how often we check for synthetic counter wrap around */
629  #define SYNTH_CNT_TIME 3
630  
631  /* Counter flags */
632  #define CNTR_NORMAL		0x0 /* Normal counters, just read register */
633  #define CNTR_SYNTH		0x1 /* Synthetic counters, saturate at all 1s */
634  #define CNTR_DISABLED		0x2 /* Disable this counter */
635  #define CNTR_32BIT		0x4 /* Simulate 64 bits for this counter */
636  #define CNTR_VL			0x8 /* Per VL counter */
637  #define CNTR_SDMA              0x10
638  #define CNTR_INVALID_VL		-1  /* Specifies invalid VL */
639  #define CNTR_MODE_W		0x0
640  #define CNTR_MODE_R		0x1
641  
642  /* VLs Supported/Operational */
643  #define HFI1_MIN_VLS_SUPPORTED 1
644  #define HFI1_MAX_VLS_SUPPORTED 8
645  
646  #define HFI1_GUIDS_PER_PORT  5
647  #define HFI1_PORT_GUID_INDEX 0
648  
649  static inline void incr_cntr64(u64 *cntr)
650  {
651  	if (*cntr < (u64)-1LL)
652  		(*cntr)++;
653  }
654  
655  static inline void incr_cntr32(u32 *cntr)
656  {
657  	if (*cntr < (u32)-1LL)
658  		(*cntr)++;
659  }
660  
661  #define MAX_NAME_SIZE 64
662  struct hfi1_msix_entry {
663  	enum irq_type type;
664  	int irq;
665  	void *arg;
666  	cpumask_t mask;
667  	struct irq_affinity_notify notify;
668  };
669  
670  /* per-SL CCA information */
671  struct cca_timer {
672  	struct hrtimer hrtimer;
673  	struct hfi1_pportdata *ppd; /* read-only */
674  	int sl; /* read-only */
675  	u16 ccti; /* read/write - current value of CCTI */
676  };
677  
678  struct link_down_reason {
679  	/*
680  	 * SMA-facing value.  Should be set from .latest when
681  	 * HLS_UP_* -> HLS_DN_* transition actually occurs.
682  	 */
683  	u8 sma;
684  	u8 latest;
685  };
686  
687  enum {
688  	LO_PRIO_TABLE,
689  	HI_PRIO_TABLE,
690  	MAX_PRIO_TABLE
691  };
692  
693  struct vl_arb_cache {
694  	/* protect vl arb cache */
695  	spinlock_t lock;
696  	struct ib_vl_weight_elem table[VL_ARB_TABLE_SIZE];
697  };
698  
699  /*
700   * The structure below encapsulates data relevant to a physical IB Port.
701   * Current chips support only one such port, but the separation
702   * clarifies things a bit. Note that to conform to IB conventions,
703   * port-numbers are one-based. The first or only port is port1.
704   */
705  struct hfi1_pportdata {
706  	struct hfi1_ibport ibport_data;
707  
708  	struct hfi1_devdata *dd;
709  	struct kobject pport_cc_kobj;
710  	struct kobject sc2vl_kobj;
711  	struct kobject sl2sc_kobj;
712  	struct kobject vl2mtu_kobj;
713  
714  	/* PHY support */
715  	struct qsfp_data qsfp_info;
716  	/* Values for SI tuning of SerDes */
717  	u32 port_type;
718  	u32 tx_preset_eq;
719  	u32 tx_preset_noeq;
720  	u32 rx_preset;
721  	u8  local_atten;
722  	u8  remote_atten;
723  	u8  default_atten;
724  	u8  max_power_class;
725  
726  	/* did we read platform config from scratch registers? */
727  	bool config_from_scratch;
728  
729  	/* GUIDs for this interface, in host order, guids[0] is a port guid */
730  	u64 guids[HFI1_GUIDS_PER_PORT];
731  
732  	/* GUID for peer interface, in host order */
733  	u64 neighbor_guid;
734  
735  	/* up or down physical link state */
736  	u32 linkup;
737  
738  	/*
739  	 * this address is mapped read-only into user processes so they can
740  	 * get status cheaply, whenever they want.  One qword of status per port
741  	 */
742  	u64 *statusp;
743  
744  	/* SendDMA related entries */
745  
746  	struct workqueue_struct *hfi1_wq;
747  	struct workqueue_struct *link_wq;
748  
749  	/* move out of interrupt context */
750  	struct work_struct link_vc_work;
751  	struct work_struct link_up_work;
752  	struct work_struct link_down_work;
753  	struct work_struct sma_message_work;
754  	struct work_struct freeze_work;
755  	struct work_struct link_downgrade_work;
756  	struct work_struct link_bounce_work;
757  	struct delayed_work start_link_work;
758  	/* host link state variables */
759  	struct mutex hls_lock;
760  	u32 host_link_state;
761  
762  	/* these are the "32 bit" regs */
763  
764  	u32 ibmtu; /* The MTU programmed for this unit */
765  	/*
766  	 * Current max size IB packet (in bytes) including IB headers, that
767  	 * we can send. Changes when ibmtu changes.
768  	 */
769  	u32 ibmaxlen;
770  	u32 current_egress_rate; /* units [10^6 bits/sec] */
771  	/* LID programmed for this instance */
772  	u32 lid;
773  	/* list of pkeys programmed; 0 if not set */
774  	u16 pkeys[MAX_PKEY_VALUES];
775  	u16 link_width_supported;
776  	u16 link_width_downgrade_supported;
777  	u16 link_speed_supported;
778  	u16 link_width_enabled;
779  	u16 link_width_downgrade_enabled;
780  	u16 link_speed_enabled;
781  	u16 link_width_active;
782  	u16 link_width_downgrade_tx_active;
783  	u16 link_width_downgrade_rx_active;
784  	u16 link_speed_active;
785  	u8 vls_supported;
786  	u8 vls_operational;
787  	u8 actual_vls_operational;
788  	/* LID mask control */
789  	u8 lmc;
790  	/* Rx Polarity inversion (compensate for ~tx on partner) */
791  	u8 rx_pol_inv;
792  
793  	u8 hw_pidx;     /* physical port index */
794  	u8 port;        /* IB port number and index into dd->pports - 1 */
795  	/* type of neighbor node */
796  	u8 neighbor_type;
797  	u8 neighbor_normal;
798  	u8 neighbor_fm_security; /* 1 if firmware checking is disabled */
799  	u8 neighbor_port_number;
800  	u8 is_sm_config_started;
801  	u8 offline_disabled_reason;
802  	u8 is_active_optimize_enabled;
803  	u8 driver_link_ready;	/* driver ready for active link */
804  	u8 link_enabled;	/* link enabled? */
805  	u8 linkinit_reason;
806  	u8 local_tx_rate;	/* rate given to 8051 firmware */
807  	u8 qsfp_retry_count;
808  
809  	/* placeholders for IB MAD packet settings */
810  	u8 overrun_threshold;
811  	u8 phy_error_threshold;
812  	unsigned int is_link_down_queued;
813  
814  	/* Used to override LED behavior for things like maintenance beaconing */
815  	/*
816  	 * Alternates per phase of blink
817  	 * [0] holds LED off duration, [1] holds LED on duration
818  	 */
819  	unsigned long led_override_vals[2];
820  	u8 led_override_phase; /* LSB picks from vals[] */
821  	atomic_t led_override_timer_active;
822  	/* Used to flash LEDs in override mode */
823  	struct timer_list led_override_timer;
824  
825  	u32 sm_trap_qp;
826  	u32 sa_qp;
827  
828  	/*
829  	 * cca_timer_lock protects access to the per-SL cca_timer
830  	 * structures (specifically the ccti member).
831  	 */
832  	spinlock_t cca_timer_lock ____cacheline_aligned_in_smp;
833  	struct cca_timer cca_timer[OPA_MAX_SLS];
834  
835  	/* List of congestion control table entries */
836  	struct ib_cc_table_entry_shadow ccti_entries[CC_TABLE_SHADOW_MAX];
837  
838  	/* congestion entries, each entry corresponding to a SL */
839  	struct opa_congestion_setting_entry_shadow
840  		congestion_entries[OPA_MAX_SLS];
841  
842  	/*
843  	 * cc_state_lock protects (write) access to the per-port
844  	 * struct cc_state.
845  	 */
846  	spinlock_t cc_state_lock ____cacheline_aligned_in_smp;
847  
848  	struct cc_state __rcu *cc_state;
849  
850  	/* Total number of congestion control table entries */
851  	u16 total_cct_entry;
852  
853  	/* Bit map identifying service level */
854  	u32 cc_sl_control_map;
855  
856  	/* CA's max number of 64 entry units in the congestion control table */
857  	u8 cc_max_table_entries;
858  
859  	/*
860  	 * begin congestion log related entries
861  	 * cc_log_lock protects all congestion log related data
862  	 */
863  	spinlock_t cc_log_lock ____cacheline_aligned_in_smp;
864  	u8 threshold_cong_event_map[OPA_MAX_SLS / 8];
865  	u16 threshold_event_counter;
866  	struct opa_hfi1_cong_log_event_internal cc_events[OPA_CONG_LOG_ELEMS];
867  	int cc_log_idx; /* index for logging events */
868  	int cc_mad_idx; /* index for reporting events */
869  	/* end congestion log related entries */
870  
871  	struct vl_arb_cache vl_arb_cache[MAX_PRIO_TABLE];
872  
873  	/* port relative counter buffer */
874  	u64 *cntrs;
875  	/* port relative synthetic counter buffer */
876  	u64 *scntrs;
877  	/* port_xmit_discards are synthesized from different egress errors */
878  	u64 port_xmit_discards;
879  	u64 port_xmit_discards_vl[C_VL_COUNT];
880  	u64 port_xmit_constraint_errors;
881  	u64 port_rcv_constraint_errors;
882  	/* count of 'link_err' interrupts from DC */
883  	u64 link_downed;
884  	/* number of times link retrained successfully */
885  	u64 link_up;
886  	/* number of times a link unknown frame was reported */
887  	u64 unknown_frame_count;
888  	/* port_ltp_crc_mode is returned in 'portinfo' MADs */
889  	u16 port_ltp_crc_mode;
890  	/* port_crc_mode_enabled is the crc we support */
891  	u8 port_crc_mode_enabled;
892  	/* mgmt_allowed is also returned in 'portinfo' MADs */
893  	u8 mgmt_allowed;
894  	u8 part_enforce; /* partition enforcement flags */
895  	struct link_down_reason local_link_down_reason;
896  	struct link_down_reason neigh_link_down_reason;
897  	/* Value to be sent to link peer on LinkDown. */
898  	u8 remote_link_down_reason;
899  	/* Error events that will cause a port bounce. */
900  	u32 port_error_action;
901  	struct work_struct linkstate_active_work;
902  	/* Does this port need to prescan for FECNs */
903  	bool cc_prescan;
904  	/*
905  	 * Sample sendWaitCnt & sendWaitVlCnt during link transition
906  	 * and counter request.
907  	 */
908  	u64 port_vl_xmit_wait_last[C_VL_COUNT + 1];
909  	u16 prev_link_width;
910  	u64 vl_xmit_flit_cnt[C_VL_COUNT + 1];
911  };
912  
913  typedef void (*opcode_handler)(struct hfi1_packet *packet);
914  typedef void (*hfi1_make_req)(struct rvt_qp *qp,
915  			      struct hfi1_pkt_state *ps,
916  			      struct rvt_swqe *wqe);
917  extern const rhf_rcv_function_ptr normal_rhf_rcv_functions[];
918  
919  
920  /* return values for the RHF receive functions */
921  #define RHF_RCV_CONTINUE  0	/* keep going */
922  #define RHF_RCV_DONE	  1	/* stop, this packet processed */
923  #define RHF_RCV_REPROCESS 2	/* stop. retain this packet */
924  
925  struct rcv_array_data {
926  	u16 ngroups;
927  	u16 nctxt_extra;
928  	u8 group_size;
929  };
930  
931  struct per_vl_data {
932  	u16 mtu;
933  	struct send_context *sc;
934  };
935  
936  /* 16 to directly index */
937  #define PER_VL_SEND_CONTEXTS 16
938  
939  struct err_info_rcvport {
940  	u8 status_and_code;
941  	u64 packet_flit1;
942  	u64 packet_flit2;
943  };
944  
945  struct err_info_constraint {
946  	u8 status;
947  	u16 pkey;
948  	u32 slid;
949  };
950  
951  struct hfi1_temp {
952  	unsigned int curr;       /* current temperature */
953  	unsigned int lo_lim;     /* low temperature limit */
954  	unsigned int hi_lim;     /* high temperature limit */
955  	unsigned int crit_lim;   /* critical temperature limit */
956  	u8 triggers;      /* temperature triggers */
957  };
958  
959  struct hfi1_i2c_bus {
960  	struct hfi1_devdata *controlling_dd; /* current controlling device */
961  	struct i2c_adapter adapter;	/* bus details */
962  	struct i2c_algo_bit_data algo;	/* bus algorithm details */
963  	int num;			/* bus number, 0 or 1 */
964  };
965  
966  /* common data between shared ASIC HFIs */
967  struct hfi1_asic_data {
968  	struct hfi1_devdata *dds[2];	/* back pointers */
969  	struct mutex asic_resource_mutex;
970  	struct hfi1_i2c_bus *i2c_bus0;
971  	struct hfi1_i2c_bus *i2c_bus1;
972  };
973  
974  /* sizes for both the QP and RSM map tables */
975  #define NUM_MAP_ENTRIES	 256
976  #define NUM_MAP_REGS      32
977  
978  /*
979   * Number of VNIC contexts used. Ensure it is less than or equal to
980   * max queues supported by VNIC (HFI1_VNIC_MAX_QUEUE).
981   */
982  #define HFI1_NUM_VNIC_CTXT   8
983  
984  /* Number of VNIC RSM entries */
985  #define NUM_VNIC_MAP_ENTRIES 8
986  
987  /* Virtual NIC information */
988  struct hfi1_vnic_data {
989  	struct hfi1_ctxtdata *ctxt[HFI1_NUM_VNIC_CTXT];
990  	struct kmem_cache *txreq_cache;
991  	u8 num_vports;
992  	struct idr vesw_idr;
993  	u8 rmt_start;
994  	u8 num_ctxt;
995  	u32 msix_idx;
996  };
997  
998  struct hfi1_vnic_vport_info;
999  
1000  /* device data struct now contains only "general per-device" info.
1001   * fields related to a physical IB port are in a hfi1_pportdata struct.
1002   */
1003  struct sdma_engine;
1004  struct sdma_vl_map;
1005  
1006  #define BOARD_VERS_MAX 96 /* how long the version string can be */
1007  #define SERIAL_MAX 16 /* length of the serial number */
1008  
1009  typedef int (*send_routine)(struct rvt_qp *, struct hfi1_pkt_state *, u64);
1010  struct hfi1_devdata {
1011  	struct hfi1_ibdev verbs_dev;     /* must be first */
1012  	struct list_head list;
1013  	/* pointers to related structs for this device */
1014  	/* pci access data structure */
1015  	struct pci_dev *pcidev;
1016  	struct cdev user_cdev;
1017  	struct cdev diag_cdev;
1018  	struct cdev ui_cdev;
1019  	struct device *user_device;
1020  	struct device *diag_device;
1021  	struct device *ui_device;
1022  
1023  	/* first mapping up to RcvArray */
1024  	u8 __iomem *kregbase1;
1025  	resource_size_t physaddr;
1026  
1027  	/* second uncached mapping from RcvArray to pio send buffers */
1028  	u8 __iomem *kregbase2;
1029  	/* for detecting offset above kregbase2 address */
1030  	u32 base2_start;
1031  
1032  	/* Per VL data. Enough for all VLs but not all elements are set/used. */
1033  	struct per_vl_data vld[PER_VL_SEND_CONTEXTS];
1034  	/* send context data */
1035  	struct send_context_info *send_contexts;
1036  	/* map hardware send contexts to software index */
1037  	u8 *hw_to_sw;
1038  	/* spinlock for allocating and releasing send context resources */
1039  	spinlock_t sc_lock;
1040  	/* lock for pio_map */
1041  	spinlock_t pio_map_lock;
1042  	/* Send Context initialization lock. */
1043  	spinlock_t sc_init_lock;
1044  	/* lock for sdma_map */
1045  	spinlock_t                          sde_map_lock;
1046  	/* array of kernel send contexts */
1047  	struct send_context **kernel_send_context;
1048  	/* array of vl maps */
1049  	struct pio_vl_map __rcu *pio_map;
1050  	/* default flags to last descriptor */
1051  	u64 default_desc1;
1052  
1053  	/* fields common to all SDMA engines */
1054  
1055  	volatile __le64                    *sdma_heads_dma; /* DMA'ed by chip */
1056  	dma_addr_t                          sdma_heads_phys;
1057  	void                               *sdma_pad_dma; /* DMA'ed by chip */
1058  	dma_addr_t                          sdma_pad_phys;
1059  	/* for deallocation */
1060  	size_t                              sdma_heads_size;
1061  	/* num used */
1062  	u32                                 num_sdma;
1063  	/* array of engines sized by num_sdma */
1064  	struct sdma_engine                 *per_sdma;
1065  	/* array of vl maps */
1066  	struct sdma_vl_map __rcu           *sdma_map;
1067  	/* SPC freeze waitqueue and variable */
1068  	wait_queue_head_t		  sdma_unfreeze_wq;
1069  	atomic_t			  sdma_unfreeze_count;
1070  
1071  	u32 lcb_access_count;		/* count of LCB users */
1072  
1073  	/* common data between shared ASIC HFIs in this OS */
1074  	struct hfi1_asic_data *asic_data;
1075  
1076  	/* mem-mapped pointer to base of PIO buffers */
1077  	void __iomem *piobase;
1078  	/*
1079  	 * write-combining mem-mapped pointer to base of RcvArray
1080  	 * memory.
1081  	 */
1082  	void __iomem *rcvarray_wc;
1083  	/*
1084  	 * credit return base - a per-NUMA range of DMA address that
1085  	 * the chip will use to update the per-context free counter
1086  	 */
1087  	struct credit_return_base *cr_base;
1088  
1089  	/* send context numbers and sizes for each type */
1090  	struct sc_config_sizes sc_sizes[SC_MAX];
1091  
1092  	char *boardname; /* human readable board info */
1093  
1094  	/* reset value */
1095  	u64 z_int_counter;
1096  	u64 z_rcv_limit;
1097  	u64 z_send_schedule;
1098  
1099  	u64 __percpu *send_schedule;
1100  	/* number of reserved contexts for VNIC usage */
1101  	u16 num_vnic_contexts;
1102  	/* number of receive contexts in use by the driver */
1103  	u32 num_rcv_contexts;
1104  	/* number of pio send contexts in use by the driver */
1105  	u32 num_send_contexts;
1106  	/*
1107  	 * number of ctxts available for PSM open
1108  	 */
1109  	u32 freectxts;
1110  	/* total number of available user/PSM contexts */
1111  	u32 num_user_contexts;
1112  	/* base receive interrupt timeout, in CSR units */
1113  	u32 rcv_intr_timeout_csr;
1114  
1115  	spinlock_t sendctrl_lock; /* protect changes to SendCtrl */
1116  	spinlock_t rcvctrl_lock; /* protect changes to RcvCtrl */
1117  	spinlock_t uctxt_lock; /* protect rcd changes */
1118  	struct mutex dc8051_lock; /* exclusive access to 8051 */
1119  	struct workqueue_struct *update_cntr_wq;
1120  	struct work_struct update_cntr_work;
1121  	/* exclusive access to 8051 memory */
1122  	spinlock_t dc8051_memlock;
1123  	int dc8051_timed_out;	/* remember if the 8051 timed out */
1124  	/*
1125  	 * A page that will hold event notification bitmaps for all
1126  	 * contexts. This page will be mapped into all processes.
1127  	 */
1128  	unsigned long *events;
1129  	/*
1130  	 * per unit status, see also portdata statusp
1131  	 * mapped read-only into user processes so they can get unit and
1132  	 * IB link status cheaply
1133  	 */
1134  	struct hfi1_status *status;
1135  
1136  	/* revision register shadow */
1137  	u64 revision;
1138  	/* Base GUID for device (network order) */
1139  	u64 base_guid;
1140  
1141  	/* both sides of the PCIe link are gen3 capable */
1142  	u8 link_gen3_capable;
1143  	u8 dc_shutdown;
1144  	/* localbus width (1, 2, 4, 8, 16, 32) from config space */
1145  	u32 lbus_width;
1146  	/* localbus speed in MHz */
1147  	u32 lbus_speed;
1148  	int unit; /* unit # of this chip */
1149  	int node; /* home node of this chip */
1150  
1151  	/* save these PCI fields to restore after a reset */
1152  	u32 pcibar0;
1153  	u32 pcibar1;
1154  	u32 pci_rom;
1155  	u16 pci_command;
1156  	u16 pcie_devctl;
1157  	u16 pcie_lnkctl;
1158  	u16 pcie_devctl2;
1159  	u32 pci_msix0;
1160  	u32 pci_tph2;
1161  
1162  	/*
1163  	 * ASCII serial number, from flash, large enough for original
1164  	 * all digit strings, and longer serial number format
1165  	 */
1166  	u8 serial[SERIAL_MAX];
1167  	/* human readable board version */
1168  	u8 boardversion[BOARD_VERS_MAX];
1169  	u8 lbus_info[32]; /* human readable localbus info */
1170  	/* chip major rev, from CceRevision */
1171  	u8 majrev;
1172  	/* chip minor rev, from CceRevision */
1173  	u8 minrev;
1174  	/* hardware ID */
1175  	u8 hfi1_id;
1176  	/* implementation code */
1177  	u8 icode;
1178  	/* vAU of this device */
1179  	u8 vau;
1180  	/* vCU of this device */
1181  	u8 vcu;
1182  	/* link credits of this device */
1183  	u16 link_credits;
1184  	/* initial vl15 credits to use */
1185  	u16 vl15_init;
1186  
1187  	/*
1188  	 * Cached value for vl15buf, read during verify cap interrupt. VL15
1189  	 * credits are to be kept at 0 and set when handling the link-up
1190  	 * interrupt. This removes the possibility of receiving VL15 MAD
1191  	 * packets before this HFI is ready.
1192  	 */
1193  	u16 vl15buf_cached;
1194  
1195  	/* Misc small ints */
1196  	u8 n_krcv_queues;
1197  	u8 qos_shift;
1198  
1199  	u16 irev;	/* implementation revision */
1200  	u32 dc8051_ver; /* 8051 firmware version */
1201  
1202  	spinlock_t hfi1_diag_trans_lock; /* protect diag observer ops */
1203  	struct platform_config platform_config;
1204  	struct platform_config_cache pcfg_cache;
1205  
1206  	struct diag_client *diag_client;
1207  
1208  	/* MSI-X information */
1209  	struct hfi1_msix_entry *msix_entries;
1210  	u32 num_msix_entries;
1211  	u32 first_dyn_msix_idx;
1212  
1213  	/* general interrupt: mask of handled interrupts */
1214  	u64 gi_mask[CCE_NUM_INT_CSRS];
1215  
1216  	struct rcv_array_data rcv_entries;
1217  
1218  	/* cycle length of PS* counters in HW (in picoseconds) */
1219  	u16 psxmitwait_check_rate;
1220  
1221  	/*
1222  	 * 64 bit synthetic counters
1223  	 */
1224  	struct timer_list synth_stats_timer;
1225  
1226  	/*
1227  	 * device counters
1228  	 */
1229  	char *cntrnames;
1230  	size_t cntrnameslen;
1231  	size_t ndevcntrs;
1232  	u64 *cntrs;
1233  	u64 *scntrs;
1234  
1235  	/*
1236  	 * remembered values for synthetic counters
1237  	 */
1238  	u64 last_tx;
1239  	u64 last_rx;
1240  
1241  	/*
1242  	 * per-port counters
1243  	 */
1244  	size_t nportcntrs;
1245  	char *portcntrnames;
1246  	size_t portcntrnameslen;
1247  
1248  	struct err_info_rcvport err_info_rcvport;
1249  	struct err_info_constraint err_info_rcv_constraint;
1250  	struct err_info_constraint err_info_xmit_constraint;
1251  
1252  	atomic_t drop_packet;
1253  	u8 do_drop;
1254  	u8 err_info_uncorrectable;
1255  	u8 err_info_fmconfig;
1256  
1257  	/*
1258  	 * Software counters for the status bits defined by the
1259  	 * associated error status registers
1260  	 */
1261  	u64 cce_err_status_cnt[NUM_CCE_ERR_STATUS_COUNTERS];
1262  	u64 rcv_err_status_cnt[NUM_RCV_ERR_STATUS_COUNTERS];
1263  	u64 misc_err_status_cnt[NUM_MISC_ERR_STATUS_COUNTERS];
1264  	u64 send_pio_err_status_cnt[NUM_SEND_PIO_ERR_STATUS_COUNTERS];
1265  	u64 send_dma_err_status_cnt[NUM_SEND_DMA_ERR_STATUS_COUNTERS];
1266  	u64 send_egress_err_status_cnt[NUM_SEND_EGRESS_ERR_STATUS_COUNTERS];
1267  	u64 send_err_status_cnt[NUM_SEND_ERR_STATUS_COUNTERS];
1268  
1269  	/* Software counter that spans all contexts */
1270  	u64 sw_ctxt_err_status_cnt[NUM_SEND_CTXT_ERR_STATUS_COUNTERS];
1271  	/* Software counter that spans all DMA engines */
1272  	u64 sw_send_dma_eng_err_status_cnt[
1273  		NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS];
1274  	/* Software counter that aggregates all cce_err_status errors */
1275  	u64 sw_cce_err_status_aggregate;
1276  	/* Software counter that aggregates all bypass packet rcv errors */
1277  	u64 sw_rcv_bypass_packet_errors;
1278  
1279  	/* Save the enabled LCB error bits */
1280  	u64 lcb_err_en;
1281  	struct cpu_mask_set *comp_vect;
1282  	int *comp_vect_mappings;
1283  	u32 comp_vect_possible_cpus;
1284  
1285  	/*
1286  	 * Capability to have different send engines simply by changing a
1287  	 * pointer value.
1288  	 */
1289  	send_routine process_pio_send ____cacheline_aligned_in_smp;
1290  	send_routine process_dma_send;
1291  	void (*pio_inline_send)(struct hfi1_devdata *dd, struct pio_buf *pbuf,
1292  				u64 pbc, const void *from, size_t count);
1293  	int (*process_vnic_dma_send)(struct hfi1_devdata *dd, u8 q_idx,
1294  				     struct hfi1_vnic_vport_info *vinfo,
1295  				     struct sk_buff *skb, u64 pbc, u8 plen);
1296  	/* hfi1_pportdata, points to array of (physical) port-specific
1297  	 * data structs, indexed by pidx (0..n-1)
1298  	 */
1299  	struct hfi1_pportdata *pport;
1300  	/* receive context data */
1301  	struct hfi1_ctxtdata **rcd;
1302  	u64 __percpu *int_counter;
1303  	/* verbs tx opcode stats */
1304  	struct hfi1_opcode_stats_perctx __percpu *tx_opstats;
1305  	/* device (not port) flags, basically device capabilities */
1306  	u16 flags;
1307  	/* Number of physical ports available */
1308  	u8 num_pports;
1309  	/* Lowest context number which can be used by user processes or VNIC */
1310  	u8 first_dyn_alloc_ctxt;
1311  	/* adding a new field here would make it part of this cacheline */
1312  
1313  	/* seqlock for sc2vl */
1314  	seqlock_t sc2vl_lock ____cacheline_aligned_in_smp;
1315  	u64 sc2vl[4];
1316  	u64 __percpu *rcv_limit;
1317  	/* adding a new field here would make it part of this cacheline */
1318  
1319  	/* OUI comes from the HW. Used everywhere as 3 separate bytes. */
1320  	u8 oui1;
1321  	u8 oui2;
1322  	u8 oui3;
1323  
1324  	/* Timer and counter used to detect RcvBufOvflCnt changes */
1325  	struct timer_list rcverr_timer;
1326  
1327  	wait_queue_head_t event_queue;
1328  
1329  	/* receive context tail dummy address */
1330  	__le64 *rcvhdrtail_dummy_kvaddr;
1331  	dma_addr_t rcvhdrtail_dummy_dma;
1332  
1333  	u32 rcv_ovfl_cnt;
1334  	/* Serialize ASPM enable/disable between multiple verbs contexts */
1335  	spinlock_t aspm_lock;
1336  	/* Number of verbs contexts which have disabled ASPM */
1337  	atomic_t aspm_disabled_cnt;
1338  	/* Keeps track of user space clients */
1339  	atomic_t user_refcount;
1340  	/* Used to wait for outstanding user space clients before dev removal */
1341  	struct completion user_comp;
1342  
1343  	bool eprom_available;	/* true if EPROM is available for this device */
1344  	bool aspm_supported;	/* Does HW support ASPM */
1345  	bool aspm_enabled;	/* ASPM state: enabled/disabled */
1346  	struct rhashtable *sdma_rht;
1347  
1348  	struct kobject kobj;
1349  
1350  	/* vnic data */
1351  	struct hfi1_vnic_data vnic;
1352  };
1353  
1354  static inline bool hfi1_vnic_is_rsm_full(struct hfi1_devdata *dd, int spare)
1355  {
1356  	return (dd->vnic.rmt_start + spare) > NUM_MAP_ENTRIES;
1357  }
1358  
1359  /* 8051 firmware version helper */
1360  #define dc8051_ver(a, b, c) ((a) << 16 | (b) << 8 | (c))
1361  #define dc8051_ver_maj(a) (((a) & 0xff0000) >> 16)
1362  #define dc8051_ver_min(a) (((a) & 0x00ff00) >> 8)
1363  #define dc8051_ver_patch(a) ((a) & 0x0000ff)
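
/*
 * Example: a firmware version such as 1.27.0 is encoded as
 * dc8051_ver(1, 27, 0) == 0x011b00; dc8051_ver_maj(), _min() and _patch()
 * recover 1, 27 and 0 from that value.
 */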
1364  
1365  /* f_put_tid types */
1366  #define PT_EXPECTED       0
1367  #define PT_EAGER          1
1368  #define PT_INVALID_FLUSH  2
1369  #define PT_INVALID        3
1370  
1371  struct tid_rb_node;
1372  struct mmu_rb_node;
1373  struct mmu_rb_handler;
1374  
1375  /* Private data for file operations */
1376  struct hfi1_filedata {
1377  	struct hfi1_devdata *dd;
1378  	struct hfi1_ctxtdata *uctxt;
1379  	struct hfi1_user_sdma_comp_q *cq;
1380  	struct hfi1_user_sdma_pkt_q *pq;
1381  	u16 subctxt;
1382  	/* for cpu affinity; -1 if none */
1383  	int rec_cpu_num;
1384  	u32 tid_n_pinned;
1385  	struct mmu_rb_handler *handler;
1386  	struct tid_rb_node **entry_to_rb;
1387  	spinlock_t tid_lock; /* protect tid_[limit,used] counters */
1388  	u32 tid_limit;
1389  	u32 tid_used;
1390  	u32 *invalid_tids;
1391  	u32 invalid_tid_idx;
1392  	/* protect invalid_tids array and invalid_tid_idx */
1393  	spinlock_t invalid_lock;
1394  	struct mm_struct *mm;
1395  };
1396  
1397  extern struct list_head hfi1_dev_list;
1398  extern spinlock_t hfi1_devs_lock;
1399  struct hfi1_devdata *hfi1_lookup(int unit);
1400  
1401  static inline unsigned long uctxt_offset(struct hfi1_ctxtdata *uctxt)
1402  {
1403  	return (uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
1404  		HFI1_MAX_SHARED_CTXTS;
1405  }
1406  
1407  int hfi1_init(struct hfi1_devdata *dd, int reinit);
1408  int hfi1_count_active_units(void);
1409  
1410  int hfi1_diag_add(struct hfi1_devdata *dd);
1411  void hfi1_diag_remove(struct hfi1_devdata *dd);
1412  void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup);
1413  
1414  void handle_user_interrupt(struct hfi1_ctxtdata *rcd);
1415  
1416  int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
1417  int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd);
1418  int hfi1_create_kctxts(struct hfi1_devdata *dd);
1419  int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
1420  			 struct hfi1_ctxtdata **rcd);
1421  void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd);
1422  void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
1423  			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port);
1424  void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
1425  int hfi1_rcd_put(struct hfi1_ctxtdata *rcd);
1426  void hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
1427  struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
1428  						 u16 ctxt);
1429  struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt);
1430  int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread);
1431  int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread);
1432  int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread);
1433  void set_all_slowpath(struct hfi1_devdata *dd);
1434  void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd);
1435  void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd);
1436  void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd);
1437  
1438  extern const struct pci_device_id hfi1_pci_tbl[];
1439  void hfi1_make_ud_req_9B(struct rvt_qp *qp,
1440  			 struct hfi1_pkt_state *ps,
1441  			 struct rvt_swqe *wqe);
1442  
1443  void hfi1_make_ud_req_16B(struct rvt_qp *qp,
1444  			  struct hfi1_pkt_state *ps,
1445  			  struct rvt_swqe *wqe);
1446  
1447  /* receive packet handler dispositions */
1448  #define RCV_PKT_OK      0x0 /* keep going */
1449  #define RCV_PKT_LIMIT   0x1 /* stop, hit limit, start thread */
1450  #define RCV_PKT_DONE    0x2 /* stop, no more packets detected */
1451  
1452  /* calculate the current RHF address */
1453  static inline __le32 *get_rhf_addr(struct hfi1_ctxtdata *rcd)
1454  {
1455  	return (__le32 *)rcd->rcvhdrq + rcd->head + rcd->rhf_offset;
1456  }
1457  
1458  int hfi1_reset_device(int);
1459  
1460  void receive_interrupt_work(struct work_struct *work);
1461  
1462  /* extract service channel from header and rhf */
1463  static inline int hfi1_9B_get_sc5(struct ib_header *hdr, u64 rhf)
1464  {
1465  	return ib_get_sc(hdr) | ((!!(rhf_dc_info(rhf))) << 4);
1466  }
1467  
1468  #define HFI1_JKEY_WIDTH       16
1469  #define HFI1_JKEY_MASK        (BIT(16) - 1)
1470  #define HFI1_ADMIN_JKEY_RANGE 32
1471  
1472  /*
1473   * J_KEYs are split and allocated in the following groups:
1474   *   0 - 31    - users with administrator privileges
1475   *  32 - 63    - kernel protocols using KDETH packets
1476   *  64 - 65535 - all other users using KDETH packets
1477   */
1478  static inline u16 generate_jkey(kuid_t uid)
1479  {
1480  	u16 jkey = from_kuid(current_user_ns(), uid) & HFI1_JKEY_MASK;
1481  
1482  	if (capable(CAP_SYS_ADMIN))
1483  		jkey &= HFI1_ADMIN_JKEY_RANGE - 1;
1484  	else if (jkey < 64)
1485  		jkey |= BIT(HFI1_JKEY_WIDTH - 1);
1486  
1487  	return jkey;
1488  }
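
/*
 * Worked example (illustrative uids): with CAP_SYS_ADMIN, uid 1000 maps
 * to jkey 1000 & 31 = 8 (the 0-31 admin range).  Without it, uid 1000
 * keeps jkey 1000, while uid 40 would fall in the reserved 0-63 range
 * and is pushed to 40 | 0x8000 = 32808 instead.
 */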
1489  
1490  /*
1491   * active_egress_rate
1492   *
1493   * returns the active egress rate in units of [10^6 bits/sec]
1494   */
1495  static inline u32 active_egress_rate(struct hfi1_pportdata *ppd)
1496  {
1497  	u16 link_speed = ppd->link_speed_active;
1498  	u16 link_width = ppd->link_width_active;
1499  	u32 egress_rate;
1500  
1501  	if (link_speed == OPA_LINK_SPEED_25G)
1502  		egress_rate = 25000;
1503  	else /* assume OPA_LINK_SPEED_12_5G */
1504  		egress_rate = 12500;
1505  
1506  	switch (link_width) {
1507  	case OPA_LINK_WIDTH_4X:
1508  		egress_rate *= 4;
1509  		break;
1510  	case OPA_LINK_WIDTH_3X:
1511  		egress_rate *= 3;
1512  		break;
1513  	case OPA_LINK_WIDTH_2X:
1514  		egress_rate *= 2;
1515  		break;
1516  	default:
1517  		/* assume IB_WIDTH_1X */
1518  		break;
1519  	}
1520  
1521  	return egress_rate;
1522  }
1523  
1524  /*
1525   * egress_cycles
1526   *
1527   * Returns the number of 'fabric clock cycles' to egress a packet
1528   * of length 'len' bytes, at 'rate' Mbit/s. Since the fabric clock
1529   * rate is (approximately) 805 MHz, the units of the returned value
1530   * are (1/805 MHz).
1531   */
1532  static inline u32 egress_cycles(u32 len, u32 rate)
1533  {
1534  	u32 cycles;
1535  
1536  	/*
1537  	 * cycles is:
1538  	 *
1539  	 *          (length) [bits] / (rate) [bits/sec]
1540  	 *  ---------------------------------------------------
1541  	 *  fabric_clock_period == 1 /(805 * 10^6) [cycles/sec]
1542  	 */
1543  
1544  	cycles = len * 8; /* bits */
1545  	cycles *= 805;
1546  	cycles /= rate;
1547  
1548  	return cycles;
1549  }
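
/*
 * Worked example (illustrative values): a 1024 byte packet on a 4X/25G
 * link (rate == 100000 Mbit/s, see active_egress_rate()) costs
 * 1024 * 8 * 805 / 100000 = 65 fabric clock cycles.
 */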
1550  
1551  void set_link_ipg(struct hfi1_pportdata *ppd);
1552  void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
1553  		  u32 rqpn, u8 svc_type);
1554  void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
1555  		u16 pkey, u32 slid, u32 dlid, u8 sc5,
1556  		const struct ib_grh *old_grh);
1557  void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
1558  		    u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
1559  		    u8 sc5, const struct ib_grh *old_grh);
1560  typedef void (*hfi1_handle_cnp)(struct hfi1_ibport *ibp, struct rvt_qp *qp,
1561  				u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
1562  				u8 sc5, const struct ib_grh *old_grh);
1563  
1564  #define PKEY_CHECK_INVALID -1
1565  int egress_pkey_check(struct hfi1_pportdata *ppd, u32 slid, u16 pkey,
1566  		      u8 sc5, int8_t s_pkey_index);
1567  
1568  #define PACKET_EGRESS_TIMEOUT 350
1569  static inline void pause_for_credit_return(struct hfi1_devdata *dd)
1570  {
1571  	/* Pause at least 1us, to ensure chip returns all credits */
1572  	u32 usec = cclock_to_ns(dd, PACKET_EGRESS_TIMEOUT) / 1000;
1573  
1574  	udelay(usec ? usec : 1);
1575  }
1576  
1577  /**
1578   * sc_to_vlt() reverse lookup sc to vl
1579   * @dd - devdata
1580   * @sc5 - 5 bit sc
1581   */
1582  static inline u8 sc_to_vlt(struct hfi1_devdata *dd, u8 sc5)
1583  {
1584  	unsigned seq;
1585  	u8 rval;
1586  
1587  	if (sc5 >= OPA_MAX_SCS)
1588  		return (u8)(0xff);
1589  
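	/*
	 * Read under the sc2vl seqlock so we get a consistent snapshot of
	 * the sc2vl table; if an update races with us the read is retried.
	 * (The writer side is assumed to update sc2vl under sc2vl_lock as a
	 * seqlock writer.)
	 */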
1590  	do {
1591  		seq = read_seqbegin(&dd->sc2vl_lock);
1592  		rval = *(((u8 *)dd->sc2vl) + sc5);
1593  	} while (read_seqretry(&dd->sc2vl_lock, seq));
1594  
1595  	return rval;
1596  }
1597  
1598  #define PKEY_MEMBER_MASK 0x8000
1599  #define PKEY_LOW_15_MASK 0x7fff
1600  
1601  /*
1602   * ingress_pkey_matches_entry - return 1 if the pkey matches ent (ent
1603   * being an entry from the ingress partition key table), return 0
1604   * otherwise. Use the matching criteria for ingress partition keys
1605   * specified in the OPAv1 spec., section 9.10.14.
1606   */
1607  static inline int ingress_pkey_matches_entry(u16 pkey, u16 ent)
1608  {
1609  	u16 mkey = pkey & PKEY_LOW_15_MASK;
1610  	u16 ment = ent & PKEY_LOW_15_MASK;
1611  
1612  	if (mkey == ment) {
1613  		/*
1614  		 * If pkey[15] is clear (limited partition member),
1615  		 * is bit 15 in the corresponding table element
1616  		 * clear (limited member)?
1617  		 */
1618  		if (!(pkey & PKEY_MEMBER_MASK))
1619  			return !!(ent & PKEY_MEMBER_MASK);
1620  		return 1;
1621  	}
1622  	return 0;
1623  }
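
/*
 * Illustrative example of the matching rule above: a received limited-member
 * pkey 0x0001 matches a full-member table entry 0x8001 (low 15 bits equal,
 * entry has bit 15 set), but does not match a limited-member entry 0x0001,
 * since two limited members are not allowed to communicate.
 */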
1624  
1625  /*
1626   * ingress_pkey_table_search - search the entire pkey table for
1627   * an entry which matches 'pkey'. return 0 if a match is found,
1628   * and 1 otherwise.
1629   */
1630  static inline int ingress_pkey_table_search(struct hfi1_pportdata *ppd, u16 pkey)
1631  {
1632  	int i;
1633  
1634  	for (i = 0; i < MAX_PKEY_VALUES; i++) {
1635  		if (ingress_pkey_matches_entry(pkey, ppd->pkeys[i]))
1636  			return 0;
1637  	}
1638  	return 1;
1639  }
1640  
1641  /*
1642   * ingress_pkey_table_fail - record a failure of ingress pkey validation,
1643   * i.e., increment port_rcv_constraint_errors for the port, and record
1644   * the 'error info' for this failure.
1645   */
1646  static inline void ingress_pkey_table_fail(struct hfi1_pportdata *ppd, u16 pkey,
1647  				    u32 slid)
1648  {
1649  	struct hfi1_devdata *dd = ppd->dd;
1650  
1651  	incr_cntr64(&ppd->port_rcv_constraint_errors);
1652  	if (!(dd->err_info_rcv_constraint.status & OPA_EI_STATUS_SMASK)) {
1653  		dd->err_info_rcv_constraint.status |= OPA_EI_STATUS_SMASK;
1654  		dd->err_info_rcv_constraint.slid = slid;
1655  		dd->err_info_rcv_constraint.pkey = pkey;
1656  	}
1657  }
1658  
1659  /*
1660   * ingress_pkey_check - Return 0 if the ingress pkey is valid, return 1
1661   * otherwise. Use the criteria in the OPAv1 spec, section 9.10.14. idx
1662   * is a hint as to the best place in the partition key table to begin
1663   * searching. This function should not be called on the data path, for
1664   * performance reasons. On the data path the pkey check is expected to be
1665   * done by HW, and rcv_pkey_check() should be called instead.
1666   */
1667  static inline int ingress_pkey_check(struct hfi1_pportdata *ppd, u16 pkey,
1668  				     u8 sc5, u8 idx, u32 slid, bool force)
1669  {
1670  	if (!(force) && !(ppd->part_enforce & HFI1_PART_ENFORCE_IN))
1671  		return 0;
1672  
1673  	/* If SC15, pkey[0:14] must be 0x7fff */
1674  	if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
1675  		goto bad;
1676  
1677  	/* Is the pkey = 0x0, or 0x8000? */
1678  	if ((pkey & PKEY_LOW_15_MASK) == 0)
1679  		goto bad;
1680  
1681  	/* The most likely matching pkey has index 'idx' */
1682  	if (ingress_pkey_matches_entry(pkey, ppd->pkeys[idx]))
1683  		return 0;
1684  
1685  	/* no match - try the whole table */
1686  	if (!ingress_pkey_table_search(ppd, pkey))
1687  		return 0;
1688  
1689  bad:
1690  	ingress_pkey_table_fail(ppd, pkey, slid);
1691  	return 1;
1692  }
1693  
1694  /*
1695   * rcv_pkey_check - Return 0 if the ingress pkey is valid, return 1
1696   * otherwise. It only ensures the pkey is valid for QP0. This function
1697   * should be called on the data path instead of ingress_pkey_check(),
1698   * as on the data path the pkey check is done by HW (except for QP0).
1699   */
1700  static inline int rcv_pkey_check(struct hfi1_pportdata *ppd, u16 pkey,
1701  				 u8 sc5, u16 slid)
1702  {
1703  	if (!(ppd->part_enforce & HFI1_PART_ENFORCE_IN))
1704  		return 0;
1705  
1706  	/* If SC15, pkey[0:14] must be 0x7fff */
1707  	if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
1708  		goto bad;
1709  
1710  	return 0;
1711  bad:
1712  	ingress_pkey_table_fail(ppd, pkey, slid);
1713  	return 1;
1714  }
1715  
1716  /* MTU handling */
1717  
1718  /* MTU enumeration, 256-4k match IB */
1719  #define OPA_MTU_0     0
1720  #define OPA_MTU_256   1
1721  #define OPA_MTU_512   2
1722  #define OPA_MTU_1024  3
1723  #define OPA_MTU_2048  4
1724  #define OPA_MTU_4096  5
1725  
1726  u32 lrh_max_header_bytes(struct hfi1_devdata *dd);
1727  int mtu_to_enum(u32 mtu, int default_if_bad);
1728  u16 enum_to_mtu(int mtu);
1729  static inline int valid_ib_mtu(unsigned int mtu)
1730  {
1731  	return mtu == 256 || mtu == 512 ||
1732  		mtu == 1024 || mtu == 2048 ||
1733  		mtu == 4096;
1734  }
1735  
1736  static inline int valid_opa_max_mtu(unsigned int mtu)
1737  {
1738  	return mtu >= 2048 &&
1739  		(valid_ib_mtu(mtu) || mtu == 8192 || mtu == 10240);
1740  }
1741  
1742  int set_mtu(struct hfi1_pportdata *ppd);
1743  
1744  int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc);
1745  void hfi1_disable_after_error(struct hfi1_devdata *dd);
1746  int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit);
1747  int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encode);
1748  
1749  int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t);
1750  int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t);
1751  
1752  void set_up_vau(struct hfi1_devdata *dd, u8 vau);
1753  void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf);
1754  void reset_link_credits(struct hfi1_devdata *dd);
1755  void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);
1756  
1757  int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc);
1758  
1759  static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd)
1760  {
1761  	return ppd->dd;
1762  }
1763  
1764  static inline struct hfi1_devdata *dd_from_dev(struct hfi1_ibdev *dev)
1765  {
1766  	return container_of(dev, struct hfi1_devdata, verbs_dev);
1767  }
1768  
1769  static inline struct hfi1_devdata *dd_from_ibdev(struct ib_device *ibdev)
1770  {
1771  	return dd_from_dev(to_idev(ibdev));
1772  }
1773  
1774  static inline struct hfi1_pportdata *ppd_from_ibp(struct hfi1_ibport *ibp)
1775  {
1776  	return container_of(ibp, struct hfi1_pportdata, ibport_data);
1777  }
1778  
1779  static inline struct hfi1_ibdev *dev_from_rdi(struct rvt_dev_info *rdi)
1780  {
1781  	return container_of(rdi, struct hfi1_ibdev, rdi);
1782  }
1783  
1784  static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u8 port)
1785  {
1786  	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1787  	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
1788  
1789  	WARN_ON(pidx >= dd->num_pports);
1790  	return &dd->pport[pidx].ibport_data;
1791  }
1792  
1793  static inline struct hfi1_ibport *rcd_to_iport(struct hfi1_ctxtdata *rcd)
1794  {
1795  	return &rcd->ppd->ibport_data;
1796  }
1797  
1798  void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
1799  			       bool do_cnp);
1800  static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt,
1801  			       bool do_cnp)
1802  {
1803  	bool becn;
1804  	bool fecn;
1805  
1806  	if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
1807  		fecn = hfi1_16B_get_fecn(pkt->hdr);
1808  		becn = hfi1_16B_get_becn(pkt->hdr);
1809  	} else {
1810  		fecn = ib_bth_get_fecn(pkt->ohdr);
1811  		becn = ib_bth_get_becn(pkt->ohdr);
1812  	}
1813  	if (unlikely(fecn || becn)) {
1814  		hfi1_process_ecn_slowpath(qp, pkt, do_cnp);
1815  		return fecn;
1816  	}
1817  	return false;
1818  }
1819  
1820  /*
1821   * Return the indexed PKEY from the port PKEY table.
1822   */
1823  static inline u16 hfi1_get_pkey(struct hfi1_ibport *ibp, unsigned index)
1824  {
1825  	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1826  	u16 ret;
1827  
1828  	if (index >= ARRAY_SIZE(ppd->pkeys))
1829  		ret = 0;
1830  	else
1831  		ret = ppd->pkeys[index];
1832  
1833  	return ret;
1834  }
1835  
1836  /*
1837   * Return the indexed GUID from the port GUIDs table.
1838   */
1839  static inline __be64 get_sguid(struct hfi1_ibport *ibp, unsigned int index)
1840  {
1841  	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1842  
1843  	WARN_ON(index >= HFI1_GUIDS_PER_PORT);
1844  	return cpu_to_be64(ppd->guids[index]);
1845  }
1846  
1847  /*
1848   * Called by readers of cc_state only, must call under rcu_read_lock().
1849   */
1850  static inline struct cc_state *get_cc_state(struct hfi1_pportdata *ppd)
1851  {
1852  	return rcu_dereference(ppd->cc_state);
1853  }
1854  
1855  /*
1856   * Called by writers of cc_state only, must call under cc_state_lock.
1857   */
1858  static inline
1859  struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
1860  {
1861  	return rcu_dereference_protected(ppd->cc_state,
1862  					 lockdep_is_held(&ppd->cc_state_lock));
1863  }
1864  
1865  /*
1866   * values for dd->flags (_device_ related flags)
1867   */
1868  #define HFI1_INITTED           0x1    /* chip and driver up and initted */
1869  #define HFI1_PRESENT           0x2    /* chip accesses can be done */
1870  #define HFI1_FROZEN            0x4    /* chip in SPC freeze */
1871  #define HFI1_HAS_SDMA_TIMEOUT  0x8
1872  #define HFI1_HAS_SEND_DMA      0x10   /* Supports Send DMA */
1873  #define HFI1_FORCED_FREEZE     0x80   /* driver forced freeze mode */
1874  #define HFI1_SHUTDOWN          0x100  /* device is shutting down */
1875  
1876  /* IB dword length mask in PBC (lower 11 bits); same for all chips */
1877  #define HFI1_PBC_LENGTH_MASK                     ((1 << 11) - 1)
1878  
1879  /* ctxt_flag bit offsets */
1880  		/* base context has not finished initializing */
1881  #define HFI1_CTXT_BASE_UNINIT 1
1882  		/* base context initialization failed */
1883  #define HFI1_CTXT_BASE_FAILED 2
1884  		/* waiting for a packet to arrive */
1885  #define HFI1_CTXT_WAITING_RCV 3
1886  		/* waiting for an urgent packet to arrive */
1887  #define HFI1_CTXT_WAITING_URG 4
1888  
1889  /* free up any allocated data at close */
1890  struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
1891  				  const struct pci_device_id *ent);
1892  void hfi1_free_devdata(struct hfi1_devdata *dd);
1893  struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra);
1894  
1895  /* LED beaconing functions */
1896  void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
1897  			     unsigned int timeoff);
1898  void shutdown_led_override(struct hfi1_pportdata *ppd);
1899  
1900  #define HFI1_CREDIT_RETURN_RATE (100)
1901  
1902  /*
1903   * The number of words for the KDETH protocol field.  If this is
1904   * larger than the actual field used, then part of the payload
1905   * will be in the header.
1906   *
1907   * Optimally, we want this sized so that a typical case will
1908   * use full cache lines.  The typical local KDETH header would
1909   * be:
1910   *
1911   *	Bytes	Field
1912   *	  8	LRH
1913   *	 12	BTH
1914   *	 ??	KDETH
1915   *	  8	RHF
1916   *	---
1917   *	 28 + KDETH
1918   *
1919   * For a 64-byte cache line, KDETH would need to be 36 bytes or 9 DWORDS
1920   */
1921  #define DEFAULT_RCVHDRSIZE 9
1922  
1923  /*
1924   * Maximal header byte count:
1925   *
1926   *	Bytes	Field
1927   *	  8	LRH
1928   *	 40	GRH (optional)
1929   *	 12	BTH
1930   *	 ??	KDETH
1931   *	  8	RHF
1932   *	---
1933   *	 68 + KDETH
1934   *
1935   * We also want to maintain a cache line alignment to assist DMA'ing
1936   * of the header bytes.  Round up to a good size.
1937   */
1938  #define DEFAULT_RCVHDR_ENTSIZE 32
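
/*
 * Illustrative arithmetic (assuming the entry size above is in DWORDs): a
 * 9-DWORD KDETH field is 36 bytes, so the typical local header is
 * 28 + 36 = 64 bytes (one cache line) and the maximal header is
 * 68 + 36 = 104 bytes, which a 32-DWORD (128-byte) entry covers with
 * cache-line alignment to spare.
 */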
1939  
1940  bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
1941  			u32 nlocked, u32 npages);
1942  int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr,
1943  			    size_t npages, bool writable, struct page **pages);
1944  void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
1945  			     size_t npages, bool dirty);
1946  
1947  static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
1948  {
1949  	*((u64 *)rcd->rcvhdrtail_kvaddr) = 0ULL;
1950  }
1951  
1952  static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
1953  {
1954  	/*
1955  	 * volatile because it's a DMA target from the chip, routine is
1956  	 * inlined, and don't want register caching or reordering.
1957  	 */
1958  	return (u32)le64_to_cpu(*rcd->rcvhdrtail_kvaddr);
1959  }
1960  
1961  /*
1962   * sysfs interface.
1963   */
1964  
1965  extern const char ib_hfi1_version[];
1966  
1967  int hfi1_device_create(struct hfi1_devdata *dd);
1968  void hfi1_device_remove(struct hfi1_devdata *dd);
1969  
1970  int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
1971  			   struct kobject *kobj);
1972  int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd);
1973  void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd);
1974  /* Hook for sysfs read of QSFP */
1975  int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len);
1976  
1977  int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent);
1978  void hfi1_clean_up_interrupts(struct hfi1_devdata *dd);
1979  void hfi1_pcie_cleanup(struct pci_dev *pdev);
1980  int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev);
1981  void hfi1_pcie_ddcleanup(struct hfi1_devdata *);
1982  int pcie_speeds(struct hfi1_devdata *dd);
1983  int request_msix(struct hfi1_devdata *dd, u32 msireq);
1984  int restore_pci_variables(struct hfi1_devdata *dd);
1985  int save_pci_variables(struct hfi1_devdata *dd);
1986  int do_pcie_gen3_transition(struct hfi1_devdata *dd);
1987  int parse_platform_config(struct hfi1_devdata *dd);
1988  int get_platform_config_field(struct hfi1_devdata *dd,
1989  			      enum platform_config_table_type_encoding
1990  			      table_type, int table_index, int field_index,
1991  			      u32 *data, u32 len);
1992  
1993  struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi);
1994  
1995  /*
1996   * Flush write combining store buffers (if present) and perform a write
1997   * barrier.
1998   */
1999  static inline void flush_wc(void)
2000  {
2001  	asm volatile("sfence" : : : "memory");
2002  }
2003  
2004  void handle_eflags(struct hfi1_packet *packet);
2005  void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd);
2006  
2007  /* global module parameter variables */
2008  extern unsigned int hfi1_max_mtu;
2009  extern unsigned int hfi1_cu;
2010  extern unsigned int user_credit_return_threshold;
2011  extern int num_user_contexts;
2012  extern unsigned long n_krcvqs;
2013  extern uint krcvqs[];
2014  extern int krcvqsset;
2015  extern uint kdeth_qp;
2016  extern uint loopback;
2017  extern uint quick_linkup;
2018  extern uint rcv_intr_timeout;
2019  extern uint rcv_intr_count;
2020  extern uint rcv_intr_dynamic;
2021  extern ushort link_crc_mask;
2022  
2023  extern struct mutex hfi1_mutex;
2024  
2025  /* Number of seconds before our card status check...  */
2026  #define STATUS_TIMEOUT 60
2027  
2028  #define DRIVER_NAME		"hfi1"
2029  #define HFI1_USER_MINOR_BASE     0
2030  #define HFI1_TRACE_MINOR         127
2031  #define HFI1_NMINORS             255
2032  
2033  #define PCI_VENDOR_ID_INTEL 0x8086
2034  #define PCI_DEVICE_ID_INTEL0 0x24f0
2035  #define PCI_DEVICE_ID_INTEL1 0x24f1
2036  
2037  #define HFI1_PKT_USER_SC_INTEGRITY					    \
2038  	(SEND_CTXT_CHECK_ENABLE_DISALLOW_NON_KDETH_PACKETS_SMASK	    \
2039  	| SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK		\
2040  	| SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_SMASK		    \
2041  	| SEND_CTXT_CHECK_ENABLE_DISALLOW_GRH_SMASK)
2042  
2043  #define HFI1_PKT_KERNEL_SC_INTEGRITY					    \
2044  	(SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK)
2045  
2046  static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
2047  						  u16 ctxt_type)
2048  {
2049  	u64 base_sc_integrity;
2050  
2051  	/* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
2052  	if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
2053  		return 0;
2054  
2055  	base_sc_integrity =
2056  	SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
2057  	| SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK
2058  	| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
2059  	| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
2060  	| SEND_CTXT_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
2061  #ifndef CONFIG_FAULT_INJECTION
2062  	| SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_TEST_SMASK
2063  #endif
2064  	| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK
2065  	| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK
2066  	| SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK
2067  	| SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_SMASK
2068  	| SEND_CTXT_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK
2069  	| SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
2070  	| SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK
2071  	| SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK
2072  	| SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK
2073  	| SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK;
2074  
2075  	if (ctxt_type == SC_USER)
2076  		base_sc_integrity |=
2077  #ifndef CONFIG_FAULT_INJECTION
2078  			SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_TEST_SMASK |
2079  #endif
2080  			HFI1_PKT_USER_SC_INTEGRITY;
2081  	else
2082  		base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY;
2083  
2084  	/* turn on send-side job key checks if !A0 */
2085  	if (!is_ax(dd))
2086  		base_sc_integrity |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
2087  
2088  	return base_sc_integrity;
2089  }
2090  
2091  static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
2092  {
2093  	u64 base_sdma_integrity;
2094  
2095  	/* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
2096  	if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
2097  		return 0;
2098  
2099  	base_sdma_integrity =
2100  	SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
2101  	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
2102  	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
2103  	| SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
2104  	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK
2105  	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK
2106  	| SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK
2107  	| SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_SMASK
2108  	| SEND_DMA_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK
2109  	| SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
2110  	| SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK
2111  	| SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK
2112  	| SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK
2113  	| SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK;
2114  
2115  	if (!HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
2116  		base_sdma_integrity |=
2117  		SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK;
2118  
2119  	/* turn on send-side job key checks if !A0 */
2120  	if (!is_ax(dd))
2121  		base_sdma_integrity |=
2122  			SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
2123  
2124  	return base_sdma_integrity;
2125  }
2126  
2127  /*
2128   * hfi1_early_err is used (only!) to print early errors before devdata is
2129   * allocated, or when dd->pcidev may not be valid, and at the tail end of
2130   * cleanup when devdata may have been freed, etc.  hfi1_dev_porterr is
2131   * the same as dd_dev_err, but is used when the message really needs
2132   * the IB port# to be definitive as to what's happening.
2133   */
2134  #define hfi1_early_err(dev, fmt, ...) \
2135  	dev_err(dev, fmt, ##__VA_ARGS__)
2136  
2137  #define hfi1_early_info(dev, fmt, ...) \
2138  	dev_info(dev, fmt, ##__VA_ARGS__)
2139  
2140  #define dd_dev_emerg(dd, fmt, ...) \
2141  	dev_emerg(&(dd)->pcidev->dev, "%s: " fmt, \
2142  		  rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
2143  
2144  #define dd_dev_err(dd, fmt, ...) \
2145  	dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
2146  		rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
2147  
2148  #define dd_dev_err_ratelimited(dd, fmt, ...) \
2149  	dev_err_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
2150  			    rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
2151  			    ##__VA_ARGS__)
2152  
2153  #define dd_dev_warn(dd, fmt, ...) \
2154  	dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
2155  		 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
2156  
2157  #define dd_dev_warn_ratelimited(dd, fmt, ...) \
2158  	dev_warn_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
2159  			     rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
2160  			     ##__VA_ARGS__)
2161  
2162  #define dd_dev_info(dd, fmt, ...) \
2163  	dev_info(&(dd)->pcidev->dev, "%s: " fmt, \
2164  		 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
2165  
2166  #define dd_dev_info_ratelimited(dd, fmt, ...) \
2167  	dev_info_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
2168  			     rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
2169  			     ##__VA_ARGS__)
2170  
2171  #define dd_dev_dbg(dd, fmt, ...) \
2172  	dev_dbg(&(dd)->pcidev->dev, "%s: " fmt, \
2173  		rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
2174  
2175  #define hfi1_dev_porterr(dd, port, fmt, ...) \
2176  	dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \
2177  		rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (port), ##__VA_ARGS__)
2178  
2179  /*
2180   * this is used for formatting hw error messages...
2181   */
2182  struct hfi1_hwerror_msgs {
2183  	u64 mask;
2184  	const char *msg;
2185  	size_t sz;
2186  };
2187  
2188  /* in intr.c... */
2189  void hfi1_format_hwerrors(u64 hwerrs,
2190  			  const struct hfi1_hwerror_msgs *hwerrmsgs,
2191  			  size_t nhwerrmsgs, char *msg, size_t lmsg);
2192  
2193  #define USER_OPCODE_CHECK_VAL 0xC0
2194  #define USER_OPCODE_CHECK_MASK 0xC0
2195  #define OPCODE_CHECK_VAL_DISABLED 0x0
2196  #define OPCODE_CHECK_MASK_DISABLED 0x0
2197  
2198  static inline void hfi1_reset_cpu_counters(struct hfi1_devdata *dd)
2199  {
2200  	struct hfi1_pportdata *ppd;
2201  	int i;
2202  
2203  	dd->z_int_counter = get_all_cpu_total(dd->int_counter);
2204  	dd->z_rcv_limit = get_all_cpu_total(dd->rcv_limit);
2205  	dd->z_send_schedule = get_all_cpu_total(dd->send_schedule);
2206  
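	/*
	 * Per-port data is assumed to be laid out immediately after the
	 * devdata structure in the same allocation (see hfi1_alloc_devdata()),
	 * which is what makes the (dd + 1) pointer arithmetic below valid.
	 */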
2207  	ppd = (struct hfi1_pportdata *)(dd + 1);
2208  	for (i = 0; i < dd->num_pports; i++, ppd++) {
2209  		ppd->ibport_data.rvp.z_rc_acks =
2210  			get_all_cpu_total(ppd->ibport_data.rvp.rc_acks);
2211  		ppd->ibport_data.rvp.z_rc_qacks =
2212  			get_all_cpu_total(ppd->ibport_data.rvp.rc_qacks);
2213  	}
2214  }
2215  
2216  /* Control LED state */
2217  static inline void setextled(struct hfi1_devdata *dd, u32 on)
2218  {
2219  	if (on)
2220  		write_csr(dd, DCC_CFG_LED_CNTRL, 0x1F);
2221  	else
2222  		write_csr(dd, DCC_CFG_LED_CNTRL, 0x10);
2223  }
2224  
2225  /* return the i2c resource given the target */
2226  static inline u32 i2c_target(u32 target)
2227  {
2228  	return target ? CR_I2C2 : CR_I2C1;
2229  }
2230  
2231  /* return the i2c chain chip resource that this HFI uses for QSFP */
2232  static inline u32 qsfp_resource(struct hfi1_devdata *dd)
2233  {
2234  	return i2c_target(dd->hfi1_id);
2235  }
2236  
2237  /* Is this device integrated or discrete? */
2238  static inline bool is_integrated(struct hfi1_devdata *dd)
2239  {
2240  	return dd->pcidev->device == PCI_DEVICE_ID_INTEL1;
2241  }
2242  
2243  int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp);
2244  
2245  #define DD_DEV_ENTRY(dd)       __string(dev, dev_name(&(dd)->pcidev->dev))
2246  #define DD_DEV_ASSIGN(dd)      __assign_str(dev, dev_name(&(dd)->pcidev->dev))
2247  
2248  static inline void hfi1_update_ah_attr(struct ib_device *ibdev,
2249  				       struct rdma_ah_attr *attr)
2250  {
2251  	struct hfi1_pportdata *ppd;
2252  	struct hfi1_ibport *ibp;
2253  	u32 dlid = rdma_ah_get_dlid(attr);
2254  
2255  	/*
2256  	 * Kernel clients may not have set up GRH information.
2257  	 * Set that here.
2258  	 */
2259  	ibp = to_iport(ibdev, rdma_ah_get_port_num(attr));
2260  	ppd = ppd_from_ibp(ibp);
2261  	if ((((dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ||
2262  	      (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))) &&
2263  	    (dlid != be32_to_cpu(OPA_LID_PERMISSIVE)) &&
2264  	    (dlid != be16_to_cpu(IB_LID_PERMISSIVE)) &&
2265  	    (!(rdma_ah_get_ah_flags(attr) & IB_AH_GRH))) ||
2266  	    (rdma_ah_get_make_grd(attr))) {
2267  		rdma_ah_set_ah_flags(attr, IB_AH_GRH);
2268  		rdma_ah_set_interface_id(attr, OPA_MAKE_ID(dlid));
2269  		rdma_ah_set_subnet_prefix(attr, ibp->rvp.gid_prefix);
2270  	}
2271  }
2272  
2273  /*
2274   * hfi1_check_mcast - Check if the given lid is
2275   * in the OPA multicast range.
2276   *
2277   * The LID might either reside in ah.dlid or might be
2278   * in the GRH of the address handle as DGID if extended
2279   * addresses are in use.
2280   */
2281  static inline bool hfi1_check_mcast(u32 lid)
2282  {
2283  	return ((lid >= opa_get_mcast_base(OPA_MCAST_NR)) &&
2284  		(lid != be32_to_cpu(OPA_LID_PERMISSIVE)));
2285  }
2286  
2287  #define opa_get_lid(lid, format)	\
2288  	__opa_get_lid(lid, OPA_PORT_PACKET_FORMAT_##format)
2289  
2290  /* Convert a lid to a specific lid space */
2291  static inline u32 __opa_get_lid(u32 lid, u8 format)
2292  {
2293  	bool is_mcast = hfi1_check_mcast(lid);
2294  
2295  	switch (format) {
2296  	case OPA_PORT_PACKET_FORMAT_8B:
2297  	case OPA_PORT_PACKET_FORMAT_10B:
2298  		if (is_mcast)
2299  			return (lid - opa_get_mcast_base(OPA_MCAST_NR) +
2300  				0xF0000);
2301  		return lid & 0xFFFFF;
2302  	case OPA_PORT_PACKET_FORMAT_16B:
2303  		if (is_mcast)
2304  			return (lid - opa_get_mcast_base(OPA_MCAST_NR) +
2305  				0xF00000);
2306  		return lid & 0xFFFFFF;
2307  	case OPA_PORT_PACKET_FORMAT_9B:
2308  		if (is_mcast)
2309  			return (lid -
2310  				opa_get_mcast_base(OPA_MCAST_NR) +
2311  				be16_to_cpu(IB_MULTICAST_LID_BASE));
2312  		else
2313  			return lid & 0xFFFF;
2314  	default:
2315  		return lid;
2316  	}
2317  }
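
/*
 * Worked example (illustrative, assuming MCAST_NR is 4 so the 32-bit
 * multicast base is 0xF0000000): the first OPA multicast LID converts to
 * 0xF0000 in 8B/10B space, to 0xF00000 in 16B space, and to
 * be16_to_cpu(IB_MULTICAST_LID_BASE) (0xC000) in 9B space.
 */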
2318  
2319  /* Return true if the given lid is the OPA 16B multicast range */
2320  static inline bool hfi1_is_16B_mcast(u32 lid)
2321  {
2322  	return ((lid >=
2323  		opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR), 16B)) &&
2324  		(lid != opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B)));
2325  }
2326  
2327  static inline void hfi1_make_opa_lid(struct rdma_ah_attr *attr)
2328  {
2329  	const struct ib_global_route *grh = rdma_ah_read_grh(attr);
2330  	u32 dlid = rdma_ah_get_dlid(attr);
2331  
2332  	/* Modify ah_attr.dlid to be in the 32 bit LID space.
2333  	 * This is how the address will be laid out:
2334  	 * Assuming MCAST_NR to be 4,
2335  	 * 32 bit permissive LID = 0xFFFFFFFF
2336  	 * Multicast LID range = 0xFFFFFFFE to 0xF0000000
2337  	 * Unicast LID range = 0xEFFFFFFF to 1
2338  	 * Invalid LID = 0
2339  	 */
2340  	if (ib_is_opa_gid(&grh->dgid))
2341  		dlid = opa_get_lid_from_gid(&grh->dgid);
2342  	else if ((dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
2343  		 (dlid != be16_to_cpu(IB_LID_PERMISSIVE)) &&
2344  		 (dlid != be32_to_cpu(OPA_LID_PERMISSIVE)))
2345  		dlid = dlid - be16_to_cpu(IB_MULTICAST_LID_BASE) +
2346  			opa_get_mcast_base(OPA_MCAST_NR);
2347  	else if (dlid == be16_to_cpu(IB_LID_PERMISSIVE))
2348  		dlid = be32_to_cpu(OPA_LID_PERMISSIVE);
2349  
2350  	rdma_ah_set_dlid(attr, dlid);
2351  }
2352  
2353  static inline u8 hfi1_get_packet_type(u32 lid)
2354  {
2355  	/* 9B if lid > 0xF0000000 */
2356  	if (lid >= opa_get_mcast_base(OPA_MCAST_NR))
2357  		return HFI1_PKT_TYPE_9B;
2358  
2359  	/* 16B if lid > 0xC000 */
2360  	if (lid >= opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR), 9B))
2361  		return HFI1_PKT_TYPE_16B;
2362  
2363  	return HFI1_PKT_TYPE_9B;
2364  }
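
/*
 * Illustrative examples (assuming a multicast base of 0xF0000000 and a 9B
 * multicast base of 0xC000): a LID of 0x1000 is below 0xC000 and fits the
 * 9B format; a LID of 0x12345 is at or above 0xC000 but below the multicast
 * base, so it needs the extended 16B format.
 */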
2365  
2366  static inline u8 hfi1_get_hdr_type(u32 lid, struct rdma_ah_attr *attr)
2367  {
2368  	/*
2369  	 * If there was an incoming 16B packet with permissive
2370  	 * LIDs, OPA GIDs would have been programmed when those
2371  	 * packets were received. A 16B packet will have to
2372  	 * be sent in response to that packet. Return a 16B
2373  	 * header type if that's the case.
2374  	 */
2375  	if (rdma_ah_get_dlid(attr) == be32_to_cpu(OPA_LID_PERMISSIVE))
2376  		return (ib_is_opa_gid(&rdma_ah_read_grh(attr)->dgid)) ?
2377  			HFI1_PKT_TYPE_16B : HFI1_PKT_TYPE_9B;
2378  
2379  	/*
2380  	 * Return a 16B header type if either the destination
2381  	 * or source lid is extended.
2382  	 */
2383  	if (hfi1_get_packet_type(rdma_ah_get_dlid(attr)) == HFI1_PKT_TYPE_16B)
2384  		return HFI1_PKT_TYPE_16B;
2385  
2386  	return hfi1_get_packet_type(lid);
2387  }
2388  
2389  static inline void hfi1_make_ext_grh(struct hfi1_packet *packet,
2390  				     struct ib_grh *grh, u32 slid,
2391  				     u32 dlid)
2392  {
2393  	struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
2394  	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2395  
2396  	if (!ibp)
2397  		return;
2398  
2399  	grh->hop_limit = 1;
2400  	grh->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
2401  	if (slid == opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B))
2402  		grh->sgid.global.interface_id =
2403  			OPA_MAKE_ID(be32_to_cpu(OPA_LID_PERMISSIVE));
2404  	else
2405  		grh->sgid.global.interface_id = OPA_MAKE_ID(slid);
2406  
2407  	/*
2408  	 * Upper layers (like mad) may compare the dgid in the
2409  	 * wc that is obtained here with the sgid_index in
2410  	 * the wr. Since sgid_index in wr is always 0 for
2411  	 * extended lids, set the dgid here to the default
2412  	 * IB gid.
2413  	 */
2414  	grh->dgid.global.subnet_prefix = ibp->rvp.gid_prefix;
2415  	grh->dgid.global.interface_id =
2416  		cpu_to_be64(ppd->guids[HFI1_PORT_GUID_INDEX]);
2417  }
2418  
2419  static inline int hfi1_get_16b_padding(u32 hdr_size, u32 payload)
2420  {
2421  	return -(hdr_size + payload + (SIZE_OF_CRC << 2) +
2422  		     SIZE_OF_LT) & 0x7;
2423  }
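
/*
 * Worked example (illustrative, assuming SIZE_OF_CRC is one DWORD and
 * SIZE_OF_LT is one byte): a 40-byte header with a 13-byte payload gives
 * 40 + 13 + 4 + 1 = 58 bytes, and -(58) & 0x7 = 6 pad bytes, bringing the
 * 16B packet up to the next 8-byte boundary (64 bytes).
 */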
2424  
2425  static inline void hfi1_make_ib_hdr(struct ib_header *hdr,
2426  				    u16 lrh0, u16 len,
2427  				    u16 dlid, u16 slid)
2428  {
2429  	hdr->lrh[0] = cpu_to_be16(lrh0);
2430  	hdr->lrh[1] = cpu_to_be16(dlid);
2431  	hdr->lrh[2] = cpu_to_be16(len);
2432  	hdr->lrh[3] = cpu_to_be16(slid);
2433  }
2434  
2435  static inline void hfi1_make_16b_hdr(struct hfi1_16b_header *hdr,
2436  				     u32 slid, u32 dlid,
2437  				     u16 len, u16 pkey,
2438  				     bool becn, bool fecn, u8 l4,
2439  				     u8 sc)
2440  {
2441  	u32 lrh0 = 0;
2442  	u32 lrh1 = 0x40000000;
2443  	u32 lrh2 = 0;
2444  	u32 lrh3 = 0;
2445  
2446  	lrh0 = (lrh0 & ~OPA_16B_BECN_MASK) | (becn << OPA_16B_BECN_SHIFT);
2447  	lrh0 = (lrh0 & ~OPA_16B_LEN_MASK) | (len << OPA_16B_LEN_SHIFT);
2448  	lrh0 = (lrh0 & ~OPA_16B_LID_MASK)  | (slid & OPA_16B_LID_MASK);
2449  	lrh1 = (lrh1 & ~OPA_16B_FECN_MASK) | (fecn << OPA_16B_FECN_SHIFT);
2450  	lrh1 = (lrh1 & ~OPA_16B_SC_MASK) | (sc << OPA_16B_SC_SHIFT);
2451  	lrh1 = (lrh1 & ~OPA_16B_LID_MASK) | (dlid & OPA_16B_LID_MASK);
2452  	lrh2 = (lrh2 & ~OPA_16B_SLID_MASK) |
2453  		((slid >> OPA_16B_SLID_SHIFT) << OPA_16B_SLID_HIGH_SHIFT);
2454  	lrh2 = (lrh2 & ~OPA_16B_DLID_MASK) |
2455  		((dlid >> OPA_16B_DLID_SHIFT) << OPA_16B_DLID_HIGH_SHIFT);
2456  	lrh2 = (lrh2 & ~OPA_16B_PKEY_MASK) | ((u32)pkey << OPA_16B_PKEY_SHIFT);
2457  	lrh2 = (lrh2 & ~OPA_16B_L4_MASK) | l4;
2458  
2459  	hdr->lrh[0] = lrh0;
2460  	hdr->lrh[1] = lrh1;
2461  	hdr->lrh[2] = lrh2;
2462  	hdr->lrh[3] = lrh3;
2463  }
2464  #endif                          /* _HFI1_KERNEL_H */
2465