/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef RXE_HDR_H
#define RXE_HDR_H

/* The information extracted from a packet is stored in the control block
 * (cb) array of its sk_buff for received packets, so struct rxe_pkt_info
 * must fit in at most 48 bytes; SKB_TO_PKT() below enforces this with a
 * BUILD_BUG_ON().
 */
struct rxe_pkt_info {
	struct rxe_dev		*rxe;		/* device that owns packet */
	struct rxe_qp		*qp;		/* qp that owns packet */
	struct rxe_send_wqe	*wqe;		/* send wqe */
	u8			*hdr;		/* points to bth */
	u32			mask;		/* useful info about pkt */
	u32			psn;		/* bth psn of packet */
	u16			pkey_index;	/* partition of pkt */
	u16			paylen;		/* length of bth through icrc */
	u8			port_num;	/* port pkt received on */
	u8			opcode;		/* bth opcode of packet */
	u8			offset;		/* bth offset from pkt->hdr */
};

/* These helpers should only be used on received skbs */
static inline struct rxe_pkt_info *SKB_TO_PKT(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct rxe_pkt_info) > sizeof(skb->cb));
	return (void *)skb->cb;
}

static inline struct sk_buff *PKT_TO_SKB(struct rxe_pkt_info *pkt)
{
	return container_of((void *)pkt, struct sk_buff, cb);
}
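
/*
 * Illustrative sketch, not part of the original header: on the receive
 * path these helpers round-trip between an skb and its packet info,
 * e.g. (assuming a received skb whose cb was filled in by rxe):
 *
 *	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
 *	struct sk_buff *same_skb = PKT_TO_SKB(pkt);
 *
 * PKT_TO_SKB() relies on pkt actually pointing into skb->cb, which is
 * why these helpers must only be used on received skbs.
 */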

/*
 * IBA header types and methods
 *
 * Some of these are included for reference and completeness only, since
 * rxe does not currently support the RD transport. Much of this could be
 * moved into the IB core; ib_pack.h covers part of it but is incomplete.
 *
 * Header-specific routines insert and extract values to and from headers.
 * Routines named __hhh_fff() and __hhh_set_fff() take a pointer to an
 * hhh header and get or set the fff field. Routines named hhh_fff() and
 * hhh_set_fff() take a packet info struct and locate the header and
 * field based on the opcode in the packet. Conversion to and from
 * network byte order is also done here.
 */
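
/*
 * Illustrative sketch, not part of the original header: both flavors
 * read the same field; for the BTH PSN, for example:
 *
 *	struct rxe_bth *bth = (struct rxe_bth *)(pkt->hdr + pkt->offset);
 *	u32 a = __bth_psn(bth);		// raw header pointer flavor
 *	u32 b = bth_psn(pkt);		// packet info flavor; same value
 */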

#define RXE_ICRC_SIZE		(4)
#define RXE_MAX_HDR_LENGTH	(80)

/******************************************************************************
 * Base Transport Header
 ******************************************************************************/
struct rxe_bth {
	u8			opcode;
	u8			flags;
	__be16			pkey;
	__be32			qpn;
	__be32			apsn;
};

#define BTH_TVER		(0)
#define BTH_DEF_PKEY		(0xffff)

#define BTH_SE_MASK		(0x80)
#define BTH_MIG_MASK		(0x40)
#define BTH_PAD_MASK		(0x30)
#define BTH_TVER_MASK		(0x0f)
#define BTH_FECN_MASK		(0x80000000)
#define BTH_BECN_MASK		(0x40000000)
#define BTH_RESV6A_MASK		(0x3f000000)
#define BTH_QPN_MASK		(0x00ffffff)
#define BTH_ACK_MASK		(0x80000000)
#define BTH_RESV7_MASK		(0x7f000000)
#define BTH_PSN_MASK		(0x00ffffff)

static inline u8 __bth_opcode(void *arg)
{
	struct rxe_bth *bth = arg;

	return bth->opcode;
}

static inline void __bth_set_opcode(void *arg, u8 opcode)
{
	struct rxe_bth *bth = arg;

	bth->opcode = opcode;
}

static inline u8 __bth_se(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (BTH_SE_MASK & bth->flags);
}

static inline void __bth_set_se(void *arg, int se)
{
	struct rxe_bth *bth = arg;

	if (se)
		bth->flags |= BTH_SE_MASK;
	else
		bth->flags &= ~BTH_SE_MASK;
}

static inline u8 __bth_mig(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (BTH_MIG_MASK & bth->flags);
}

static inline void __bth_set_mig(void *arg, u8 mig)
{
	struct rxe_bth *bth = arg;

	if (mig)
		bth->flags |= BTH_MIG_MASK;
	else
		bth->flags &= ~BTH_MIG_MASK;
}

static inline u8 __bth_pad(void *arg)
{
	struct rxe_bth *bth = arg;

	return (BTH_PAD_MASK & bth->flags) >> 4;
}

static inline void __bth_set_pad(void *arg, u8 pad)
{
	struct rxe_bth *bth = arg;

	bth->flags = (BTH_PAD_MASK & (pad << 4)) |
			(~BTH_PAD_MASK & bth->flags);
}

static inline u8 __bth_tver(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_TVER_MASK & bth->flags;
}

static inline void __bth_set_tver(void *arg, u8 tver)
{
	struct rxe_bth *bth = arg;

	bth->flags = (BTH_TVER_MASK & tver) |
			(~BTH_TVER_MASK & bth->flags);
}

static inline u16 __bth_pkey(void *arg)
{
	struct rxe_bth *bth = arg;

	return be16_to_cpu(bth->pkey);
}

static inline void __bth_set_pkey(void *arg, u16 pkey)
{
	struct rxe_bth *bth = arg;

	bth->pkey = cpu_to_be16(pkey);
}

static inline u32 __bth_qpn(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_QPN_MASK & be32_to_cpu(bth->qpn);
}

static inline void __bth_set_qpn(void *arg, u32 qpn)
{
	struct rxe_bth *bth = arg;
	u32 resvqpn = be32_to_cpu(bth->qpn);

	bth->qpn = cpu_to_be32((BTH_QPN_MASK & qpn) |
			       (~BTH_QPN_MASK & resvqpn));
}

static inline int __bth_fecn(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_FECN_MASK) & bth->qpn);
}

static inline void __bth_set_fecn(void *arg, int fecn)
{
	struct rxe_bth *bth = arg;

	if (fecn)
		bth->qpn |= cpu_to_be32(BTH_FECN_MASK);
	else
		bth->qpn &= ~cpu_to_be32(BTH_FECN_MASK);
}

static inline int __bth_becn(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_BECN_MASK) & bth->qpn);
}

static inline void __bth_set_becn(void *arg, int becn)
{
	struct rxe_bth *bth = arg;

	if (becn)
		bth->qpn |= cpu_to_be32(BTH_BECN_MASK);
	else
		bth->qpn &= ~cpu_to_be32(BTH_BECN_MASK);
}

static inline u8 __bth_resv6a(void *arg)
{
	struct rxe_bth *bth = arg;

	return (BTH_RESV6A_MASK & be32_to_cpu(bth->qpn)) >> 24;
}

static inline void __bth_set_resv6a(void *arg)
{
	struct rxe_bth *bth = arg;

	/* clear only the reserved bits, as __bth_set_resv7() does below;
	 * assigning ~BTH_RESV6A_MASK here would clobber the qpn field
	 */
	bth->qpn &= ~cpu_to_be32(BTH_RESV6A_MASK);
}

static inline int __bth_ack(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_ACK_MASK) & bth->apsn);
}

static inline void __bth_set_ack(void *arg, int ack)
{
	struct rxe_bth *bth = arg;

	if (ack)
		bth->apsn |= cpu_to_be32(BTH_ACK_MASK);
	else
		bth->apsn &= ~cpu_to_be32(BTH_ACK_MASK);
}

static inline void __bth_set_resv7(void *arg)
{
	struct rxe_bth *bth = arg;

	bth->apsn &= ~cpu_to_be32(BTH_RESV7_MASK);
}

static inline u32 __bth_psn(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_PSN_MASK & be32_to_cpu(bth->apsn);
}

static inline void __bth_set_psn(void *arg, u32 psn)
{
	struct rxe_bth *bth = arg;
	u32 apsn = be32_to_cpu(bth->apsn);

	bth->apsn = cpu_to_be32((BTH_PSN_MASK & psn) |
			(~BTH_PSN_MASK & apsn));
}

static inline u8 bth_opcode(struct rxe_pkt_info *pkt)
{
	return __bth_opcode(pkt->hdr + pkt->offset);
}

static inline void bth_set_opcode(struct rxe_pkt_info *pkt, u8 opcode)
{
	__bth_set_opcode(pkt->hdr + pkt->offset, opcode);
}

static inline u8 bth_se(struct rxe_pkt_info *pkt)
{
	return __bth_se(pkt->hdr + pkt->offset);
}

static inline void bth_set_se(struct rxe_pkt_info *pkt, int se)
{
	__bth_set_se(pkt->hdr + pkt->offset, se);
}

static inline u8 bth_mig(struct rxe_pkt_info *pkt)
{
	return __bth_mig(pkt->hdr + pkt->offset);
}

static inline void bth_set_mig(struct rxe_pkt_info *pkt, u8 mig)
{
	__bth_set_mig(pkt->hdr + pkt->offset, mig);
}

static inline u8 bth_pad(struct rxe_pkt_info *pkt)
{
	return __bth_pad(pkt->hdr + pkt->offset);
}

static inline void bth_set_pad(struct rxe_pkt_info *pkt, u8 pad)
{
	__bth_set_pad(pkt->hdr + pkt->offset, pad);
}

static inline u8 bth_tver(struct rxe_pkt_info *pkt)
{
	return __bth_tver(pkt->hdr + pkt->offset);
}

static inline void bth_set_tver(struct rxe_pkt_info *pkt, u8 tver)
{
	__bth_set_tver(pkt->hdr + pkt->offset, tver);
}

static inline u16 bth_pkey(struct rxe_pkt_info *pkt)
{
	return __bth_pkey(pkt->hdr + pkt->offset);
}

static inline void bth_set_pkey(struct rxe_pkt_info *pkt, u16 pkey)
{
	__bth_set_pkey(pkt->hdr + pkt->offset, pkey);
}

static inline u32 bth_qpn(struct rxe_pkt_info *pkt)
{
	return __bth_qpn(pkt->hdr + pkt->offset);
}

static inline void bth_set_qpn(struct rxe_pkt_info *pkt, u32 qpn)
{
	__bth_set_qpn(pkt->hdr + pkt->offset, qpn);
}

static inline int bth_fecn(struct rxe_pkt_info *pkt)
{
	return __bth_fecn(pkt->hdr + pkt->offset);
}

static inline void bth_set_fecn(struct rxe_pkt_info *pkt, int fecn)
{
	__bth_set_fecn(pkt->hdr + pkt->offset, fecn);
}

static inline int bth_becn(struct rxe_pkt_info *pkt)
{
	return __bth_becn(pkt->hdr + pkt->offset);
}

static inline void bth_set_becn(struct rxe_pkt_info *pkt, int becn)
{
	__bth_set_becn(pkt->hdr + pkt->offset, becn);
}

static inline u8 bth_resv6a(struct rxe_pkt_info *pkt)
{
	return __bth_resv6a(pkt->hdr + pkt->offset);
}

static inline void bth_set_resv6a(struct rxe_pkt_info *pkt)
{
	__bth_set_resv6a(pkt->hdr + pkt->offset);
}

static inline int bth_ack(struct rxe_pkt_info *pkt)
{
	return __bth_ack(pkt->hdr + pkt->offset);
}

static inline void bth_set_ack(struct rxe_pkt_info *pkt, int ack)
{
	__bth_set_ack(pkt->hdr + pkt->offset, ack);
}

static inline void bth_set_resv7(struct rxe_pkt_info *pkt)
{
	__bth_set_resv7(pkt->hdr + pkt->offset);
}

static inline u32 bth_psn(struct rxe_pkt_info *pkt)
{
	return __bth_psn(pkt->hdr + pkt->offset);
}

static inline void bth_set_psn(struct rxe_pkt_info *pkt, u32 psn)
{
	__bth_set_psn(pkt->hdr + pkt->offset, psn);
}

static inline void bth_init(struct rxe_pkt_info *pkt, u8 opcode, int se,
			    int mig, int pad, u16 pkey, u32 qpn, int ack_req,
			    u32 psn)
{
	struct rxe_bth *bth = (struct rxe_bth *)(pkt->hdr + pkt->offset);

	bth->opcode = opcode;
	bth->flags = (pad << 4) & BTH_PAD_MASK;
	if (se)
		bth->flags |= BTH_SE_MASK;
	if (mig)
		bth->flags |= BTH_MIG_MASK;
	bth->pkey = cpu_to_be16(pkey);
	bth->qpn = cpu_to_be32(qpn & BTH_QPN_MASK);
	psn &= BTH_PSN_MASK;
	if (ack_req)
		psn |= BTH_ACK_MASK;
	bth->apsn = cpu_to_be32(psn);
}
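
/*
 * Illustrative sketch, not part of the original header: a requester
 * could fill in the BTH of an RC SEND like this (IB_OPCODE_RC_SEND_ONLY
 * comes from ib_pack.h; the qp fields shown are assumptions about the
 * caller's context, not definitions from this file):
 *
 *	bth_init(pkt, IB_OPCODE_RC_SEND_ONLY, 0, 0, pad, BTH_DEF_PKEY,
 *		 qp->attr.dest_qp_num, ack_req, qp->req.psn);
 */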

/******************************************************************************
 * Reliable Datagram Extended Transport Header
 ******************************************************************************/
struct rxe_rdeth {
	__be32			een;
};

#define RDETH_EEN_MASK		(0x00ffffff)

/* een is a 24-bit field, so return u32 rather than u8 to avoid truncation */
static inline u32 __rdeth_een(void *arg)
{
	struct rxe_rdeth *rdeth = arg;

	return RDETH_EEN_MASK & be32_to_cpu(rdeth->een);
}

static inline void __rdeth_set_een(void *arg, u32 een)
{
	struct rxe_rdeth *rdeth = arg;

	rdeth->een = cpu_to_be32(RDETH_EEN_MASK & een);
}

static inline u32 rdeth_een(struct rxe_pkt_info *pkt)
{
	return __rdeth_een(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RDETH]);
}

static inline void rdeth_set_een(struct rxe_pkt_info *pkt, u32 een)
{
	__rdeth_set_een(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RDETH], een);
}

/******************************************************************************
 * Datagram Extended Transport Header
 ******************************************************************************/
struct rxe_deth {
	__be32			qkey;
	__be32			sqp;
};

#define GSI_QKEY		(0x80010000)
#define DETH_SQP_MASK		(0x00ffffff)

static inline u32 __deth_qkey(void *arg)
{
	struct rxe_deth *deth = arg;

	return be32_to_cpu(deth->qkey);
}

static inline void __deth_set_qkey(void *arg, u32 qkey)
{
	struct rxe_deth *deth = arg;

	deth->qkey = cpu_to_be32(qkey);
}

static inline u32 __deth_sqp(void *arg)
{
	struct rxe_deth *deth = arg;

	return DETH_SQP_MASK & be32_to_cpu(deth->sqp);
}

static inline void __deth_set_sqp(void *arg, u32 sqp)
{
	struct rxe_deth *deth = arg;

	deth->sqp = cpu_to_be32(DETH_SQP_MASK & sqp);
}

static inline u32 deth_qkey(struct rxe_pkt_info *pkt)
{
	return __deth_qkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}

static inline void deth_set_qkey(struct rxe_pkt_info *pkt, u32 qkey)
{
	__deth_set_qkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH], qkey);
}

static inline u32 deth_sqp(struct rxe_pkt_info *pkt)
{
	return __deth_sqp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}

static inline void deth_set_sqp(struct rxe_pkt_info *pkt, u32 sqp)
{
	__deth_set_sqp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH], sqp);
}

/******************************************************************************
 * RDMA Extended Transport Header
 ******************************************************************************/
struct rxe_reth {
	__be64			va;
	__be32			rkey;
	__be32			len;
};

static inline u64 __reth_va(void *arg)
{
	struct rxe_reth *reth = arg;

	return be64_to_cpu(reth->va);
}

static inline void __reth_set_va(void *arg, u64 va)
{
	struct rxe_reth *reth = arg;

	reth->va = cpu_to_be64(va);
}

static inline u32 __reth_rkey(void *arg)
{
	struct rxe_reth *reth = arg;

	return be32_to_cpu(reth->rkey);
}

static inline void __reth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_reth *reth = arg;

	reth->rkey = cpu_to_be32(rkey);
}

static inline u32 __reth_len(void *arg)
{
	struct rxe_reth *reth = arg;

	return be32_to_cpu(reth->len);
}

static inline void __reth_set_len(void *arg, u32 len)
{
	struct rxe_reth *reth = arg;

	reth->len = cpu_to_be32(len);
}

static inline u64 reth_va(struct rxe_pkt_info *pkt)
{
	return __reth_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
	__reth_set_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], va);
}

static inline u32 reth_rkey(struct rxe_pkt_info *pkt)
{
	return __reth_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__reth_set_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey);
}

static inline u32 reth_len(struct rxe_pkt_info *pkt)
{
	return __reth_len(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_len(struct rxe_pkt_info *pkt, u32 len)
{
	__reth_set_len(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], len);
}

/******************************************************************************
 * Atomic Extended Transport Header
 ******************************************************************************/
struct rxe_atmeth {
	__be64			va;
	__be32			rkey;
	__be64			swap_add;
	__be64			comp;
} __attribute__((__packed__));

static inline u64 __atmeth_va(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->va);
}

static inline void __atmeth_set_va(void *arg, u64 va)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->va = cpu_to_be64(va);
}

static inline u32 __atmeth_rkey(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be32_to_cpu(atmeth->rkey);
}

static inline void __atmeth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->rkey = cpu_to_be32(rkey);
}

static inline u64 __atmeth_swap_add(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->swap_add);
}

static inline void __atmeth_set_swap_add(void *arg, u64 swap_add)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->swap_add = cpu_to_be64(swap_add);
}

static inline u64 __atmeth_comp(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->comp);
}

static inline void __atmeth_set_comp(void *arg, u64 comp)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->comp = cpu_to_be64(comp);
}

static inline u64 atmeth_va(struct rxe_pkt_info *pkt)
{
	return __atmeth_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
	__atmeth_set_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], va);
}

static inline u32 atmeth_rkey(struct rxe_pkt_info *pkt)
{
	return __atmeth_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__atmeth_set_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], rkey);
}

static inline u64 atmeth_swap_add(struct rxe_pkt_info *pkt)
{
	return __atmeth_swap_add(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_swap_add(struct rxe_pkt_info *pkt, u64 swap_add)
{
	__atmeth_set_swap_add(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], swap_add);
}

static inline u64 atmeth_comp(struct rxe_pkt_info *pkt)
{
	return __atmeth_comp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_comp(struct rxe_pkt_info *pkt, u64 comp)
{
	__atmeth_set_comp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], comp);
}

/******************************************************************************
 * Ack Extended Transport Header
 ******************************************************************************/
struct rxe_aeth {
	__be32			smsn;
};

#define AETH_SYN_MASK		(0xff000000)
#define AETH_MSN_MASK		(0x00ffffff)

enum aeth_syndrome {
	AETH_TYPE_MASK		= 0xe0,
	AETH_ACK		= 0x00,
	AETH_RNR_NAK		= 0x20,
	AETH_RSVD		= 0x40,
	AETH_NAK		= 0x60,
	AETH_ACK_UNLIMITED	= 0x1f,
	AETH_NAK_PSN_SEQ_ERROR	= 0x60,
	AETH_NAK_INVALID_REQ	= 0x61,
	AETH_NAK_REM_ACC_ERR	= 0x62,
	AETH_NAK_REM_OP_ERR	= 0x63,
	AETH_NAK_INV_RD_REQ	= 0x64,
};
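
/*
 * Illustrative sketch, not part of the original header: the syndrome in
 * a received ack can be classified by masking off the type bits, e.g.:
 *
 *	u8 syn = aeth_syn(pkt);
 *
 *	switch (syn & AETH_TYPE_MASK) {
 *	case AETH_ACK:		// positive acknowledge
 *		break;
 *	case AETH_RNR_NAK:	// receiver not ready; retry later
 *		break;
 *	case AETH_NAK:		// the full syndrome names the error,
 *		break;		// e.g. AETH_NAK_PSN_SEQ_ERROR
 *	}
 */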

static inline u8 __aeth_syn(void *arg)
{
	struct rxe_aeth *aeth = arg;

	return (AETH_SYN_MASK & be32_to_cpu(aeth->smsn)) >> 24;
}

static inline void __aeth_set_syn(void *arg, u8 syn)
{
	struct rxe_aeth *aeth = arg;
	u32 smsn = be32_to_cpu(aeth->smsn);

	aeth->smsn = cpu_to_be32((AETH_SYN_MASK & (syn << 24)) |
			 (~AETH_SYN_MASK & smsn));
}

static inline u32 __aeth_msn(void *arg)
{
	struct rxe_aeth *aeth = arg;

	return AETH_MSN_MASK & be32_to_cpu(aeth->smsn);
}

static inline void __aeth_set_msn(void *arg, u32 msn)
{
	struct rxe_aeth *aeth = arg;
	u32 smsn = be32_to_cpu(aeth->smsn);

	aeth->smsn = cpu_to_be32((AETH_MSN_MASK & msn) |
			 (~AETH_MSN_MASK & smsn));
}

static inline u8 aeth_syn(struct rxe_pkt_info *pkt)
{
	return __aeth_syn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}

static inline void aeth_set_syn(struct rxe_pkt_info *pkt, u8 syn)
{
	__aeth_set_syn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH], syn);
}

static inline u32 aeth_msn(struct rxe_pkt_info *pkt)
{
	return __aeth_msn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}

static inline void aeth_set_msn(struct rxe_pkt_info *pkt, u32 msn)
{
	__aeth_set_msn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH], msn);
}

/******************************************************************************
 * Atomic Ack Extended Transport Header
 ******************************************************************************/
struct rxe_atmack {
	__be64			orig;
};

static inline u64 __atmack_orig(void *arg)
{
	struct rxe_atmack *atmack = arg;

	return be64_to_cpu(atmack->orig);
}

static inline void __atmack_set_orig(void *arg, u64 orig)
{
	struct rxe_atmack *atmack = arg;

	atmack->orig = cpu_to_be64(orig);
}

static inline u64 atmack_orig(struct rxe_pkt_info *pkt)
{
	return __atmack_orig(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMACK]);
}

static inline void atmack_set_orig(struct rxe_pkt_info *pkt, u64 orig)
{
	__atmack_set_orig(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMACK], orig);
}

/******************************************************************************
 * Immediate Extended Transport Header
 ******************************************************************************/
struct rxe_immdt {
	__be32			imm;
};

static inline __be32 __immdt_imm(void *arg)
{
	struct rxe_immdt *immdt = arg;

	return immdt->imm;
}

static inline void __immdt_set_imm(void *arg, __be32 imm)
{
	struct rxe_immdt *immdt = arg;

	immdt->imm = imm;
}

static inline __be32 immdt_imm(struct rxe_pkt_info *pkt)
{
	return __immdt_imm(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IMMDT]);
}

static inline void immdt_set_imm(struct rxe_pkt_info *pkt, __be32 imm)
{
	__immdt_set_imm(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IMMDT], imm);
}

/******************************************************************************
 * Invalidate Extended Transport Header
 ******************************************************************************/
struct rxe_ieth {
	__be32			rkey;
};

static inline u32 __ieth_rkey(void *arg)
{
	struct rxe_ieth *ieth = arg;

	return be32_to_cpu(ieth->rkey);
}

static inline void __ieth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_ieth *ieth = arg;

	ieth->rkey = cpu_to_be32(rkey);
}

static inline u32 ieth_rkey(struct rxe_pkt_info *pkt)
{
	return __ieth_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IETH]);
}

static inline void ieth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__ieth_set_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IETH], rkey);
}

enum rxe_hdr_length {
	RXE_BTH_BYTES		= sizeof(struct rxe_bth),
	RXE_DETH_BYTES		= sizeof(struct rxe_deth),
	RXE_IMMDT_BYTES		= sizeof(struct rxe_immdt),
	RXE_RETH_BYTES		= sizeof(struct rxe_reth),
	RXE_AETH_BYTES		= sizeof(struct rxe_aeth),
	RXE_ATMACK_BYTES	= sizeof(struct rxe_atmack),
	RXE_ATMETH_BYTES	= sizeof(struct rxe_atmeth),
	RXE_IETH_BYTES		= sizeof(struct rxe_ieth),
	RXE_RDETH_BYTES		= sizeof(struct rxe_rdeth),
};

static inline size_t header_size(struct rxe_pkt_info *pkt)
{
	return pkt->offset + rxe_opcode[pkt->opcode].length;
}

static inline void *payload_addr(struct rxe_pkt_info *pkt)
{
	return pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD];
}

static inline size_t payload_size(struct rxe_pkt_info *pkt)
{
	return pkt->paylen - rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD]
		- bth_pad(pkt) - RXE_ICRC_SIZE;
}
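
/*
 * Illustrative sketch, not part of the original header: for a packet
 * whose opcode places the payload immediately after the BTH, with
 * paylen = 268, no pad bytes and the 4-byte ICRC, payload_size() would
 * return 268 - RXE_BTH_BYTES - 0 - RXE_ICRC_SIZE = 252 bytes.
 */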

#endif /* RXE_HDR_H */