/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_HDR_H
#define RXE_HDR_H

/* Extracted information about a packet carried in an sk_buff. This struct
 * is stored in the control block (skb->cb) of received packets and so must
 * fit in the 48-byte cb array; SKB_TO_PKT() checks this at build time.
 */
struct rxe_pkt_info {
	struct rxe_dev		*rxe;		/* device that owns packet */
	struct rxe_qp		*qp;		/* qp that owns packet */
	struct rxe_send_wqe	*wqe;		/* send wqe */
	u8			*hdr;		/* points to bth */
	u32			mask;		/* useful info about pkt */
	u32			psn;		/* bth psn of packet */
	u16			pkey_index;	/* partition of pkt */
	u16			paylen;		/* length from start of bth to end of icrc */
	u8			port_num;	/* port pkt received on */
	u8			opcode;		/* bth opcode of packet */
};

/* The following conversions are valid only for received skbs */
static inline struct rxe_pkt_info *SKB_TO_PKT(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct rxe_pkt_info) > sizeof(skb->cb));
	return (void *)skb->cb;
}

static inline struct sk_buff *PKT_TO_SKB(struct rxe_pkt_info *pkt)
{
	return container_of((void *)pkt, struct sk_buff, cb);
}

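/* A minimal usage sketch (illustrative only, not taken from the driver):
 * on receive, parsed header info is stashed in skb->cb and the skb can be
 * recovered from the pkt info pointer later.
 *
 *	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
 *
 *	pkt->rxe = rxe;
 *	pkt->port_num = port_num;
 *	...
 *	skb = PKT_TO_SKB(pkt);
 */
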
/*
 * IBA header types and methods
 *
 * Some of these are included for reference and completeness only, since
 * rxe does not currently support the RD transport. Most of this could be
 * moved into the IB core; ib_pack.h covers part of it but is incomplete.
 *
 * Header-specific routines insert/extract values to/from headers.
 * The routines named __hhh_(set_)fff() take a pointer to an hhh header
 * and get (set) the fff field. The routines named hhh_(set_)fff() take a
 * packet info struct and locate the header and field based on the opcode
 * in the packet. Conversion between network byte order and cpu order is
 * done internally.
 */

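/* Example (illustrative): both lines below read the same PSN, since the
 * BTH always sits at the start of the header; the second form resolves
 * the header location from the packet's opcode.
 *
 *	psn = __bth_psn(pkt->hdr);
 *	psn = bth_psn(pkt);
 */
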
#define RXE_ICRC_SIZE		(4)
#define RXE_MAX_HDR_LENGTH	(80)

/******************************************************************************
 * Base Transport Header
 ******************************************************************************/
struct rxe_bth {
	u8			opcode;
	u8			flags;
	__be16			pkey;
	__be32			qpn;
	__be32			apsn;
};

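/* BTH wire layout implied by the masks below (12 bytes, big endian):
 *
 *	byte 0     : opcode
 *	byte 1     : SE(1) | M(1) | PadCnt(2) | TVer(4)
 *	bytes 2-3  : P_Key
 *	bytes 4-7  : FECN(1) | BECN(1) | resv6a(6) | destination QPN(24)
 *	bytes 8-11 : AckReq(1) | resv7(7) | PSN(24)
 */
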
#define BTH_TVER		(0)
#define BTH_DEF_PKEY		(0xffff)

#define BTH_SE_MASK		(0x80)
#define BTH_MIG_MASK		(0x40)
#define BTH_PAD_MASK		(0x30)
#define BTH_TVER_MASK		(0x0f)
#define BTH_FECN_MASK		(0x80000000)
#define BTH_BECN_MASK		(0x40000000)
#define BTH_RESV6A_MASK		(0x3f000000)
#define BTH_QPN_MASK		(0x00ffffff)
#define BTH_ACK_MASK		(0x80000000)
#define BTH_RESV7_MASK		(0x7f000000)
#define BTH_PSN_MASK		(0x00ffffff)

static inline u8 __bth_opcode(void *arg)
{
	struct rxe_bth *bth = arg;

	return bth->opcode;
}

static inline void __bth_set_opcode(void *arg, u8 opcode)
{
	struct rxe_bth *bth = arg;

	bth->opcode = opcode;
}

static inline u8 __bth_se(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (BTH_SE_MASK & bth->flags);
}

static inline void __bth_set_se(void *arg, int se)
{
	struct rxe_bth *bth = arg;

	if (se)
		bth->flags |= BTH_SE_MASK;
	else
		bth->flags &= ~BTH_SE_MASK;
}

static inline u8 __bth_mig(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (BTH_MIG_MASK & bth->flags);
}

static inline void __bth_set_mig(void *arg, u8 mig)
{
	struct rxe_bth *bth = arg;

	if (mig)
		bth->flags |= BTH_MIG_MASK;
	else
		bth->flags &= ~BTH_MIG_MASK;
}

static inline u8 __bth_pad(void *arg)
{
	struct rxe_bth *bth = arg;

	return (BTH_PAD_MASK & bth->flags) >> 4;
}

static inline void __bth_set_pad(void *arg, u8 pad)
{
	struct rxe_bth *bth = arg;

	bth->flags = (BTH_PAD_MASK & (pad << 4)) |
			(~BTH_PAD_MASK & bth->flags);
}

static inline u8 __bth_tver(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_TVER_MASK & bth->flags;
}

static inline void __bth_set_tver(void *arg, u8 tver)
{
	struct rxe_bth *bth = arg;

	bth->flags = (BTH_TVER_MASK & tver) |
			(~BTH_TVER_MASK & bth->flags);
}

static inline u16 __bth_pkey(void *arg)
{
	struct rxe_bth *bth = arg;

	return be16_to_cpu(bth->pkey);
}

static inline void __bth_set_pkey(void *arg, u16 pkey)
{
	struct rxe_bth *bth = arg;

	bth->pkey = cpu_to_be16(pkey);
}

static inline u32 __bth_qpn(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_QPN_MASK & be32_to_cpu(bth->qpn);
}

static inline void __bth_set_qpn(void *arg, u32 qpn)
{
	struct rxe_bth *bth = arg;
	u32 resvqpn = be32_to_cpu(bth->qpn);

	bth->qpn = cpu_to_be32((BTH_QPN_MASK & qpn) |
			       (~BTH_QPN_MASK & resvqpn));
}

static inline int __bth_fecn(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_FECN_MASK) & bth->qpn);
}

static inline void __bth_set_fecn(void *arg, int fecn)
{
	struct rxe_bth *bth = arg;

	if (fecn)
		bth->qpn |= cpu_to_be32(BTH_FECN_MASK);
	else
		bth->qpn &= ~cpu_to_be32(BTH_FECN_MASK);
}

static inline int __bth_becn(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_BECN_MASK) & bth->qpn);
}

static inline void __bth_set_becn(void *arg, int becn)
{
	struct rxe_bth *bth = arg;

	if (becn)
		bth->qpn |= cpu_to_be32(BTH_BECN_MASK);
	else
		bth->qpn &= ~cpu_to_be32(BTH_BECN_MASK);
}

static inline u8 __bth_resv6a(void *arg)
{
	struct rxe_bth *bth = arg;

	return (BTH_RESV6A_MASK & be32_to_cpu(bth->qpn)) >> 24;
}

static inline void __bth_set_resv6a(void *arg)
{
	struct rxe_bth *bth = arg;

	/* clear only the reserved bits, preserving FECN/BECN and the qpn */
	bth->qpn &= ~cpu_to_be32(BTH_RESV6A_MASK);
}

static inline int __bth_ack(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_ACK_MASK) & bth->apsn);
}

static inline void __bth_set_ack(void *arg, int ack)
{
	struct rxe_bth *bth = arg;

	if (ack)
		bth->apsn |= cpu_to_be32(BTH_ACK_MASK);
	else
		bth->apsn &= ~cpu_to_be32(BTH_ACK_MASK);
}

static inline void __bth_set_resv7(void *arg)
{
	struct rxe_bth *bth = arg;

	bth->apsn &= ~cpu_to_be32(BTH_RESV7_MASK);
}

static inline u32 __bth_psn(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_PSN_MASK & be32_to_cpu(bth->apsn);
}

static inline void __bth_set_psn(void *arg, u32 psn)
{
	struct rxe_bth *bth = arg;
	u32 apsn = be32_to_cpu(bth->apsn);

	bth->apsn = cpu_to_be32((BTH_PSN_MASK & psn) |
			(~BTH_PSN_MASK & apsn));
}

static inline u8 bth_opcode(struct rxe_pkt_info *pkt)
{
	return __bth_opcode(pkt->hdr);
}

static inline void bth_set_opcode(struct rxe_pkt_info *pkt, u8 opcode)
{
	__bth_set_opcode(pkt->hdr, opcode);
}

static inline u8 bth_se(struct rxe_pkt_info *pkt)
{
	return __bth_se(pkt->hdr);
}

static inline void bth_set_se(struct rxe_pkt_info *pkt, int se)
{
	__bth_set_se(pkt->hdr, se);
}

static inline u8 bth_mig(struct rxe_pkt_info *pkt)
{
	return __bth_mig(pkt->hdr);
}

static inline void bth_set_mig(struct rxe_pkt_info *pkt, u8 mig)
{
	__bth_set_mig(pkt->hdr, mig);
}

static inline u8 bth_pad(struct rxe_pkt_info *pkt)
{
	return __bth_pad(pkt->hdr);
}

static inline void bth_set_pad(struct rxe_pkt_info *pkt, u8 pad)
{
	__bth_set_pad(pkt->hdr, pad);
}

static inline u8 bth_tver(struct rxe_pkt_info *pkt)
{
	return __bth_tver(pkt->hdr);
}

static inline void bth_set_tver(struct rxe_pkt_info *pkt, u8 tver)
{
	__bth_set_tver(pkt->hdr, tver);
}

static inline u16 bth_pkey(struct rxe_pkt_info *pkt)
{
	return __bth_pkey(pkt->hdr);
}

static inline void bth_set_pkey(struct rxe_pkt_info *pkt, u16 pkey)
{
	__bth_set_pkey(pkt->hdr, pkey);
}

static inline u32 bth_qpn(struct rxe_pkt_info *pkt)
{
	return __bth_qpn(pkt->hdr);
}

static inline void bth_set_qpn(struct rxe_pkt_info *pkt, u32 qpn)
{
	__bth_set_qpn(pkt->hdr, qpn);
}

static inline int bth_fecn(struct rxe_pkt_info *pkt)
{
	return __bth_fecn(pkt->hdr);
}

static inline void bth_set_fecn(struct rxe_pkt_info *pkt, int fecn)
{
	__bth_set_fecn(pkt->hdr, fecn);
}

static inline int bth_becn(struct rxe_pkt_info *pkt)
{
	return __bth_becn(pkt->hdr);
}

static inline void bth_set_becn(struct rxe_pkt_info *pkt, int becn)
{
	__bth_set_becn(pkt->hdr, becn);
}

static inline u8 bth_resv6a(struct rxe_pkt_info *pkt)
{
	return __bth_resv6a(pkt->hdr);
}

static inline void bth_set_resv6a(struct rxe_pkt_info *pkt)
{
	__bth_set_resv6a(pkt->hdr);
}

static inline int bth_ack(struct rxe_pkt_info *pkt)
{
	return __bth_ack(pkt->hdr);
}

static inline void bth_set_ack(struct rxe_pkt_info *pkt, int ack)
{
	__bth_set_ack(pkt->hdr, ack);
}

static inline void bth_set_resv7(struct rxe_pkt_info *pkt)
{
	__bth_set_resv7(pkt->hdr);
}

static inline u32 bth_psn(struct rxe_pkt_info *pkt)
{
	return __bth_psn(pkt->hdr);
}

static inline void bth_set_psn(struct rxe_pkt_info *pkt, u32 psn)
{
	__bth_set_psn(pkt->hdr, psn);
}

static inline void bth_init(struct rxe_pkt_info *pkt, u8 opcode, int se,
			    int mig, int pad, u16 pkey, u32 qpn, int ack_req,
			    u32 psn)
{
	struct rxe_bth *bth = (struct rxe_bth *)(pkt->hdr);

	bth->opcode = opcode;
	bth->flags = (pad << 4) & BTH_PAD_MASK;
	if (se)
		bth->flags |= BTH_SE_MASK;
	if (mig)
		bth->flags |= BTH_MIG_MASK;
	bth->pkey = cpu_to_be16(pkey);
	bth->qpn = cpu_to_be32(qpn & BTH_QPN_MASK);
	psn &= BTH_PSN_MASK;
	if (ack_req)
		psn |= BTH_ACK_MASK;
	bth->apsn = cpu_to_be32(psn);
}

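/* A hedged sketch of typical use when building a request header (the
 * values here are placeholders, not real driver state):
 *
 *	bth_init(pkt, IB_OPCODE_RC_SEND_ONLY, 0, 0, pad, BTH_DEF_PKEY,
 *		 qpn, ack_req, psn);
 *
 * bth_init() writes every BTH field, so no prior clearing of the header
 * is needed; the reserved bits resv6a and resv7 end up zero.
 */
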
/******************************************************************************
 * Reliable Datagram Extended Transport Header
 ******************************************************************************/
struct rxe_rdeth {
	__be32			een;
};

#define RDETH_EEN_MASK		(0x00ffffff)

/* een is a 24-bit field, so return u32 rather than truncating to u8 */
static inline u32 __rdeth_een(void *arg)
{
	struct rxe_rdeth *rdeth = arg;

	return RDETH_EEN_MASK & be32_to_cpu(rdeth->een);
}

static inline void __rdeth_set_een(void *arg, u32 een)
{
	struct rxe_rdeth *rdeth = arg;

	rdeth->een = cpu_to_be32(RDETH_EEN_MASK & een);
}

static inline u32 rdeth_een(struct rxe_pkt_info *pkt)
{
	return __rdeth_een(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_RDETH]);
}

static inline void rdeth_set_een(struct rxe_pkt_info *pkt, u32 een)
{
	__rdeth_set_een(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_RDETH], een);
}

/******************************************************************************
 * Datagram Extended Transport Header
 ******************************************************************************/
struct rxe_deth {
	__be32			qkey;
	__be32			sqp;
};

#define GSI_QKEY		(0x80010000)
#define DETH_SQP_MASK		(0x00ffffff)

static inline u32 __deth_qkey(void *arg)
{
	struct rxe_deth *deth = arg;

	return be32_to_cpu(deth->qkey);
}

static inline void __deth_set_qkey(void *arg, u32 qkey)
{
	struct rxe_deth *deth = arg;

	deth->qkey = cpu_to_be32(qkey);
}

static inline u32 __deth_sqp(void *arg)
{
	struct rxe_deth *deth = arg;

	return DETH_SQP_MASK & be32_to_cpu(deth->sqp);
}

static inline void __deth_set_sqp(void *arg, u32 sqp)
{
	struct rxe_deth *deth = arg;

	deth->sqp = cpu_to_be32(DETH_SQP_MASK & sqp);
}

static inline u32 deth_qkey(struct rxe_pkt_info *pkt)
{
	return __deth_qkey(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}

static inline void deth_set_qkey(struct rxe_pkt_info *pkt, u32 qkey)
{
	__deth_set_qkey(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_DETH], qkey);
}

static inline u32 deth_sqp(struct rxe_pkt_info *pkt)
{
	return __deth_sqp(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}

static inline void deth_set_sqp(struct rxe_pkt_info *pkt, u32 sqp)
{
	__deth_set_sqp(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_DETH], sqp);
}

/******************************************************************************
 * RDMA Extended Transport Header
 ******************************************************************************/
struct rxe_reth {
	__be64			va;
	__be32			rkey;
	__be32			len;
};

static inline u64 __reth_va(void *arg)
{
	struct rxe_reth *reth = arg;

	return be64_to_cpu(reth->va);
}

static inline void __reth_set_va(void *arg, u64 va)
{
	struct rxe_reth *reth = arg;

	reth->va = cpu_to_be64(va);
}

static inline u32 __reth_rkey(void *arg)
{
	struct rxe_reth *reth = arg;

	return be32_to_cpu(reth->rkey);
}

static inline void __reth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_reth *reth = arg;

	reth->rkey = cpu_to_be32(rkey);
}

static inline u32 __reth_len(void *arg)
{
	struct rxe_reth *reth = arg;

	return be32_to_cpu(reth->len);
}

static inline void __reth_set_len(void *arg, u32 len)
{
	struct rxe_reth *reth = arg;

	reth->len = cpu_to_be32(len);
}

static inline u64 reth_va(struct rxe_pkt_info *pkt)
{
	return __reth_va(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
	__reth_set_va(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_RETH], va);
}

static inline u32 reth_rkey(struct rxe_pkt_info *pkt)
{
	return __reth_rkey(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__reth_set_rkey(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey);
}

static inline u32 reth_len(struct rxe_pkt_info *pkt)
{
	return __reth_len(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_len(struct rxe_pkt_info *pkt, u32 len)
{
	__reth_set_len(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_RETH], len);
}

/******************************************************************************
 * Atomic Extended Transport Header
 ******************************************************************************/
struct rxe_atmeth {
	__be64			va;
	__be32			rkey;
	__be64			swap_add;
	__be64			comp;
} __packed;
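
/* Without __packed the compiler would insert 4 bytes of padding between
 * the 4-byte rkey and the 8-byte swap_add, growing the struct to 32 bytes
 * and breaking the 28-byte on-wire AtomicETH layout.
 */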

static inline u64 __atmeth_va(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->va);
}

static inline void __atmeth_set_va(void *arg, u64 va)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->va = cpu_to_be64(va);
}

static inline u32 __atmeth_rkey(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be32_to_cpu(atmeth->rkey);
}

static inline void __atmeth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->rkey = cpu_to_be32(rkey);
}

static inline u64 __atmeth_swap_add(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->swap_add);
}

static inline void __atmeth_set_swap_add(void *arg, u64 swap_add)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->swap_add = cpu_to_be64(swap_add);
}

static inline u64 __atmeth_comp(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->comp);
}

static inline void __atmeth_set_comp(void *arg, u64 comp)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->comp = cpu_to_be64(comp);
}

static inline u64 atmeth_va(struct rxe_pkt_info *pkt)
{
	return __atmeth_va(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
	__atmeth_set_va(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMETH], va);
}

static inline u32 atmeth_rkey(struct rxe_pkt_info *pkt)
{
	return __atmeth_rkey(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__atmeth_set_rkey(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMETH], rkey);
}

static inline u64 atmeth_swap_add(struct rxe_pkt_info *pkt)
{
	return __atmeth_swap_add(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_swap_add(struct rxe_pkt_info *pkt, u64 swap_add)
{
	__atmeth_set_swap_add(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMETH], swap_add);
}

static inline u64 atmeth_comp(struct rxe_pkt_info *pkt)
{
	return __atmeth_comp(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_comp(struct rxe_pkt_info *pkt, u64 comp)
{
	__atmeth_set_comp(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMETH], comp);
}

/******************************************************************************
 * Ack Extended Transport Header
 ******************************************************************************/
struct rxe_aeth {
	__be32			smsn;
};

#define AETH_SYN_MASK		(0xff000000)
#define AETH_MSN_MASK		(0x00ffffff)

enum aeth_syndrome {
	AETH_TYPE_MASK		= 0xe0,
	AETH_ACK		= 0x00,
	AETH_RNR_NAK		= 0x20,
	AETH_RSVD		= 0x40,
	AETH_NAK		= 0x60,
	AETH_ACK_UNLIMITED	= 0x1f,
	AETH_NAK_PSN_SEQ_ERROR	= 0x60,
	AETH_NAK_INVALID_REQ	= 0x61,
	AETH_NAK_REM_ACC_ERR	= 0x62,
	AETH_NAK_REM_OP_ERR	= 0x63,
	AETH_NAK_INV_RD_REQ	= 0x64,
};
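
/* The syndrome byte combines a 3-bit type (AETH_TYPE_MASK) with a 5-bit
 * value: a credit count for ACKs, a timer code for RNR NAKs, or an error
 * code for NAKs. An illustrative composition (rnr_timer is a placeholder):
 *
 *	aeth_set_syn(pkt, AETH_ACK | AETH_ACK_UNLIMITED);
 *	aeth_set_syn(pkt, AETH_RNR_NAK | rnr_timer);
 */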

static inline u8 __aeth_syn(void *arg)
{
	struct rxe_aeth *aeth = arg;

	return (AETH_SYN_MASK & be32_to_cpu(aeth->smsn)) >> 24;
}

static inline void __aeth_set_syn(void *arg, u8 syn)
{
	struct rxe_aeth *aeth = arg;
	u32 smsn = be32_to_cpu(aeth->smsn);

	aeth->smsn = cpu_to_be32((AETH_SYN_MASK & (syn << 24)) |
			 (~AETH_SYN_MASK & smsn));
}

static inline u32 __aeth_msn(void *arg)
{
	struct rxe_aeth *aeth = arg;

	return AETH_MSN_MASK & be32_to_cpu(aeth->smsn);
}

static inline void __aeth_set_msn(void *arg, u32 msn)
{
	struct rxe_aeth *aeth = arg;
	u32 smsn = be32_to_cpu(aeth->smsn);

	aeth->smsn = cpu_to_be32((AETH_MSN_MASK & msn) |
			 (~AETH_MSN_MASK & smsn));
}

static inline u8 aeth_syn(struct rxe_pkt_info *pkt)
{
	return __aeth_syn(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}

static inline void aeth_set_syn(struct rxe_pkt_info *pkt, u8 syn)
{
	__aeth_set_syn(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_AETH], syn);
}

static inline u32 aeth_msn(struct rxe_pkt_info *pkt)
{
	return __aeth_msn(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}

static inline void aeth_set_msn(struct rxe_pkt_info *pkt, u32 msn)
{
	__aeth_set_msn(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_AETH], msn);
}

/******************************************************************************
 * Atomic Ack Extended Transport Header
 ******************************************************************************/
struct rxe_atmack {
	__be64			orig;
};

static inline u64 __atmack_orig(void *arg)
{
	struct rxe_atmack *atmack = arg;

	return be64_to_cpu(atmack->orig);
}

static inline void __atmack_set_orig(void *arg, u64 orig)
{
	struct rxe_atmack *atmack = arg;

	atmack->orig = cpu_to_be64(orig);
}

static inline u64 atmack_orig(struct rxe_pkt_info *pkt)
{
	return __atmack_orig(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMACK]);
}

static inline void atmack_set_orig(struct rxe_pkt_info *pkt, u64 orig)
{
	__atmack_set_orig(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMACK], orig);
}

/******************************************************************************
 * Immediate Extended Transport Header
 ******************************************************************************/
struct rxe_immdt {
	__be32			imm;
};

static inline __be32 __immdt_imm(void *arg)
{
	struct rxe_immdt *immdt = arg;

	return immdt->imm;
}

static inline void __immdt_set_imm(void *arg, __be32 imm)
{
	struct rxe_immdt *immdt = arg;

	immdt->imm = imm;
}

static inline __be32 immdt_imm(struct rxe_pkt_info *pkt)
{
	return __immdt_imm(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_IMMDT]);
}

static inline void immdt_set_imm(struct rxe_pkt_info *pkt, __be32 imm)
{
	__immdt_set_imm(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_IMMDT], imm);
}

/******************************************************************************
 * Invalidate Extended Transport Header
 ******************************************************************************/
struct rxe_ieth {
	__be32			rkey;
};

static inline u32 __ieth_rkey(void *arg)
{
	struct rxe_ieth *ieth = arg;

	return be32_to_cpu(ieth->rkey);
}

static inline void __ieth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_ieth *ieth = arg;

	ieth->rkey = cpu_to_be32(rkey);
}

static inline u32 ieth_rkey(struct rxe_pkt_info *pkt)
{
	return __ieth_rkey(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_IETH]);
}

static inline void ieth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__ieth_set_rkey(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_IETH], rkey);
}

enum rxe_hdr_length {
	RXE_BTH_BYTES		= sizeof(struct rxe_bth),
	RXE_DETH_BYTES		= sizeof(struct rxe_deth),
	RXE_IMMDT_BYTES		= sizeof(struct rxe_immdt),
	RXE_RETH_BYTES		= sizeof(struct rxe_reth),
	RXE_AETH_BYTES		= sizeof(struct rxe_aeth),
	RXE_ATMACK_BYTES	= sizeof(struct rxe_atmack),
	RXE_ATMETH_BYTES	= sizeof(struct rxe_atmeth),
	RXE_IETH_BYTES		= sizeof(struct rxe_ieth),
	RXE_RDETH_BYTES		= sizeof(struct rxe_rdeth),
};

static inline size_t header_size(struct rxe_pkt_info *pkt)
{
	return rxe_opcode[pkt->opcode].length;
}

static inline void *payload_addr(struct rxe_pkt_info *pkt)
{
	return pkt->hdr + rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD];
}

static inline size_t payload_size(struct rxe_pkt_info *pkt)
{
	return pkt->paylen - rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD]
		- bth_pad(pkt) - RXE_ICRC_SIZE;
}
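
/* Worked example (assumed numbers): for a BTH-only packet carrying 103
 * payload bytes, paylen = 12 (BTH) + 103 + 1 (pad) + 4 (ICRC) = 120, so
 * payload_size() returns 120 - 12 - 1 - 4 = 103.
 */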

#endif /* RXE_HDR_H */