/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
 */

#ifndef _MLX5_IB_WR_H
#define _MLX5_IB_WR_H

#include "mlx5_ib.h"

/*
 * If the translation table of a UMR WQE is at most this many bytes, it
 * is written inline in the WQE itself rather than pointed to by a data
 * segment for the HCA to fetch.
 */
enum {
	MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
};

/* Reserved padding preceding the Ethernet segment of a send WQE. */
struct mlx5_wqe_eth_pad {
	u8 rsvd0[16];
};

/* get_sq_edge - Get the next nearby edge.
 *
 * An 'edge' is defined as the first address following the end of the
 * fragment or of the SQ. Accordingly, while a WQE is being built and
 * the write pointer is repeatedly advanced, the writer only needs to
 * check whether the pointer has reached an edge.
 *
 * @sq - SQ buffer.
 * @idx - Stride index in the SQ buffer.
 *
 * Return:
 *	The new edge.
 */
static inline void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
{
	void *fragment_end;

	fragment_end = mlx5_frag_buf_get_wqe
		(&sq->fbc,
		 mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));

	return fragment_end + MLX5_SEND_WQE_BB;
}
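
/*
 * Illustrative sketch (not part of the original header): a typical WQE
 * writer keeps a running segment pointer alongside the current edge and,
 * whenever the pointer lands on the edge, wraps to the next fragment (or
 * back to the start of the SQ). This mirrors the edge-check pattern the
 * comment above describes; the helper name and the index computation
 * below are a simplified assumption, not the exact driver code.
 */
static inline void example_handle_sq_edge(struct mlx5_ib_wq *sq, void **seg,
					  u32 wqe_sz_16b, void **cur_edge)
{
	u32 idx;

	/* Nothing to do until the write pointer reaches the edge. */
	if (likely(*seg != *cur_edge))
		return;

	/* Stride index of the next 64B building block, wrapped to the SQ. */
	idx = (sq->cur_post + (wqe_sz_16b >> 2)) & (sq->wqe_cnt - 1);
	*cur_edge = get_sq_edge(sq, idx);
	*seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
}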

int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr, bool drain);
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr, bool drain);

static inline int mlx5_ib_post_send_nodrain(struct ib_qp *ibqp,
					    const struct ib_send_wr *wr,
					    const struct ib_send_wr **bad_wr)
{
	return mlx5_ib_post_send(ibqp, wr, bad_wr, false);
}

static inline int mlx5_ib_post_send_drain(struct ib_qp *ibqp,
					  const struct ib_send_wr *wr,
					  const struct ib_send_wr **bad_wr)
{
	return mlx5_ib_post_send(ibqp, wr, bad_wr, true);
}

static inline int mlx5_ib_post_recv_nodrain(struct ib_qp *ibqp,
					    const struct ib_recv_wr *wr,
					    const struct ib_recv_wr **bad_wr)
{
	return mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
}

static inline int mlx5_ib_post_recv_drain(struct ib_qp *ibqp,
					  const struct ib_recv_wr *wr,
					  const struct ib_recv_wr **bad_wr)
{
	return mlx5_ib_post_recv(ibqp, wr, bad_wr, true);
}
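
/*
 * Usage sketch (an illustrative assumption, not driver code): the _drain
 * variants exist so a drain routine can post one final signalled marker
 * WR after the QP has been moved to IB_QPS_ERR; the regular post path
 * would reject new work at that point, and the 'drain' flag bypasses
 * that check. Completion of the marker then guarantees the SQ is empty.
 */
#if 0	/* Example only, kept out of the build. */
static void example_drain_sq(struct ib_qp *qp, struct ib_cqe *drain_cqe)
{
	const struct ib_send_wr *bad_swr;
	struct ib_rdma_wr swr = {
		.wr = {
			.wr_cqe	    = drain_cqe,
			.opcode	    = IB_WR_RDMA_WRITE,
			.send_flags = IB_SEND_SIGNALED,
		},
	};

	/* The QP is assumed to already be in the error state here. */
	mlx5_ib_post_send_drain(qp, &swr.wr, &bad_swr);
}
#endif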

#endif /* _MLX5_IB_WR_H */