/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include "wq.h"
#include "mlx5_core.h"

u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
{
	return (u32)wq->fbc.sz_m1 + 1;
}

u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
{
	return wq->fbc.sz_m1 + 1;
}

u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq)
{
	return wq->fbc.log_stride;
}

u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
{
	return (u32)wq->fbc.sz_m1 + 1;
}

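/* Total WQ buffer size in bytes: 2^log_sz entries, each 2^log_stride bytes wide. */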
static u32 wq_get_byte_sz(u8 log_sz, u8 log_stride)
{
	return ((u32)1 << log_sz) << log_stride;
}

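/*
 * Create a cyclic work queue: allocate the doorbell record and the
 * (possibly fragmented) WQ buffer on the requested NUMA nodes, then
 * initialize the fragment buffer control that maps WQE indices onto
 * the buffer fragments.
 */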
int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		       void *wqc, struct mlx5_wq_cyc *wq,
		       struct mlx5_wq_ctrl *wq_ctrl)
{
	u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
	u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz);
	struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
	int err;

	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
		return err;
	}

	wq->db = wq_ctrl->db.db;

	err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
				       &wq_ctrl->buf, param->buf_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
		goto err_db_free;
	}

	mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc);
	wq->sz = mlx5_wq_cyc_get_size(wq);

	wq_ctrl->mdev = mdev;

	return 0;

err_db_free:
	mlx5_db_free(mdev, &wq_ctrl->db);

	return err;
}

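/*
 * Create the work queues of a QP: a single buffer holds the receive
 * queue (RQ) followed by the send queue (SQ). The RQ stride is encoded
 * in the QPC as log2 of the stride minus 4; the SQ stride is the
 * 64-byte send WQE basic block.
 */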
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		      void *qpc, struct mlx5_wq_qp *wq,
		      struct mlx5_wq_ctrl *wq_ctrl)
{
	u8 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride) + 4;
	u8 log_rq_sz = MLX5_GET(qpc, qpc, log_rq_size);
	u8 log_sq_stride = ilog2(MLX5_SEND_WQE_BB);
	u8 log_sq_sz = MLX5_GET(qpc, qpc, log_sq_size);

	u32 rq_byte_size;
	int err;

	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
		return err;
	}

	err = mlx5_frag_buf_alloc_node(mdev,
				       wq_get_byte_sz(log_rq_sz, log_rq_stride) +
				       wq_get_byte_sz(log_sq_sz, log_sq_stride),
				       &wq_ctrl->buf, param->buf_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
		goto err_db_free;
	}

	mlx5_init_fbc(wq_ctrl->buf.frags, log_rq_stride, log_rq_sz, &wq->rq.fbc);

	rq_byte_size = wq_get_byte_sz(log_rq_sz, log_rq_stride);

	if (rq_byte_size < PAGE_SIZE) {
		/* SQ starts within the same page of the RQ */
		u16 sq_strides_offset = rq_byte_size / MLX5_SEND_WQE_BB;

		mlx5_init_fbc_offset(wq_ctrl->buf.frags,
				     log_sq_stride, log_sq_sz, sq_strides_offset,
				     &wq->sq.fbc);
	} else {
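		/* RQ spans whole pages, so the SQ begins on the first page after the RQ */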
		u16 rq_npages = rq_byte_size >> PAGE_SHIFT;

		mlx5_init_fbc(wq_ctrl->buf.frags + rq_npages,
			      log_sq_stride, log_sq_sz, &wq->sq.fbc);
	}

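	/* The QP doorbell record holds separate receive and send doorbell counters */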
	wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR];
	wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];

	wq_ctrl->mdev = mdev;

	return 0;

err_db_free:
	mlx5_db_free(mdev, &wq_ctrl->db);

	return err;
}

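/*
 * Create a completion queue work queue; the stride is the CQE size,
 * either 64 or 128 bytes.
 */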
int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		     void *cqc, struct mlx5_cqwq *wq,
		     struct mlx5_wq_ctrl *wq_ctrl)
{
	/* CQE_STRIDE_128 and CQE_STRIDE_128_PAD both mean 128B stride */
	u8 log_wq_stride = MLX5_GET(cqc, cqc, cqe_sz) == CQE_STRIDE_64 ? 6 : 7;
	u8 log_wq_sz = MLX5_GET(cqc, cqc, log_cq_size);
	int err;

	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
		return err;
	}

	wq->db = wq_ctrl->db.db;

	err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
				       &wq_ctrl->buf,
				       param->buf_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n",
			       err);
		goto err_db_free;
	}

	mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, &wq->fbc);

	wq_ctrl->mdev = mdev;

	return 0;

err_db_free:
	mlx5_db_free(mdev, &wq_ctrl->db);

	return err;
}

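/*
 * Create a linked-list work queue: each WQE begins with a next segment
 * holding the index of the following WQE, so entries can be consumed
 * and reposted in any order rather than cyclically.
 */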
int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		      void *wqc, struct mlx5_wq_ll *wq,
		      struct mlx5_wq_ctrl *wq_ctrl)
{
	u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
	u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz);
	struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
	struct mlx5_wqe_srq_next_seg *next_seg;
	int err;
	int i;

	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
		return err;
	}

	wq->db = wq_ctrl->db.db;

	err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
				       &wq_ctrl->buf, param->buf_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
		goto err_db_free;
	}

	mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc);

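	/* Pre-link the WQEs: each entry's next segment points at the following entry */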
	for (i = 0; i < fbc->sz_m1; i++) {
		next_seg = mlx5_wq_ll_get_wqe(wq, i);
		next_seg->next_wqe_index = cpu_to_be16(i + 1);
	}
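	/* Remember where the last entry's next index lives so new WQEs can be chained at the tail */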
	next_seg = mlx5_wq_ll_get_wqe(wq, i);
	wq->tail_next = &next_seg->next_wqe_index;

	wq_ctrl->mdev = mdev;

	return 0;

err_db_free:
	mlx5_db_free(mdev, &wq_ctrl->db);

	return err;
}

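/* Release the WQ buffer and doorbell record allocated by the create helpers above */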
void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
{
	mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
	mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
}