/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include <linux/mlx5/srq.h>
#include <rdma/ib_verbs.h>
#include "mlx5_core.h"
#include <linux/mlx5/transobj.h>

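/* Dispatch an async event to the SRQ owner's handler.  The lookup takes a
 * reference so the SRQ cannot be freed while the handler runs;
 * mlx5_core_destroy_srq() waits on srq->free for the count to reach zero.
 */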
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	struct mlx5_core_srq *srq;

	spin_lock(&table->lock);

	srq = radix_tree_lookup(&table->tree, srqn);
	if (srq)
		atomic_inc(&srq->refcount);

	spin_unlock(&table->lock);

	if (!srq) {
		mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn);
		return;
	}

	srq->event(srq, event_type);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
}

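/* Size of the physical address (PAS) list needed to map the receive queue.
 * The constants follow the encodings used in set_srqc()/set_wq() below:
 * log_page_size is relative to a 4KB base page (hence the +12), a WQE
 * stride unit is 16 bytes (hence the +4), and page_offset appears to be
 * counted in 64ths of a page (hence the -6).
 */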
static int get_pas_size(struct mlx5_srq_attr *in)
{
	u32 log_page_size = in->log_page_size + 12;
	u32 log_srq_size  = in->log_size;
	u32 log_rq_stride = in->wqe_shift;
	u32 page_offset   = in->page_offset;
	u32 po_quanta	  = 1 << (log_page_size - 6);
	u32 rq_sz	  = 1 << (log_srq_size + 4 + log_rq_stride);
	u32 page_size	  = 1 << log_page_size;
	u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
	u32 rq_num_pas	  = (rq_sz_po + page_size - 1) / page_size;

	return rq_num_pas * sizeof(u64);
}

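/* Translate struct mlx5_srq_attr into the generic WQ context (used by the
 * RMP and XRQ objects) and into the legacy SRQ context, respectively.
 * Note that the two layouts encode the stride differently: log_wq_stride
 * is absolute, while log_rq_stride is relative to the 16-byte basic stride.
 */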
static void set_wq(void *wq, struct mlx5_srq_attr *in)
{
	MLX5_SET(wq,   wq, wq_signature,  !!(in->flags
		 & MLX5_SRQ_FLAG_WQ_SIG));
	MLX5_SET(wq,   wq, log_wq_pg_sz,  in->log_page_size);
	MLX5_SET(wq,   wq, log_wq_stride, in->wqe_shift + 4);
	MLX5_SET(wq,   wq, log_wq_sz,     in->log_size);
	MLX5_SET(wq,   wq, page_offset,   in->page_offset);
	MLX5_SET(wq,   wq, lwm,		  in->lwm);
	MLX5_SET(wq,   wq, pd,		  in->pd);
	MLX5_SET64(wq, wq, dbr_addr,	  in->db_record);
}

static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
{
	MLX5_SET(srqc,   srqc, wq_signature,  !!(in->flags
		 & MLX5_SRQ_FLAG_WQ_SIG));
	MLX5_SET(srqc,   srqc, log_page_size, in->log_page_size);
	MLX5_SET(srqc,   srqc, log_rq_stride, in->wqe_shift);
	MLX5_SET(srqc,   srqc, log_srq_size,  in->log_size);
	MLX5_SET(srqc,   srqc, page_offset,   in->page_offset);
	MLX5_SET(srqc,	 srqc, lwm,	      in->lwm);
	MLX5_SET(srqc,	 srqc, pd,	      in->pd);
	MLX5_SET64(srqc, srqc, dbr_addr,      in->db_record);
	MLX5_SET(srqc,	 srqc, xrcd,	      in->xrcd);
	MLX5_SET(srqc,	 srqc, cqn,	      in->cqn);
}

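/* Inverse of set_wq()/set_srqc(): decode a hardware context back into
 * struct mlx5_srq_attr for the query commands below.
 */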
static void get_wq(void *wq, struct mlx5_srq_attr *in)
{
	if (MLX5_GET(wq, wq, wq_signature))
		in->flags |= MLX5_SRQ_FLAG_WQ_SIG;
	in->log_page_size = MLX5_GET(wq,   wq, log_wq_pg_sz);
	in->wqe_shift	  = MLX5_GET(wq,   wq, log_wq_stride) - 4;
	in->log_size	  = MLX5_GET(wq,   wq, log_wq_sz);
	in->page_offset   = MLX5_GET(wq,   wq, page_offset);
	in->lwm		  = MLX5_GET(wq,   wq, lwm);
	in->pd		  = MLX5_GET(wq,   wq, pd);
	in->db_record	  = MLX5_GET64(wq, wq, dbr_addr);
}

static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
{
	if (MLX5_GET(srqc, srqc, wq_signature))
		in->flags |= MLX5_SRQ_FLAG_WQ_SIG;
	in->log_page_size = MLX5_GET(srqc,   srqc, log_page_size);
	in->wqe_shift	  = MLX5_GET(srqc,   srqc, log_rq_stride);
	in->log_size	  = MLX5_GET(srqc,   srqc, log_srq_size);
	in->page_offset   = MLX5_GET(srqc,   srqc, page_offset);
	in->lwm		  = MLX5_GET(srqc,   srqc, lwm);
	in->pd		  = MLX5_GET(srqc,   srqc, pd);
	in->db_record	  = MLX5_GET64(srqc, srqc, dbr_addr);
}

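/* Look up an SRQ by number, taking a reference on it.  The caller must
 * drop the reference and complete srq->free when the count reaches zero.
 */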
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	struct mlx5_core_srq *srq;

	spin_lock(&table->lock);

	srq = radix_tree_lookup(&table->tree, srqn);
	if (srq)
		atomic_inc(&srq->refcount);

	spin_unlock(&table->lock);

	return srq;
}
EXPORT_SYMBOL(mlx5_core_get_srq);

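/* Firmware commands for the native SRQ object, used only with ISSI 0
 * (see the *_srq_split() dispatchers below).
 */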
static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
	void *create_in;
	void *srqc;
	void *pas;
	int pas_size;
	int inlen;
	int err;

	pas_size  = get_pas_size(in);
	inlen	  = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
	create_in = kvzalloc(inlen, GFP_KERNEL);
	if (!create_in)
		return -ENOMEM;

	srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
	pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);

	set_srqc(srqc, in);
	memcpy(pas, in->pas, pas_size);

	MLX5_SET(create_srq_in, create_in, opcode,
		 MLX5_CMD_OP_CREATE_SRQ);

	err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
			    sizeof(create_out));
	kvfree(create_in);
	if (!err)
		srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);

	return err;
}

static int destroy_srq_cmd(struct mlx5_core_dev *dev,
			   struct mlx5_core_srq *srq)
{
	u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};
	u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};

	MLX5_SET(destroy_srq_in, srq_in, opcode,
		 MLX5_CMD_OP_DESTROY_SRQ);
	MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);

	return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
			     srq_out, sizeof(srq_out));
}

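/* Arm the SRQ limit: the device fires an SRQ limit event once the number
 * of posted receive WQEs drops below @lwm.  With ISSI 0 this is an ARM_RQ
 * command with the SRQ op_mod.
 */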
static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		       u16 lwm, int is_srq)
{
	u32 srq_in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
	u32 srq_out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};

	MLX5_SET(arm_rq_in, srq_in, opcode, MLX5_CMD_OP_ARM_RQ);
	MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
	MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn);
	MLX5_SET(arm_rq_in, srq_in, lwm, lwm);

	return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
			     srq_out, sizeof(srq_out));
}

static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
	u32 *srq_out;
	void *srqc;
	int err;

	srq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_srq_out), GFP_KERNEL);
	if (!srq_out)
		return -ENOMEM;

	MLX5_SET(query_srq_in, srq_in, opcode,
		 MLX5_CMD_OP_QUERY_SRQ);
	MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
	err = mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
			    srq_out, MLX5_ST_SZ_BYTES(query_srq_out));
	if (err)
		goto out;

	srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
	get_srqc(srqc, out);
	if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
		out->flags |= MLX5_SRQ_FLAG_ERR;
out:
	kvfree(srq_out);
	return err;
}

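/* XRC SRQ commands, backing MLX5_RES_XSRQ resources on ISSI >= 1. */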
static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
			      struct mlx5_core_srq *srq,
			      struct mlx5_srq_attr *in)
{
	u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)] = {0};
	void *create_in;
	void *xrc_srqc;
	void *pas;
	int pas_size;
	int inlen;
	int err;

	pas_size  = get_pas_size(in);
	inlen	  = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
	create_in = kvzalloc(inlen, GFP_KERNEL);
	if (!create_in)
		return -ENOMEM;

	xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
				xrc_srq_context_entry);
	pas	 = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);

	set_srqc(xrc_srqc, in);
	MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
	memcpy(pas, in->pas, pas_size);
	MLX5_SET(create_xrc_srq_in, create_in, opcode,
		 MLX5_CMD_OP_CREATE_XRC_SRQ);

	err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
			    sizeof(create_out));
	if (err)
		goto out;

	srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
out:
	kvfree(create_in);
	return err;
}

static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
			       struct mlx5_core_srq *srq)
{
	u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)]   = {0};
	u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};

	MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
		 MLX5_CMD_OP_DESTROY_XRC_SRQ);
	MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);

	return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
			     xrcsrq_out, sizeof(xrcsrq_out));
}

static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
			   struct mlx5_core_srq *srq, u16 lwm)
{
	u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)]   = {0};
	u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};

	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode,   MLX5_CMD_OP_ARM_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod,
		 MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm,      lwm);

	return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
			     xrcsrq_out, sizeof(xrcsrq_out));
}

static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
			     struct mlx5_core_srq *srq,
			     struct mlx5_srq_attr *out)
{
	u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {0};
	u32 *xrcsrq_out;
	void *xrc_srqc;
	int err;

	xrcsrq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out), GFP_KERNEL);
	if (!xrcsrq_out)
		return -ENOMEM;

	MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
		 MLX5_CMD_OP_QUERY_XRC_SRQ);
	MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);

	err = mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), xrcsrq_out,
			    MLX5_ST_SZ_BYTES(query_xrc_srq_out));
	if (err)
		goto out;

	xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
				xrc_srq_context_entry);
	get_srqc(xrc_srqc, out);
	if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
		out->flags |= MLX5_SRQ_FLAG_ERR;

out:
	kvfree(xrcsrq_out);
	return err;
}

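/* With ISSI >= 1 a plain SRQ is backed by an RMP (receive memory pool)
 * transport object, managed through the transobj helpers.
 */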
static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	void *create_in;
	void *rmpc;
	void *wq;
	int pas_size;
	int inlen;
	int err;

	pas_size = get_pas_size(in);
	inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
	create_in = kvzalloc(inlen, GFP_KERNEL);
	if (!create_in)
		return -ENOMEM;

	rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
	wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

	MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
	set_wq(wq, in);
	memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);

	err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);

	kvfree(create_in);
	return err;
}

static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
			   struct mlx5_core_srq *srq)
{
	return mlx5_core_destroy_rmp(dev, srq->srqn);
}

static int arm_rmp_cmd(struct mlx5_core_dev *dev,
		       struct mlx5_core_srq *srq,
		       u16 lwm)
{
	void *in;
	void *rmpc;
	void *wq;
	void *bitmask;
	int err;

	in = kvzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rmpc	= MLX5_ADDR_OF(modify_rmp_in, in, ctx);
	bitmask	= MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
	wq	= MLX5_ADDR_OF(rmpc, rmpc, wq);

	MLX5_SET(modify_rmp_in, in,	 rmp_state, MLX5_RMPC_STATE_RDY);
	MLX5_SET(modify_rmp_in, in,	 rmpn,      srq->srqn);
	MLX5_SET(wq,		wq,	 lwm,	    lwm);
	MLX5_SET(rmp_bitmask,	bitmask, lwm,	    1);
	MLX5_SET(rmpc,		rmpc,	 state,	    MLX5_RMPC_STATE_RDY);

	err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));

	kvfree(in);
	return err;
}

static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 *rmp_out;
	void *rmpc;
	int err;

	rmp_out = kvzalloc(MLX5_ST_SZ_BYTES(query_rmp_out), GFP_KERNEL);
	if (!rmp_out)
		return -ENOMEM;

	err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
	if (err)
		goto out;

	rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
	get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
	if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
		out->flags |= MLX5_SRQ_FLAG_ERR;

out:
	kvfree(rmp_out);
	return err;
}

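/* XRQ commands back MLX5_RES_XRQ resources, i.e. tag-matching SRQs
 * (IB_SRQT_TM), optionally with the rendezvous offload enabled.
 */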
static int create_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	u32 create_out[MLX5_ST_SZ_DW(create_xrq_out)] = {0};
	void *create_in;
	void *xrqc;
	void *wq;
	int pas_size;
	int inlen;
	int err;

	pas_size = get_pas_size(in);
	inlen = MLX5_ST_SZ_BYTES(create_xrq_in) + pas_size;
	create_in = kvzalloc(inlen, GFP_KERNEL);
	if (!create_in)
		return -ENOMEM;

	xrqc = MLX5_ADDR_OF(create_xrq_in, create_in, xrq_context);
	wq = MLX5_ADDR_OF(xrqc, xrqc, wq);

	set_wq(wq, in);
	memcpy(MLX5_ADDR_OF(xrqc, xrqc, wq.pas), in->pas, pas_size);

	if (in->type == IB_SRQT_TM) {
		MLX5_SET(xrqc, xrqc, topology, MLX5_XRQC_TOPOLOGY_TAG_MATCHING);
		if (in->flags & MLX5_SRQ_FLAG_RNDV)
			MLX5_SET(xrqc, xrqc, offload, MLX5_XRQC_OFFLOAD_RNDV);
		MLX5_SET(xrqc, xrqc,
			 tag_matching_topology_context.log_matching_list_sz,
			 in->tm_log_list_size);
	}
	MLX5_SET(xrqc, xrqc, user_index, in->user_index);
	MLX5_SET(xrqc, xrqc, cqn, in->cqn);
	MLX5_SET(create_xrq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRQ);
	err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
			    sizeof(create_out));
	kvfree(create_in);
	if (!err)
		srq->srqn = MLX5_GET(create_xrq_out, create_out, xrqn);

	return err;
}

static int destroy_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
	u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_xrq_out)] = {0};

	MLX5_SET(destroy_xrq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRQ);
	MLX5_SET(destroy_xrq_in, in, xrqn,   srq->srqn);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int arm_xrq_cmd(struct mlx5_core_dev *dev,
		       struct mlx5_core_srq *srq,
		       u16 lwm)
{
	u32 out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};

	MLX5_SET(arm_rq_in, in, opcode,     MLX5_CMD_OP_ARM_RQ);
	MLX5_SET(arm_rq_in, in, op_mod,     MLX5_ARM_RQ_IN_OP_MOD_XRQ);
	MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
	MLX5_SET(arm_rq_in, in, lwm,	    lwm);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int query_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {0};
	u32 *xrq_out;
	int outlen = MLX5_ST_SZ_BYTES(query_xrq_out);
	void *xrqc;
	int err;

	xrq_out = kvzalloc(outlen, GFP_KERNEL);
	if (!xrq_out)
		return -ENOMEM;

	MLX5_SET(query_xrq_in, in, opcode, MLX5_CMD_OP_QUERY_XRQ);
	MLX5_SET(query_xrq_in, in, xrqn, srq->srqn);

	err = mlx5_cmd_exec(dev, in, sizeof(in), xrq_out, outlen);
	if (err)
		goto out;

	xrqc = MLX5_ADDR_OF(query_xrq_out, xrq_out, xrq_context);
	get_wq(MLX5_ADDR_OF(xrqc, xrqc, wq), out);
	if (MLX5_GET(xrqc, xrqc, state) != MLX5_XRQC_STATE_GOOD)
		out->flags |= MLX5_SRQ_FLAG_ERR;
	out->tm_next_tag =
		MLX5_GET(xrqc, xrqc,
			 tag_matching_topology_context.append_next_index);
	out->tm_hw_phase_cnt =
		MLX5_GET(xrqc, xrqc,
			 tag_matching_topology_context.hw_phase_cnt);
	out->tm_sw_phase_cnt =
		MLX5_GET(xrqc, xrqc,
			 tag_matching_topology_context.sw_phase_cnt);

out:
	kvfree(xrq_out);
	return err;
}

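/* Dispatch by interface step sequence ID (ISSI): ISSI 0 devices expose a
 * native SRQ object, while newer interfaces implement it via an XRC SRQ,
 * an XRQ or an RMP, chosen by the resource type set at creation time.
 */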
static int create_srq_split(struct mlx5_core_dev *dev,
			    struct mlx5_core_srq *srq,
			    struct mlx5_srq_attr *in)
{
	if (!dev->issi)
		return create_srq_cmd(dev, srq, in);
	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return create_xrc_srq_cmd(dev, srq, in);
	case MLX5_RES_XRQ:
		return create_xrq_cmd(dev, srq, in);
	default:
		return create_rmp_cmd(dev, srq, in);
	}
}

static int destroy_srq_split(struct mlx5_core_dev *dev,
			     struct mlx5_core_srq *srq)
{
	if (!dev->issi)
		return destroy_srq_cmd(dev, srq);
	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return destroy_xrc_srq_cmd(dev, srq);
	case MLX5_RES_XRQ:
		return destroy_xrq_cmd(dev, srq);
	default:
		return destroy_rmp_cmd(dev, srq);
	}
}

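/* Create an SRQ: pick the backing resource type from the IB SRQ type,
 * create it in firmware, then publish it in the radix tree so that
 * mlx5_srq_event() and mlx5_core_get_srq() can find it.  The IRQ-safe
 * locking here matches the plain spin_lock() taken on the (presumably
 * interrupt-driven) event path.
 */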
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *in)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	int err;

	switch (in->type) {
	case IB_SRQT_XRC:
		srq->common.res = MLX5_RES_XSRQ;
		break;
	case IB_SRQT_TM:
		srq->common.res = MLX5_RES_XRQ;
		break;
	default:
		srq->common.res = MLX5_RES_SRQ;
	}

	err = create_srq_split(dev, srq, in);
	if (err)
		return err;

	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);

	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, srq->srqn, srq);
	spin_unlock_irq(&table->lock);
	if (err) {
		mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
		goto err_destroy_srq_split;
	}

	return 0;

err_destroy_srq_split:
	destroy_srq_split(dev, srq);

	return err;
}
EXPORT_SYMBOL(mlx5_core_create_srq);

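/* Destroy an SRQ: unpublish it first so no new references can be taken,
 * destroy the firmware object, then drop the initial reference and wait
 * for any concurrent event handlers to release theirs.
 */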
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	struct mlx5_core_srq *tmp;
	int err;

	spin_lock_irq(&table->lock);
	tmp = radix_tree_delete(&table->tree, srq->srqn);
	spin_unlock_irq(&table->lock);
	if (!tmp) {
		mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
		return -EINVAL;
	}
	if (tmp != srq) {
		mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
		return -EINVAL;
	}

	err = destroy_srq_split(dev, srq);
	if (err)
		return err;

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	return 0;
}
EXPORT_SYMBOL(mlx5_core_destroy_srq);

int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_srq_attr *out)
{
	if (!dev->issi)
		return query_srq_cmd(dev, srq, out);
	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return query_xrc_srq_cmd(dev, srq, out);
	case MLX5_RES_XRQ:
		return query_xrq_cmd(dev, srq, out);
	default:
		return query_rmp_cmd(dev, srq, out);
	}
}
EXPORT_SYMBOL(mlx5_core_query_srq);

int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		      u16 lwm, int is_srq)
{
	if (!dev->issi)
		return arm_srq_cmd(dev, srq, lwm, is_srq);
	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return arm_xrc_srq_cmd(dev, srq, lwm);
	case MLX5_RES_XRQ:
		return arm_xrq_cmd(dev, srq, lwm);
	default:
		return arm_rmp_cmd(dev, srq, lwm);
	}
}
EXPORT_SYMBOL(mlx5_core_arm_srq);

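/* The SRQ table maps srqn -> struct mlx5_core_srq for the async event
 * handler.  Lookups and inserts happen under a spinlock, so the radix
 * tree is initialized with GFP_ATOMIC for its internal node allocations.
 */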
void mlx5_init_srq_table(struct mlx5_core_dev *dev)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;

	memset(table, 0, sizeof(*table));
	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}

void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev)
{
	/* nothing */
}