1 /*
2  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/gfp.h>
34 #include <linux/export.h>
35 #include <linux/mlx5/cmd.h>
36 #include <linux/mlx5/qp.h>
37 #include <linux/mlx5/driver.h>
38 #include <linux/mlx5/transobj.h>
39 
40 #include "mlx5_core.h"
41 
/* Look up the resource registered under @rsn and take a reference on it.
 * Returns NULL (after warning) when no such resource exists in the table.
 * The caller must release the reference with mlx5_core_put_rsc().
 */
static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
						 u32 rsn)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	struct mlx5_core_rsc_common *rsc;

	spin_lock(&table->lock);
	rsc = radix_tree_lookup(&table->tree, rsn);
	if (rsc)
		atomic_inc(&rsc->refcount);
	spin_unlock(&table->lock);

	if (!rsc)
		mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
			       rsn);

	return rsc;
}
63 
mlx5_core_put_rsc(struct mlx5_core_rsc_common * common)64 void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
65 {
66 	if (atomic_dec_and_test(&common->refcount))
67 		complete(&common->free);
68 }
69 
qp_allowed_event_types(void)70 static u64 qp_allowed_event_types(void)
71 {
72 	u64 mask;
73 
74 	mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) |
75 	       BIT(MLX5_EVENT_TYPE_COMM_EST) |
76 	       BIT(MLX5_EVENT_TYPE_SQ_DRAINED) |
77 	       BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
78 	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |
79 	       BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) |
80 	       BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) |
81 	       BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR);
82 
83 	return mask;
84 }
85 
rq_allowed_event_types(void)86 static u64 rq_allowed_event_types(void)
87 {
88 	u64 mask;
89 
90 	mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
91 	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
92 
93 	return mask;
94 }
95 
sq_allowed_event_types(void)96 static u64 sq_allowed_event_types(void)
97 {
98 	return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
99 }
100 
dct_allowed_event_types(void)101 static u64 dct_allowed_event_types(void)
102 {
103 	return BIT(MLX5_EVENT_TYPE_DCT_DRAINED);
104 }
105 
is_event_type_allowed(int rsc_type,int event_type)106 static bool is_event_type_allowed(int rsc_type, int event_type)
107 {
108 	switch (rsc_type) {
109 	case MLX5_EVENT_QUEUE_TYPE_QP:
110 		return BIT(event_type) & qp_allowed_event_types();
111 	case MLX5_EVENT_QUEUE_TYPE_RQ:
112 		return BIT(event_type) & rq_allowed_event_types();
113 	case MLX5_EVENT_QUEUE_TYPE_SQ:
114 		return BIT(event_type) & sq_allowed_event_types();
115 	case MLX5_EVENT_QUEUE_TYPE_DCT:
116 		return BIT(event_type) & dct_allowed_event_types();
117 	default:
118 		WARN(1, "Event arrived for unknown resource type");
119 		return false;
120 	}
121 }
122 
/* Dispatch an async firmware event for resource @rsn to its owner.
 *
 * Takes a temporary reference on the resource via mlx5_get_rsc() and
 * always releases it before returning.  Events whose type is not allowed
 * for the resource type encoded in the high bits of @rsn are dropped
 * with a warning.
 */
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
{
	struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
	struct mlx5_core_dct *dct;
	struct mlx5_core_qp *qp;

	if (!common)
		return;

	if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) {
		mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",
			       event_type, rsn);
		/* Must still drop the reference taken by mlx5_get_rsc();
		 * the original code leaked it here, stalling teardown in
		 * destroy_resource_common() forever.
		 */
		goto out;
	}

	switch (common->res) {
	case MLX5_RES_QP:
	case MLX5_RES_RQ:
	case MLX5_RES_SQ:
		qp = (struct mlx5_core_qp *)common;
		qp->event(qp, event_type);
		break;
	case MLX5_RES_DCT:
		dct = (struct mlx5_core_dct *)common;
		if (event_type == MLX5_EVENT_TYPE_DCT_DRAINED)
			complete(&dct->drained);
		break;
	default:
		mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
	}
out:
	mlx5_core_put_rsc(common);
}
156 
create_resource_common(struct mlx5_core_dev * dev,struct mlx5_core_qp * qp,int rsc_type)157 static int create_resource_common(struct mlx5_core_dev *dev,
158 				  struct mlx5_core_qp *qp,
159 				  int rsc_type)
160 {
161 	struct mlx5_qp_table *table = &dev->priv.qp_table;
162 	int err;
163 
164 	qp->common.res = rsc_type;
165 	spin_lock_irq(&table->lock);
166 	err = radix_tree_insert(&table->tree,
167 				qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
168 				qp);
169 	spin_unlock_irq(&table->lock);
170 	if (err)
171 		return err;
172 
173 	atomic_set(&qp->common.refcount, 1);
174 	init_completion(&qp->common.free);
175 	qp->pid = current->pid;
176 
177 	return 0;
178 }
179 
destroy_resource_common(struct mlx5_core_dev * dev,struct mlx5_core_qp * qp)180 static void destroy_resource_common(struct mlx5_core_dev *dev,
181 				    struct mlx5_core_qp *qp)
182 {
183 	struct mlx5_qp_table *table = &dev->priv.qp_table;
184 	unsigned long flags;
185 
186 	spin_lock_irqsave(&table->lock, flags);
187 	radix_tree_delete(&table->tree,
188 			  qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
189 	spin_unlock_irqrestore(&table->lock, flags);
190 	mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
191 	wait_for_completion(&qp->common.free);
192 }
193 
/* Create a DCT object in firmware and register it in the resource table.
 *
 * @in/@inlen: caller-built CREATE_DCT mailbox (opcode is set here).
 * On failure of the table registration, the just-created DCT is destroyed
 * via a DESTROY_DCT command built in the local din/dout buffers.
 */
int mlx5_core_create_dct(struct mlx5_core_dev *dev,
			 struct mlx5_core_dct *dct,
			 u32 *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(create_dct_out)]   = {0};
	u32 din[MLX5_ST_SZ_DW(destroy_dct_in)]   = {0};
	u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
	struct mlx5_core_qp *qp = &dct->mqp;
	int err;

	init_completion(&dct->drained);
	MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
		return err;
	}

	qp->qpn = MLX5_GET(create_dct_out, out, dctn);
	err = create_resource_common(dev, qp, MLX5_RES_DCT);
	if (err)
		goto err_cmd;

	return 0;
err_cmd:
	MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
	MLX5_SET(destroy_dct_in, din, dctn, qp->qpn);
	/* Fix: original passed &in/&out (the CREATE buffers, and &in is a
	 * u32 ** here) instead of the destroy mailboxes din/dout.
	 */
	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
	return err;
}
226 EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
227 
/* Create a QP in firmware, register it in the resource table and expose
 * it through debugfs.  On registration failure the QP is destroyed again.
 */
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
			struct mlx5_core_qp *qp,
			u32 *in, int inlen)
{
	u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
	u32 din[MLX5_ST_SZ_DW(destroy_qp_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(create_qp_out)]   = {0};
	int err;

	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		return err;

	qp->qpn = MLX5_GET(create_qp_out, out, qpn);
	mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);

	err = create_resource_common(dev, qp, MLX5_RES_QP);
	if (err)
		goto err_cmd;

	/* debugfs failure is not fatal; just note it. */
	err = mlx5_debug_qp_add(dev, qp);
	if (err)
		mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
			      qp->qpn);

	atomic_inc(&dev->num_qps);

	return 0;

err_cmd:
	MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
	return err;
}
267 EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
268 
mlx5_core_drain_dct(struct mlx5_core_dev * dev,struct mlx5_core_dct * dct)269 static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
270 			       struct mlx5_core_dct *dct)
271 {
272 	u32 out[MLX5_ST_SZ_DW(drain_dct_out)] = {0};
273 	u32 in[MLX5_ST_SZ_DW(drain_dct_in)]   = {0};
274 	struct mlx5_core_qp *qp = &dct->mqp;
275 
276 	MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
277 	MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
278 	return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
279 			     (void *)&out, sizeof(out));
280 }
281 
mlx5_core_destroy_dct(struct mlx5_core_dev * dev,struct mlx5_core_dct * dct)282 int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
283 			  struct mlx5_core_dct *dct)
284 {
285 	u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
286 	u32 in[MLX5_ST_SZ_DW(destroy_dct_in)]   = {0};
287 	struct mlx5_core_qp *qp = &dct->mqp;
288 	int err;
289 
290 	err = mlx5_core_drain_dct(dev, dct);
291 	if (err) {
292 		if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
293 			goto destroy;
294 		} else {
295 			mlx5_core_warn(dev, "failed drain DCT 0x%x with error 0x%x\n", qp->qpn, err);
296 			return err;
297 		}
298 	}
299 	wait_for_completion(&dct->drained);
300 destroy:
301 	destroy_resource_common(dev, &dct->mqp);
302 	MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
303 	MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
304 	err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
305 			    (void *)&out, sizeof(out));
306 	return err;
307 }
308 EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
309 
mlx5_core_destroy_qp(struct mlx5_core_dev * dev,struct mlx5_core_qp * qp)310 int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
311 			 struct mlx5_core_qp *qp)
312 {
313 	u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
314 	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)]   = {0};
315 	int err;
316 
317 	mlx5_debug_qp_remove(dev, qp);
318 
319 	destroy_resource_common(dev, qp);
320 
321 	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
322 	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
323 	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
324 	if (err)
325 		return err;
326 
327 	atomic_dec(&dev->num_qps);
328 	return 0;
329 }
330 EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
331 
/* Program the device delay-drop timeout.  Firmware expects the value in
 * units of 100 usec, hence the division.
 */
int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
			     u32 timeout_usec)
{
	u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(set_delay_drop_params_out)] = {0};

	MLX5_SET(set_delay_drop_params_in, in, opcode,
		 MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
	MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout,
		 timeout_usec / 100);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
344 EXPORT_SYMBOL_GPL(mlx5_core_set_delay_drop);
345 
/* Pair of dynamically allocated firmware command mailboxes (input and
 * output) plus their byte lengths; managed by mbox_alloc()/mbox_free().
 */
struct mbox_info {
	u32 *in;	/* command input mailbox */
	u32 *out;	/* command output mailbox */
	int inlen;	/* size of *in in bytes */
	int outlen;	/* size of *out in bytes */
};
352 
mbox_alloc(struct mbox_info * mbox,int inlen,int outlen)353 static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
354 {
355 	mbox->inlen  = inlen;
356 	mbox->outlen = outlen;
357 	mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
358 	mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
359 	if (!mbox->in || !mbox->out) {
360 		kfree(mbox->in);
361 		kfree(mbox->out);
362 		return -ENOMEM;
363 	}
364 
365 	return 0;
366 }
367 
mbox_free(struct mbox_info * mbox)368 static void mbox_free(struct mbox_info *mbox)
369 {
370 	kfree(mbox->in);
371 	kfree(mbox->out);
372 }
373 
/* Allocate and populate the command mailboxes for a MODIFY_QP-family
 * transition identified by @opcode.
 *
 * For the 2RST/2ERR transitions only opcode and qpn are set; for the
 * state-advancing transitions the opt_param_mask and a full copy of the
 * caller's QP context (@qpc) are embedded as well.
 *
 * Returns 0 on success, -ENOMEM on mailbox allocation failure, or
 * -EINVAL for an unrecognized opcode.  On success the caller owns the
 * mailboxes and must release them with mbox_free().
 */
static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
				u32 opt_param_mask, void *qpc,
				struct mbox_info *mbox)
{
	mbox->out = NULL;
	mbox->in = NULL;

/* Allocate mailboxes sized for the given command's in/out layouts. */
#define MBOX_ALLOC(mbox, typ)  \
	mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))

/* Stamp the common opcode/qpn header into the input mailbox. */
#define MOD_QP_IN_SET(typ, in, _opcode, _qpn) \
	MLX5_SET(typ##_in, in, opcode, _opcode); \
	MLX5_SET(typ##_in, in, qpn, _qpn)

/* As MOD_QP_IN_SET, plus optional-parameter mask and QP context copy. */
#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc) \
	MOD_QP_IN_SET(typ, in, _opcode, _qpn); \
	MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
	memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, MLX5_ST_SZ_BYTES(qpc))

	switch (opcode) {
	/* 2RST & 2ERR */
	case MLX5_CMD_OP_2RST_QP:
		if (MBOX_ALLOC(mbox, qp_2rst))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn);
		break;
	case MLX5_CMD_OP_2ERR_QP:
		if (MBOX_ALLOC(mbox, qp_2err))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn);
		break;

	/* MODIFY with QPC */
	case MLX5_CMD_OP_RST2INIT_QP:
		if (MBOX_ALLOC(mbox, rst2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		if (MBOX_ALLOC(mbox, init2rtr_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		if (MBOX_ALLOC(mbox, rtr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		if (MBOX_ALLOC(mbox, rts2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		if (MBOX_ALLOC(mbox, sqerr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
		if (MBOX_ALLOC(mbox, init2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	default:
		mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
			      opcode, qpn);
		return -EINVAL;
	}
	return 0;
}
450 
/* Execute a MODIFY_QP-family state transition for @qp.
 * Builds the per-opcode mailboxes, runs the command and frees them.
 */
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
			u32 opt_param_mask, void *qpc,
			struct mlx5_core_qp *qp)
{
	struct mbox_info mbox;
	int err;

	err = modify_qp_mbox_alloc(dev, opcode, qp->qpn, opt_param_mask, qpc,
				   &mbox);
	if (!err) {
		err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen,
				    mbox.out, mbox.outlen);
		mbox_free(&mbox);
	}

	return err;
}
467 EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
468 
mlx5_init_qp_table(struct mlx5_core_dev * dev)469 void mlx5_init_qp_table(struct mlx5_core_dev *dev)
470 {
471 	struct mlx5_qp_table *table = &dev->priv.qp_table;
472 
473 	memset(table, 0, sizeof(*table));
474 	spin_lock_init(&table->lock);
475 	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
476 	mlx5_qp_debugfs_init(dev);
477 }
478 
/* Tear down the QP table's debugfs entries; the radix tree itself is
 * expected to be empty by the time this is called.
 */
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
	mlx5_qp_debugfs_cleanup(dev);
}
483 
/* Query the firmware state of @qp into the caller-provided @out buffer. */
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
		       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};

	MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
	MLX5_SET(query_qp_in, in, qpn, qp->qpn);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
493 EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
494 
/* Query the firmware state of @dct into the caller-provided @out buffer. */
int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
			u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {0};

	MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
	MLX5_SET(query_dct_in, in, dctn, dct->mqp.qpn);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
507 EXPORT_SYMBOL_GPL(mlx5_core_dct_query);
508 
/* Allocate an XRC domain; on success the domain number is stored in
 * @xrcdn.
 */
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
	int err;

	MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
	return 0;
}
521 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
522 
/* Release an XRC domain previously obtained via mlx5_core_xrcd_alloc(). */
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};

	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
532 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
533 
/* Create an RQ and register it in the resource table so async events are
 * routed to it.  On registration failure the RQ is destroyed again.
 */
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *rq)
{
	u32 rqn;
	int err;

	err = mlx5_core_create_rq(dev, in, inlen, &rqn);
	if (err)
		return err;

	rq->qpn = rqn;
	err = create_resource_common(dev, rq, MLX5_RES_RQ);
	if (err)
		mlx5_core_destroy_rq(dev, rqn);

	return err;
}
556 EXPORT_SYMBOL(mlx5_core_create_rq_tracked);
557 
mlx5_core_destroy_rq_tracked(struct mlx5_core_dev * dev,struct mlx5_core_qp * rq)558 void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
559 				  struct mlx5_core_qp *rq)
560 {
561 	destroy_resource_common(dev, rq);
562 	mlx5_core_destroy_rq(dev, rq->qpn);
563 }
564 EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
565 
/* Create an SQ and register it in the resource table so async events are
 * routed to it.  On registration failure the SQ is destroyed again.
 */
int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *sq)
{
	u32 sqn;
	int err;

	err = mlx5_core_create_sq(dev, in, inlen, &sqn);
	if (err)
		return err;

	sq->qpn = sqn;
	err = create_resource_common(dev, sq, MLX5_RES_SQ);
	if (err)
		mlx5_core_destroy_sq(dev, sqn);

	return err;
}
588 EXPORT_SYMBOL(mlx5_core_create_sq_tracked);
589 
mlx5_core_destroy_sq_tracked(struct mlx5_core_dev * dev,struct mlx5_core_qp * sq)590 void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
591 				  struct mlx5_core_qp *sq)
592 {
593 	destroy_resource_common(dev, sq);
594 	mlx5_core_destroy_sq(dev, sq->qpn);
595 }
596 EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
597 
/* Allocate a queue counter set; on success its id is stored in
 * @counter_id.
 */
int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)]   = {0};
	int err;

	MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*counter_id = MLX5_GET(alloc_q_counter_out, out, counter_set_id);
	return 0;
}
611 EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);
612 
/* Free the queue counter set identified by @counter_id. */
int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
{
	u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)]   = {0};

	MLX5_SET(dealloc_q_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
	MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
623 EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);
624 
/* Read the counters in set @counter_id into @out (@out_size bytes);
 * when @reset is non-zero the counters are cleared after the read.
 */
int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
			      int reset, void *out, int out_size)
{
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
	MLX5_SET(query_q_counter_in, in, clear, reset);
	MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}
635 EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
636