/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "lib/eq.h"

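/*
 * Each resource type (QP, EQ, CQ) exposes one debugfs file per field; the
 * enums below index the matching name tables that provide the file names.
 */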
enum {
	QP_PID,
	QP_STATE,
	QP_XPORT,
	QP_MTU,
	QP_N_RECV,
	QP_RECV_SZ,
	QP_N_SEND,
	QP_LOG_PG_SZ,
	QP_RQPN,
};

static char *qp_fields[] = {
	[QP_PID]	= "pid",
	[QP_STATE]	= "state",
	[QP_XPORT]	= "transport",
	[QP_MTU]	= "mtu",
	[QP_N_RECV]	= "num_recv",
	[QP_RECV_SZ]	= "rcv_wqe_sz",
	[QP_N_SEND]	= "num_send",
	[QP_LOG_PG_SZ]	= "log2_page_sz",
	[QP_RQPN]	= "remote_qpn",
};

enum {
	EQ_NUM_EQES,
	EQ_INTR,
	EQ_LOG_PG_SZ,
};

static char *eq_fields[] = {
	[EQ_NUM_EQES]	= "num_eqes",
	[EQ_INTR]	= "intr",
	[EQ_LOG_PG_SZ]	= "log_page_size",
};

enum {
	CQ_PID,
	CQ_NUM_CQES,
	CQ_LOG_PG_SZ,
};

static char *cq_fields[] = {
	[CQ_PID]	= "pid",
	[CQ_NUM_CQES]	= "num_cqes",
	[CQ_LOG_PG_SZ]	= "log_page_size",
};

struct dentry *mlx5_debugfs_root;
EXPORT_SYMBOL(mlx5_debugfs_root);

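/* Create and remove the driver-wide "mlx5" debugfs root directory. */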
void mlx5_register_debugfs(void)
{
	mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL);
}

void mlx5_unregister_debugfs(void)
{
	debugfs_remove(mlx5_debugfs_root);
}

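/*
 * Per-device resource directories ("QPs", "EQs", "CQs") under the device's
 * debugfs root; individual resources are added below them as they are created.
 */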
void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
{
	atomic_set(&dev->num_qps, 0);

	dev->priv.qp_debugfs = debugfs_create_dir("QPs",  dev->priv.dbg_root);
}

void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.qp_debugfs);
}

void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
{
	dev->priv.eq_debugfs = debugfs_create_dir("EQs",  dev->priv.dbg_root);
}

void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.eq_debugfs);
}

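/*
 * "average" file for a command's statistics: reading reports sum / n, or 0
 * when no samples have been collected.
 */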
static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
			    loff_t *pos)
{
	struct mlx5_cmd_stats *stats;
	u64 field = 0;
	int ret;
	char tbuf[22];

	stats = filp->private_data;
	spin_lock_irq(&stats->lock);
	if (stats->n)
		field = div64_u64(stats->sum, stats->n);
	spin_unlock_irq(&stats->lock);
	ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

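/* Writing anything to the "average" file resets the accumulated statistics. */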
static ssize_t average_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	struct mlx5_cmd_stats *stats;

	stats = filp->private_data;
	spin_lock_irq(&stats->lock);
	stats->sum = 0;
	stats->n = 0;
	spin_unlock_irq(&stats->lock);

	*pos += count;

	return count;
}

static const struct file_operations stats_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= average_read,
	.write	= average_write,
};

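/*
 * Expose one "commands/<name>" directory per known command opcode, each with
 * an "average" file (see above) and an "n" sample counter.
 */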
void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_stats *stats;
	struct dentry **cmd;
	const char *namep;
	int i;

	cmd = &dev->priv.cmdif_debugfs;
	*cmd = debugfs_create_dir("commands", dev->priv.dbg_root);

	for (i = 0; i < ARRAY_SIZE(dev->cmd.stats); i++) {
		stats = &dev->cmd.stats[i];
		namep = mlx5_command_str(i);
		if (strcmp(namep, "unknown command opcode")) {
			stats->root = debugfs_create_dir(namep, *cmd);

			debugfs_create_file("average", 0400, stats->root, stats,
					    &stats_fops);
			debugfs_create_u64("n", 0400, stats->root, &stats->n);
		}
	}
}

void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.cmdif_debugfs);
}

void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
{
	dev->priv.cq_debugfs = debugfs_create_dir("CQs",  dev->priv.dbg_root);
}

void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.cq_debugfs);
}

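/*
 * Query the QP and extract the field selected by @index.  String-valued
 * fields (state, transport) return a pointer cast to u64 and set *is_str;
 * the caller pre-initializes *is_str since it is not touched on query failure.
 */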
static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
			 int index, int *is_str)
{
	int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
	struct mlx5_qp_context *ctx;
	u64 param = 0;
	u32 *out;
	int err;
	int no_sq;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return param;

	err = mlx5_core_qp_query(dev, qp, out, outlen);
	if (err) {
		mlx5_core_warn(dev, "failed to query qp err=%d\n", err);
		goto out;
	}

	*is_str = 0;

	/* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
	ctx = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, out, qpc);

	switch (index) {
	case QP_PID:
		param = qp->pid;
		break;
	case QP_STATE:
		param = (unsigned long)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28);
		*is_str = 1;
		break;
	case QP_XPORT:
		param = (unsigned long)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff);
		*is_str = 1;
		break;
	case QP_MTU:
		switch (ctx->mtu_msgmax >> 5) {
		case IB_MTU_256:
			param = 256;
			break;
		case IB_MTU_512:
			param = 512;
			break;
		case IB_MTU_1024:
			param = 1024;
			break;
		case IB_MTU_2048:
			param = 2048;
			break;
		case IB_MTU_4096:
			param = 4096;
			break;
		default:
			param = 0;
		}
		break;
	case QP_N_RECV:
		param = 1 << ((ctx->rq_size_stride >> 3) & 0xf);
		break;
	case QP_RECV_SZ:
		param = 1 << ((ctx->rq_size_stride & 7) + 4);
		break;
	case QP_N_SEND:
		no_sq = be16_to_cpu(ctx->sq_crq_size) >> 15;
		if (!no_sq)
			param = 1 << (be16_to_cpu(ctx->sq_crq_size) >> 11);
		else
			param = 0;
		break;
	case QP_LOG_PG_SZ:
		param = (be32_to_cpu(ctx->log_pg_sz_remote_qpn) >> 24) & 0x1f;
		param += 12;
		break;
	case QP_RQPN:
		param = be32_to_cpu(ctx->log_pg_sz_remote_qpn) & 0xffffff;
		break;
	}

out:
	kfree(out);
	return param;
}

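/* Issue the QUERY_EQ command for the given EQ number. */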
static int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
			      u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {};

	MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
	MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}

static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
			 int index)
{
	int outlen = MLX5_ST_SZ_BYTES(query_eq_out);
	u64 param = 0;
	void *ctx;
	u32 *out;
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return param;

	err = mlx5_core_eq_query(dev, eq, out, outlen);
	if (err) {
		mlx5_core_warn(dev, "failed to query eq\n");
		goto out;
	}
	ctx = MLX5_ADDR_OF(query_eq_out, out, eq_context_entry);

	switch (index) {
	case EQ_NUM_EQES:
		param = 1 << MLX5_GET(eqc, ctx, log_eq_size);
		break;
	case EQ_INTR:
		param = MLX5_GET(eqc, ctx, intr);
		break;
	case EQ_LOG_PG_SZ:
		param = MLX5_GET(eqc, ctx, log_page_size) + 12;
		break;
	}

out:
	kfree(out);
	return param;
}

static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			 int index)
{
	int outlen = MLX5_ST_SZ_BYTES(query_cq_out);
	u64 param = 0;
	void *ctx;
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return param;

	err = mlx5_core_query_cq(dev, cq, out, outlen);
	if (err) {
		mlx5_core_warn(dev, "failed to query cq\n");
		goto out;
	}
	ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context);

	switch (index) {
	case CQ_PID:
		param = cq->pid;
		break;
	case CQ_NUM_CQES:
		param = 1 << MLX5_GET(cqc, ctx, log_cq_size);
		break;
	case CQ_LOG_PG_SZ:
		param = MLX5_GET(cqc, ctx, log_page_size);
		break;
	}

out:
	kvfree(out);
	return param;
}

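/*
 * Common read handler for all per-resource field files.  The private data is
 * one entry of the fields[] array that trails struct mlx5_rsc_debug, so the
 * containing descriptor is recovered by stepping back desc->i entries and
 * then sizeof(*d) bytes.
 */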
static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
			loff_t *pos)
{
	struct mlx5_field_desc *desc;
	struct mlx5_rsc_debug *d;
	char tbuf[18];
	int is_str = 0;
	u64 field;
	int ret;

	desc = filp->private_data;
	d = (void *)(desc - desc->i) - sizeof(*d);
	switch (d->type) {
	case MLX5_DBG_RSC_QP:
		field = qp_read_field(d->dev, d->object, desc->i, &is_str);
		break;

	case MLX5_DBG_RSC_EQ:
		field = eq_read_field(d->dev, d->object, desc->i);
		break;

	case MLX5_DBG_RSC_CQ:
		field = cq_read_field(d->dev, d->object, desc->i);
		break;

	default:
		mlx5_core_warn(d->dev, "invalid resource type %d\n", d->type);
		return -EINVAL;
	}

	if (is_str)
		ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field);
	else
		ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);

	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= dbg_read,
};

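/*
 * Create a "0x<rsn>" directory under @root with one read-only file per entry
 * in @field, all backed by dbg_read() above.
 */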
static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
			struct dentry *root, struct mlx5_rsc_debug **dbg,
			int rsn, char **field, int nfile, void *data)
{
	struct mlx5_rsc_debug *d;
	char resn[32];
	int i;

	d = kzalloc(struct_size(d, fields, nfile), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->dev = dev;
	d->object = data;
	d->type = type;
	sprintf(resn, "0x%x", rsn);
	d->root = debugfs_create_dir(resn,  root);

	for (i = 0; i < nfile; i++) {
		d->fields[i].i = i;
		debugfs_create_file(field[i], 0400, d->root, &d->fields[i],
				    &fops);
	}
	*dbg = d;

	return 0;
}

static void rem_res_tree(struct mlx5_rsc_debug *d)
{
	debugfs_remove_recursive(d->root);
	kfree(d);
}

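/*
 * The per-resource add/remove hooks below are no-ops when the global mlx5
 * debugfs root was never created.
 */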
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
	int err;

	if (!mlx5_debugfs_root)
		return 0;

	err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.qp_debugfs,
			   &qp->dbg, qp->qpn, qp_fields,
			   ARRAY_SIZE(qp_fields), qp);
	if (err)
		qp->dbg = NULL;

	return err;
}

void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
	if (!mlx5_debugfs_root)
		return;

	if (qp->dbg)
		rem_res_tree(qp->dbg);
}

int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	if (!mlx5_debugfs_root)
		return 0;

	err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.eq_debugfs,
			   &eq->dbg, eq->eqn, eq_fields,
			   ARRAY_SIZE(eq_fields), eq);
	if (err)
		eq->dbg = NULL;

	return err;
}

void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	if (!mlx5_debugfs_root)
		return;

	if (eq->dbg)
		rem_res_tree(eq->dbg);
}

int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
	int err;

	if (!mlx5_debugfs_root)
		return 0;

	err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.cq_debugfs,
			   &cq->dbg, cq->cqn, cq_fields,
			   ARRAY_SIZE(cq_fields), cq);
	if (err)
		cq->dbg = NULL;

	return err;
}

void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
	if (!mlx5_debugfs_root)
		return;

	if (cq->dbg)
		rem_res_tree(cq->dbg);
}