/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"

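/* Each resource class (QP, EQ, CQ) exposes a fixed set of fields through
 * debugfs.  The enum values index into the matching *_fields[] table,
 * which supplies the file name created for each field.
 */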
enum {
        QP_PID,
        QP_STATE,
        QP_XPORT,
        QP_MTU,
        QP_N_RECV,
        QP_RECV_SZ,
        QP_N_SEND,
        QP_LOG_PG_SZ,
        QP_RQPN,
};

static char *qp_fields[] = {
        [QP_PID]        = "pid",
        [QP_STATE]      = "state",
        [QP_XPORT]      = "transport",
        [QP_MTU]        = "mtu",
        [QP_N_RECV]     = "num_recv",
        [QP_RECV_SZ]    = "rcv_wqe_sz",
        [QP_N_SEND]     = "num_send",
        [QP_LOG_PG_SZ]  = "log2_page_sz",
        [QP_RQPN]       = "remote_qpn",
};

enum {
        EQ_NUM_EQES,
        EQ_INTR,
        EQ_LOG_PG_SZ,
};

static char *eq_fields[] = {
        [EQ_NUM_EQES]   = "num_eqes",
        [EQ_INTR]       = "intr",
        [EQ_LOG_PG_SZ]  = "log_page_size",
};

enum {
        CQ_PID,
        CQ_NUM_CQES,
        CQ_LOG_PG_SZ,
};

static char *cq_fields[] = {
        [CQ_PID]        = "pid",
        [CQ_NUM_CQES]   = "num_cqes",
        [CQ_LOG_PG_SZ]  = "log_page_size",
};

struct dentry *mlx5_debugfs_root;
EXPORT_SYMBOL(mlx5_debugfs_root);

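/* Create the module-wide "mlx5" debugfs directory shared by all devices.
 * debugfs failures are not fatal: if the directory cannot be created,
 * mlx5_debugfs_root is left NULL and all later debugfs setup is skipped.
 */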
void mlx5_register_debugfs(void)
{
        mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL);
        if (IS_ERR_OR_NULL(mlx5_debugfs_root))
                mlx5_debugfs_root = NULL;
}

void mlx5_unregister_debugfs(void)
{
        debugfs_remove(mlx5_debugfs_root);
}

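/* Per-device debugfs setup: each resource class gets its own directory
 * ("QPs", "EQs", "CQs", "commands") under dev->priv.dbg_root.  All of
 * these helpers are no-ops when the global mlx5 root does not exist.
 */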
int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
{
        if (!mlx5_debugfs_root)
                return 0;

        atomic_set(&dev->num_qps, 0);

        dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root);
        if (!dev->priv.qp_debugfs)
                return -ENOMEM;

        return 0;
}

void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
{
        if (!mlx5_debugfs_root)
                return;

        debugfs_remove_recursive(dev->priv.qp_debugfs);
}

int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
{
        if (!mlx5_debugfs_root)
                return 0;

        dev->priv.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg_root);
        if (!dev->priv.eq_debugfs)
                return -ENOMEM;

        return 0;
}

void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
        if (!mlx5_debugfs_root)
                return;

        debugfs_remove_recursive(dev->priv.eq_debugfs);
}

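/* Show the running average of the accumulated command statistics for one
 * command opcode: sum / n, or 0 when no samples have been collected yet.
 */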
static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
                            loff_t *pos)
{
        struct mlx5_cmd_stats *stats;
        u64 field = 0;
        int ret;
        char tbuf[22];

        stats = filp->private_data;
        spin_lock_irq(&stats->lock);
        if (stats->n)
                field = div64_u64(stats->sum, stats->n);
        spin_unlock_irq(&stats->lock);
        ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
        return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

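/* Writing anything to the "average" file resets the accumulated statistics. */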
static ssize_t average_write(struct file *filp, const char __user *buf,
                             size_t count, loff_t *pos)
{
        struct mlx5_cmd_stats *stats;

        stats = filp->private_data;
        spin_lock_irq(&stats->lock);
        stats->sum = 0;
        stats->n = 0;
        spin_unlock_irq(&stats->lock);

        *pos += count;

        return count;
}

static const struct file_operations stats_fops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .read   = average_read,
        .write  = average_write,
};

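/* Create a "commands" directory with one subdirectory per known command
 * opcode, each holding an "average" file (served by stats_fops above) and
 * an "n" counter.  Opcodes that mlx5_command_str() cannot name are skipped.
 */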
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
{
        struct mlx5_cmd_stats *stats;
        struct dentry **cmd;
        const char *namep;
        int err;
        int i;

        if (!mlx5_debugfs_root)
                return 0;

        cmd = &dev->priv.cmdif_debugfs;
        *cmd = debugfs_create_dir("commands", dev->priv.dbg_root);
        if (!*cmd)
                return -ENOMEM;

        for (i = 0; i < ARRAY_SIZE(dev->cmd.stats); i++) {
                stats = &dev->cmd.stats[i];
                namep = mlx5_command_str(i);
                if (strcmp(namep, "unknown command opcode")) {
                        stats->root = debugfs_create_dir(namep, *cmd);
                        if (!stats->root) {
                                mlx5_core_warn(dev, "failed adding command %d\n",
                                               i);
                                err = -ENOMEM;
                                goto out;
                        }

                        stats->avg = debugfs_create_file("average", 0400,
                                                         stats->root, stats,
                                                         &stats_fops);
                        if (!stats->avg) {
                                mlx5_core_warn(dev, "failed creating debugfs file\n");
                                err = -ENOMEM;
                                goto out;
                        }

                        stats->count = debugfs_create_u64("n", 0400,
                                                          stats->root,
                                                          &stats->n);
                        if (!stats->count) {
                                mlx5_core_warn(dev, "failed creating debugfs file\n");
                                err = -ENOMEM;
                                goto out;
                        }
                }
        }

        return 0;
out:
        debugfs_remove_recursive(dev->priv.cmdif_debugfs);
        return err;
}

void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
{
        if (!mlx5_debugfs_root)
                return;

        debugfs_remove_recursive(dev->priv.cmdif_debugfs);
}

int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
{
        if (!mlx5_debugfs_root)
                return 0;

        dev->priv.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg_root);
        if (!dev->priv.cq_debugfs)
                return -ENOMEM;

        return 0;
}

void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
        if (!mlx5_debugfs_root)
                return;

        debugfs_remove_recursive(dev->priv.cq_debugfs);
}

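/* Query the QP context from firmware and decode the field selected by
 * @index (one of the QP_* enum values).  String-valued fields (state,
 * transport) return a pointer cast to u64 and set *is_str.
 */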
static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
                         int index, int *is_str)
{
        int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
        struct mlx5_qp_context *ctx;
        u64 param = 0;
        u32 *out;
        int err;
        int no_sq;

        out = kzalloc(outlen, GFP_KERNEL);
        if (!out)
                return param;

        err = mlx5_core_qp_query(dev, qp, out, outlen);
        if (err) {
                mlx5_core_warn(dev, "failed to query qp err=%d\n", err);
                goto out;
        }

        *is_str = 0;

        /* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
        ctx = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, out, qpc);

        switch (index) {
        case QP_PID:
                param = qp->pid;
                break;
        case QP_STATE:
                param = (unsigned long)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28);
                *is_str = 1;
                break;
        case QP_XPORT:
                param = (unsigned long)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff);
                *is_str = 1;
                break;
        case QP_MTU:
                switch (ctx->mtu_msgmax >> 5) {
                case IB_MTU_256:
                        param = 256;
                        break;
                case IB_MTU_512:
                        param = 512;
                        break;
                case IB_MTU_1024:
                        param = 1024;
                        break;
                case IB_MTU_2048:
                        param = 2048;
                        break;
                case IB_MTU_4096:
                        param = 4096;
                        break;
                default:
                        param = 0;
                }
                break;
        case QP_N_RECV:
                param = 1 << ((ctx->rq_size_stride >> 3) & 0xf);
                break;
        case QP_RECV_SZ:
                param = 1 << ((ctx->rq_size_stride & 7) + 4);
                break;
        case QP_N_SEND:
                no_sq = be16_to_cpu(ctx->sq_crq_size) >> 15;
                if (!no_sq)
                        param = 1 << (be16_to_cpu(ctx->sq_crq_size) >> 11);
                else
                        param = 0;
                break;
        case QP_LOG_PG_SZ:
                param = (be32_to_cpu(ctx->log_pg_sz_remote_qpn) >> 24) & 0x1f;
                param += 12;
                break;
        case QP_RQPN:
                param = be32_to_cpu(ctx->log_pg_sz_remote_qpn) & 0xffffff;
                break;
        }

out:
        kfree(out);
        return param;
}

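/* Query the EQ context from firmware and decode the field selected by
 * @index (one of the EQ_* enum values).
 */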
static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                         int index)
{
        int outlen = MLX5_ST_SZ_BYTES(query_eq_out);
        u64 param = 0;
        void *ctx;
        u32 *out;
        int err;

        out = kzalloc(outlen, GFP_KERNEL);
        if (!out)
                return param;

        err = mlx5_core_eq_query(dev, eq, out, outlen);
        if (err) {
                mlx5_core_warn(dev, "failed to query eq\n");
                goto out;
        }
        ctx = MLX5_ADDR_OF(query_eq_out, out, eq_context_entry);

        switch (index) {
        case EQ_NUM_EQES:
                param = 1 << MLX5_GET(eqc, ctx, log_eq_size);
                break;
        case EQ_INTR:
                param = MLX5_GET(eqc, ctx, intr);
                break;
        case EQ_LOG_PG_SZ:
                param = MLX5_GET(eqc, ctx, log_page_size) + 12;
                break;
        }

out:
        kfree(out);
        return param;
}

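/* Query the CQ context from firmware and decode the field selected by
 * @index (one of the CQ_* enum values).
 */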
static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
                         int index)
{
        int outlen = MLX5_ST_SZ_BYTES(query_cq_out);
        u64 param = 0;
        void *ctx;
        u32 *out;
        int err;

        out = kvzalloc(outlen, GFP_KERNEL);
        if (!out)
                return param;

        err = mlx5_core_query_cq(dev, cq, out, outlen);
        if (err) {
                mlx5_core_warn(dev, "failed to query cq\n");
                goto out;
        }
        ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context);

        switch (index) {
        case CQ_PID:
                param = cq->pid;
                break;
        case CQ_NUM_CQES:
                param = 1 << MLX5_GET(cqc, ctx, log_cq_size);
                break;
        case CQ_LOG_PG_SZ:
                param = MLX5_GET(cqc, ctx, log_page_size);
                break;
        }

out:
        kvfree(out);
        return param;
}

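/* Shared read handler for every QP/EQ/CQ field file.  The file's private
 * data is the per-field descriptor; the owning resource and its type are
 * recovered from the enclosing mlx5_rsc_debug below.
 */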
static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
                        loff_t *pos)
{
        struct mlx5_field_desc *desc;
        struct mlx5_rsc_debug *d;
        char tbuf[18];
        int is_str = 0;
        u64 field;
        int ret;

        desc = filp->private_data;
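        /* desc is &d->fields[desc->i]; the fields[] array immediately
         * follows the mlx5_rsc_debug header, so step back to recover d.
         */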
        d = (void *)(desc - desc->i) - sizeof(*d);
        switch (d->type) {
        case MLX5_DBG_RSC_QP:
                field = qp_read_field(d->dev, d->object, desc->i, &is_str);
                break;

        case MLX5_DBG_RSC_EQ:
                field = eq_read_field(d->dev, d->object, desc->i);
                break;

        case MLX5_DBG_RSC_CQ:
                field = cq_read_field(d->dev, d->object, desc->i);
                break;

        default:
                mlx5_core_warn(d->dev, "invalid resource type %d\n", d->type);
                return -EINVAL;
        }

        if (is_str)
                ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field);
        else
                ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);

        return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static const struct file_operations fops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .read   = dbg_read,
};

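/* Create a "0x<rsn>" directory for one resource and populate it with a
 * read-only file per field name.  On any failure everything created so
 * far is removed and an error is returned.
 */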
static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
                        struct dentry *root, struct mlx5_rsc_debug **dbg,
                        int rsn, char **field, int nfile, void *data)
{
        struct mlx5_rsc_debug *d;
        char resn[32];
        int err;
        int i;

        d = kzalloc(struct_size(d, fields, nfile), GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        d->dev = dev;
        d->object = data;
        d->type = type;
        sprintf(resn, "0x%x", rsn);
        d->root = debugfs_create_dir(resn, root);
        if (!d->root) {
                err = -ENOMEM;
                goto out_free;
        }

        for (i = 0; i < nfile; i++) {
                d->fields[i].i = i;
                d->fields[i].dent = debugfs_create_file(field[i], 0400,
                                                        d->root, &d->fields[i],
                                                        &fops);
                if (!d->fields[i].dent) {
                        err = -ENOMEM;
                        goto out_rem;
                }
        }
        *dbg = d;

        return 0;
out_rem:
        debugfs_remove_recursive(d->root);

out_free:
        kfree(d);
        return err;
}

static void rem_res_tree(struct mlx5_rsc_debug *d)
{
        debugfs_remove_recursive(d->root);
        kfree(d);
}

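/* Public per-resource hooks: called when a QP/EQ/CQ is created or destroyed
 * to add or remove its node under the device's "QPs"/"EQs"/"CQs" directory.
 * If the tree cannot be created, the resource's dbg pointer is cleared.
 */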
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
        int err;

        if (!mlx5_debugfs_root)
                return 0;

        err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.qp_debugfs,
                           &qp->dbg, qp->qpn, qp_fields,
                           ARRAY_SIZE(qp_fields), qp);
        if (err)
                qp->dbg = NULL;

        return err;
}

void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
        if (!mlx5_debugfs_root)
                return;

        if (qp->dbg)
                rem_res_tree(qp->dbg);
}

int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        int err;

        if (!mlx5_debugfs_root)
                return 0;

        err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.eq_debugfs,
                           &eq->dbg, eq->eqn, eq_fields,
                           ARRAY_SIZE(eq_fields), eq);
        if (err)
                eq->dbg = NULL;

        return err;
}

void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        if (!mlx5_debugfs_root)
                return;

        if (eq->dbg)
                rem_res_tree(eq->dbg);
}

int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
        int err;

        if (!mlx5_debugfs_root)
                return 0;

        err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.cq_debugfs,
                           &cq->dbg, cq->cqn, cq_fields,
                           ARRAY_SIZE(cq_fields), cq);
        if (err)
                cq->dbg = NULL;

        return err;
}

void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
        if (!mlx5_debugfs_root)
                return;

        if (cq->dbg)
                rem_res_tree(cq->dbg);
}