1 /*
2  * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/highmem.h>
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <linux/pci.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/slab.h>
39 #include <linux/delay.h>
40 #include <linux/random.h>
41 #include <linux/io-mapping.h>
42 #include <linux/mlx5/driver.h>
43 #include <linux/mlx5/eq.h>
44 #include <linux/debugfs.h>
45 
46 #include "mlx5_core.h"
47 #include "lib/eq.h"
48 
49 enum {
50 	CMD_IF_REV = 5,
51 };
52 
53 enum {
54 	CMD_MODE_POLLING,
55 	CMD_MODE_EVENTS
56 };
57 
58 enum {
59 	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
60 	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
61 	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
62 	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR		= 0x3,
63 	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR	= 0x4,
64 	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR		= 0x5,
65 	MLX5_CMD_DELIVERY_STAT_FW_ERR			= 0x6,
66 	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR		= 0x7,
67 	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR		= 0x8,
68 	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR	= 0x9,
69 	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
70 };
71 
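/*
 * Allocate a command queue entry. The entry starts with a single
 * reference; cmd_ent_put() frees it (and releases its slot index, if
 * one was assigned) once the last user drops that reference.
 */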
72 static struct mlx5_cmd_work_ent *
73 cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in,
74 	      struct mlx5_cmd_msg *out, void *uout, int uout_size,
75 	      mlx5_cmd_cbk_t cbk, void *context, int page_queue)
76 {
77 	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
78 	struct mlx5_cmd_work_ent *ent;
79 
80 	ent = kzalloc(sizeof(*ent), alloc_flags);
81 	if (!ent)
82 		return ERR_PTR(-ENOMEM);
83 
84 	ent->idx	= -EINVAL;
85 	ent->in		= in;
86 	ent->out	= out;
87 	ent->uout	= uout;
88 	ent->uout_size	= uout_size;
89 	ent->callback	= cbk;
90 	ent->context	= context;
91 	ent->cmd	= cmd;
92 	ent->page_queue = page_queue;
93 	refcount_set(&ent->refcnt, 1);
94 
95 	return ent;
96 }
97 
98 static void cmd_free_ent(struct mlx5_cmd_work_ent *ent)
99 {
100 	kfree(ent);
101 }
102 
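/*
 * Tokens cycle through 1..255 under token_lock; 0 is skipped so a
 * cleared token in a descriptor can never match a live command.
 */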
103 static u8 alloc_token(struct mlx5_cmd *cmd)
104 {
105 	u8 token;
106 
107 	spin_lock(&cmd->token_lock);
108 	cmd->token++;
109 	if (cmd->token == 0)
110 		cmd->token++;
111 	token = cmd->token;
112 	spin_unlock(&cmd->token_lock);
113 
114 	return token;
115 }
116 
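/*
 * Command slots are tracked in cmd->bitmask: a set bit means the slot
 * is free. Allocation clears the first free bit and returns its index,
 * or -ENOMEM when all max_reg_cmds slots are in use.
 */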
117 static int cmd_alloc_index(struct mlx5_cmd *cmd)
118 {
119 	unsigned long flags;
120 	int ret;
121 
122 	spin_lock_irqsave(&cmd->alloc_lock, flags);
123 	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
124 	if (ret < cmd->max_reg_cmds)
125 		clear_bit(ret, &cmd->bitmask);
126 	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
127 
128 	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
129 }
130 
131 static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
132 {
133 	unsigned long flags;
134 
135 	spin_lock_irqsave(&cmd->alloc_lock, flags);
136 	set_bit(idx, &cmd->bitmask);
137 	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
138 }
139 
140 static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
141 {
142 	refcount_inc(&ent->refcnt);
143 }
144 
145 static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
146 {
147 	if (!refcount_dec_and_test(&ent->refcnt))
148 		return;
149 
150 	if (ent->idx >= 0)
151 		cmd_free_index(ent->cmd, ent->idx);
152 
153 	cmd_free_ent(ent);
154 }
155 
156 static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
157 {
158 	return cmd->cmd_buf + (idx << cmd->log_stride);
159 }
160 
161 static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg)
162 {
163 	int size = msg->len;
164 	int blen = size - min_t(int, sizeof(msg->first.data), size);
165 
166 	return DIV_ROUND_UP(blen, MLX5_CMD_DATA_BLOCK_SIZE);
167 }
168 
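/*
 * Mailbox integrity uses simple XOR-8 signatures: the sig byte is set
 * to the complement of the XOR of the covered bytes, so XOR-ing the
 * whole covered range (including sig) back together must yield 0xff.
 */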
169 static u8 xor8_buf(void *buf, size_t offset, int len)
170 {
171 	u8 *ptr = buf;
172 	u8 sum = 0;
173 	int i;
174 	int end = len + offset;
175 
176 	for (i = offset; i < end; i++)
177 		sum ^= ptr[i];
178 
179 	return sum;
180 }
181 
182 static int verify_block_sig(struct mlx5_cmd_prot_block *block)
183 {
184 	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
185 	int xor_len = sizeof(*block) - sizeof(block->data) - 1;
186 
187 	if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
188 		return -EINVAL;
189 
190 	if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
191 		return -EINVAL;
192 
193 	return 0;
194 }
195 
196 static void calc_block_sig(struct mlx5_cmd_prot_block *block)
197 {
198 	int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
199 	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
200 
201 	block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
202 	block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
203 }
204 
205 static void calc_chain_sig(struct mlx5_cmd_msg *msg)
206 {
207 	struct mlx5_cmd_mailbox *next = msg->next;
208 	int n = mlx5_calc_cmd_blocks(msg);
209 	int i = 0;
210 
211 	for (i = 0; i < n && next; i++)  {
212 		calc_block_sig(next->buf);
213 		next = next->next;
214 	}
215 }
216 
217 static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
218 {
219 	ent->lay->sig = ~xor8_buf(ent->lay, 0,  sizeof(*ent->lay));
220 	if (csum) {
221 		calc_chain_sig(ent->in);
222 		calc_chain_sig(ent->out);
223 	}
224 }
225 
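/*
 * Busy-wait (with cond_resched()) for firmware to clear the HW
 * ownership bit, up to MLX5_CMD_TIMEOUT_MSEC plus a one second grace
 * period; sets ent->ret to 0 on completion or -ETIMEDOUT otherwise.
 */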
226 static void poll_timeout(struct mlx5_cmd_work_ent *ent)
227 {
228 	unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
229 	u8 own;
230 
231 	do {
232 		own = READ_ONCE(ent->lay->status_own);
233 		if (!(own & CMD_OWNER_HW)) {
234 			ent->ret = 0;
235 			return;
236 		}
237 		cond_resched();
238 	} while (time_before(jiffies, poll_end));
239 
240 	ent->ret = -ETIMEDOUT;
241 }
242 
243 static int verify_signature(struct mlx5_cmd_work_ent *ent)
244 {
245 	struct mlx5_cmd_mailbox *next = ent->out->next;
246 	int n = mlx5_calc_cmd_blocks(ent->out);
247 	int err;
248 	u8 sig;
249 	int i = 0;
250 
251 	sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
252 	if (sig != 0xff)
253 		return -EINVAL;
254 
255 	for (i = 0; i < n && next; i++) {
256 		err = verify_block_sig(next->buf);
257 		if (err)
258 			return err;
259 
260 		next = next->next;
261 	}
262 
263 	return 0;
264 }
265 
266 static void dump_buf(void *buf, int size, int data_only, int offset)
267 {
268 	__be32 *p = buf;
269 	int i;
270 
271 	for (i = 0; i < size; i += 16) {
272 		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
273 			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
274 			 be32_to_cpu(p[3]));
275 		p += 4;
276 		offset += 16;
277 	}
278 	if (!data_only)
279 		pr_debug("\n");
280 }
281 
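/*
 * When the command interface is down (internal error, PCI channel
 * offline), commands are not sent to firmware. Teardown/destroy style
 * opcodes are completed with a faked MLX5_CMD_STAT_OK so cleanup flows
 * can make progress; everything else fails with -EIO and a driver-side
 * syndrome.
 */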
282 static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
283 				       u32 *synd, u8 *status)
284 {
285 	*synd = 0;
286 	*status = 0;
287 
288 	switch (op) {
289 	case MLX5_CMD_OP_TEARDOWN_HCA:
290 	case MLX5_CMD_OP_DISABLE_HCA:
291 	case MLX5_CMD_OP_MANAGE_PAGES:
292 	case MLX5_CMD_OP_DESTROY_MKEY:
293 	case MLX5_CMD_OP_DESTROY_EQ:
294 	case MLX5_CMD_OP_DESTROY_CQ:
295 	case MLX5_CMD_OP_DESTROY_QP:
296 	case MLX5_CMD_OP_DESTROY_PSV:
297 	case MLX5_CMD_OP_DESTROY_SRQ:
298 	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
299 	case MLX5_CMD_OP_DESTROY_XRQ:
300 	case MLX5_CMD_OP_DESTROY_DCT:
301 	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
302 	case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
303 	case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
304 	case MLX5_CMD_OP_DEALLOC_PD:
305 	case MLX5_CMD_OP_DEALLOC_UAR:
306 	case MLX5_CMD_OP_DETACH_FROM_MCG:
307 	case MLX5_CMD_OP_DEALLOC_XRCD:
308 	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
309 	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
310 	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
311 	case MLX5_CMD_OP_DESTROY_LAG:
312 	case MLX5_CMD_OP_DESTROY_VPORT_LAG:
313 	case MLX5_CMD_OP_DESTROY_TIR:
314 	case MLX5_CMD_OP_DESTROY_SQ:
315 	case MLX5_CMD_OP_DESTROY_RQ:
316 	case MLX5_CMD_OP_DESTROY_RMP:
317 	case MLX5_CMD_OP_DESTROY_TIS:
318 	case MLX5_CMD_OP_DESTROY_RQT:
319 	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
320 	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
321 	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
322 	case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
323 	case MLX5_CMD_OP_2ERR_QP:
324 	case MLX5_CMD_OP_2RST_QP:
325 	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
326 	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
327 	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
328 	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
329 	case MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT:
330 	case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
331 	case MLX5_CMD_OP_FPGA_DESTROY_QP:
332 	case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
333 	case MLX5_CMD_OP_DEALLOC_MEMIC:
334 	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
335 	case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
336 		return MLX5_CMD_STAT_OK;
337 
338 	case MLX5_CMD_OP_QUERY_HCA_CAP:
339 	case MLX5_CMD_OP_QUERY_ADAPTER:
340 	case MLX5_CMD_OP_INIT_HCA:
341 	case MLX5_CMD_OP_ENABLE_HCA:
342 	case MLX5_CMD_OP_QUERY_PAGES:
343 	case MLX5_CMD_OP_SET_HCA_CAP:
344 	case MLX5_CMD_OP_QUERY_ISSI:
345 	case MLX5_CMD_OP_SET_ISSI:
346 	case MLX5_CMD_OP_CREATE_MKEY:
347 	case MLX5_CMD_OP_QUERY_MKEY:
348 	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
349 	case MLX5_CMD_OP_CREATE_EQ:
350 	case MLX5_CMD_OP_QUERY_EQ:
351 	case MLX5_CMD_OP_GEN_EQE:
352 	case MLX5_CMD_OP_CREATE_CQ:
353 	case MLX5_CMD_OP_QUERY_CQ:
354 	case MLX5_CMD_OP_MODIFY_CQ:
355 	case MLX5_CMD_OP_CREATE_QP:
356 	case MLX5_CMD_OP_RST2INIT_QP:
357 	case MLX5_CMD_OP_INIT2RTR_QP:
358 	case MLX5_CMD_OP_RTR2RTS_QP:
359 	case MLX5_CMD_OP_RTS2RTS_QP:
360 	case MLX5_CMD_OP_SQERR2RTS_QP:
361 	case MLX5_CMD_OP_QUERY_QP:
362 	case MLX5_CMD_OP_SQD_RTS_QP:
363 	case MLX5_CMD_OP_INIT2INIT_QP:
364 	case MLX5_CMD_OP_CREATE_PSV:
365 	case MLX5_CMD_OP_CREATE_SRQ:
366 	case MLX5_CMD_OP_QUERY_SRQ:
367 	case MLX5_CMD_OP_ARM_RQ:
368 	case MLX5_CMD_OP_CREATE_XRC_SRQ:
369 	case MLX5_CMD_OP_QUERY_XRC_SRQ:
370 	case MLX5_CMD_OP_ARM_XRC_SRQ:
371 	case MLX5_CMD_OP_CREATE_XRQ:
372 	case MLX5_CMD_OP_QUERY_XRQ:
373 	case MLX5_CMD_OP_ARM_XRQ:
374 	case MLX5_CMD_OP_CREATE_DCT:
375 	case MLX5_CMD_OP_DRAIN_DCT:
376 	case MLX5_CMD_OP_QUERY_DCT:
377 	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
378 	case MLX5_CMD_OP_QUERY_VPORT_STATE:
379 	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
380 	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
381 	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
382 	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
383 	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
384 	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
385 	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
386 	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
387 	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
388 	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
389 	case MLX5_CMD_OP_QUERY_VNIC_ENV:
390 	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
391 	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
392 	case MLX5_CMD_OP_QUERY_Q_COUNTER:
393 	case MLX5_CMD_OP_SET_MONITOR_COUNTER:
394 	case MLX5_CMD_OP_ARM_MONITOR_COUNTER:
395 	case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
396 	case MLX5_CMD_OP_QUERY_RATE_LIMIT:
397 	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
398 	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
399 	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
400 	case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
401 	case MLX5_CMD_OP_ALLOC_PD:
402 	case MLX5_CMD_OP_ALLOC_UAR:
403 	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
404 	case MLX5_CMD_OP_ACCESS_REG:
405 	case MLX5_CMD_OP_ATTACH_TO_MCG:
406 	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
407 	case MLX5_CMD_OP_MAD_IFC:
408 	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
409 	case MLX5_CMD_OP_SET_MAD_DEMUX:
410 	case MLX5_CMD_OP_NOP:
411 	case MLX5_CMD_OP_ALLOC_XRCD:
412 	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
413 	case MLX5_CMD_OP_QUERY_CONG_STATUS:
414 	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
415 	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
416 	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
417 	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
418 	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
419 	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
420 	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
421 	case MLX5_CMD_OP_CREATE_LAG:
422 	case MLX5_CMD_OP_MODIFY_LAG:
423 	case MLX5_CMD_OP_QUERY_LAG:
424 	case MLX5_CMD_OP_CREATE_VPORT_LAG:
425 	case MLX5_CMD_OP_CREATE_TIR:
426 	case MLX5_CMD_OP_MODIFY_TIR:
427 	case MLX5_CMD_OP_QUERY_TIR:
428 	case MLX5_CMD_OP_CREATE_SQ:
429 	case MLX5_CMD_OP_MODIFY_SQ:
430 	case MLX5_CMD_OP_QUERY_SQ:
431 	case MLX5_CMD_OP_CREATE_RQ:
432 	case MLX5_CMD_OP_MODIFY_RQ:
433 	case MLX5_CMD_OP_QUERY_RQ:
434 	case MLX5_CMD_OP_CREATE_RMP:
435 	case MLX5_CMD_OP_MODIFY_RMP:
436 	case MLX5_CMD_OP_QUERY_RMP:
437 	case MLX5_CMD_OP_CREATE_TIS:
438 	case MLX5_CMD_OP_MODIFY_TIS:
439 	case MLX5_CMD_OP_QUERY_TIS:
440 	case MLX5_CMD_OP_CREATE_RQT:
441 	case MLX5_CMD_OP_MODIFY_RQT:
442 	case MLX5_CMD_OP_QUERY_RQT:
443 
444 	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
445 	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
446 	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
447 	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
448 	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
449 	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
450 	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
451 	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
452 	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
453 	case MLX5_CMD_OP_FPGA_CREATE_QP:
454 	case MLX5_CMD_OP_FPGA_MODIFY_QP:
455 	case MLX5_CMD_OP_FPGA_QUERY_QP:
456 	case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
457 	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
458 	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
459 	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
460 	case MLX5_CMD_OP_CREATE_UCTX:
461 	case MLX5_CMD_OP_DESTROY_UCTX:
462 	case MLX5_CMD_OP_CREATE_UMEM:
463 	case MLX5_CMD_OP_DESTROY_UMEM:
464 	case MLX5_CMD_OP_ALLOC_MEMIC:
465 	case MLX5_CMD_OP_MODIFY_XRQ:
466 	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
467 		*status = MLX5_DRIVER_STATUS_ABORTED;
468 		*synd = MLX5_DRIVER_SYND;
469 		return -EIO;
470 	default:
471 		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
472 		return -EINVAL;
473 	}
474 }
475 
476 const char *mlx5_command_str(int command)
477 {
478 #define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd
479 
480 	switch (command) {
481 	MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
482 	MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
483 	MLX5_COMMAND_STR_CASE(INIT_HCA);
484 	MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
485 	MLX5_COMMAND_STR_CASE(ENABLE_HCA);
486 	MLX5_COMMAND_STR_CASE(DISABLE_HCA);
487 	MLX5_COMMAND_STR_CASE(QUERY_PAGES);
488 	MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
489 	MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
490 	MLX5_COMMAND_STR_CASE(QUERY_ISSI);
491 	MLX5_COMMAND_STR_CASE(SET_ISSI);
492 	MLX5_COMMAND_STR_CASE(SET_DRIVER_VERSION);
493 	MLX5_COMMAND_STR_CASE(CREATE_MKEY);
494 	MLX5_COMMAND_STR_CASE(QUERY_MKEY);
495 	MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
496 	MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
497 	MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
498 	MLX5_COMMAND_STR_CASE(CREATE_EQ);
499 	MLX5_COMMAND_STR_CASE(DESTROY_EQ);
500 	MLX5_COMMAND_STR_CASE(QUERY_EQ);
501 	MLX5_COMMAND_STR_CASE(GEN_EQE);
502 	MLX5_COMMAND_STR_CASE(CREATE_CQ);
503 	MLX5_COMMAND_STR_CASE(DESTROY_CQ);
504 	MLX5_COMMAND_STR_CASE(QUERY_CQ);
505 	MLX5_COMMAND_STR_CASE(MODIFY_CQ);
506 	MLX5_COMMAND_STR_CASE(CREATE_QP);
507 	MLX5_COMMAND_STR_CASE(DESTROY_QP);
508 	MLX5_COMMAND_STR_CASE(RST2INIT_QP);
509 	MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
510 	MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
511 	MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
512 	MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
513 	MLX5_COMMAND_STR_CASE(2ERR_QP);
514 	MLX5_COMMAND_STR_CASE(2RST_QP);
515 	MLX5_COMMAND_STR_CASE(QUERY_QP);
516 	MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
517 	MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
518 	MLX5_COMMAND_STR_CASE(CREATE_PSV);
519 	MLX5_COMMAND_STR_CASE(DESTROY_PSV);
520 	MLX5_COMMAND_STR_CASE(CREATE_SRQ);
521 	MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
522 	MLX5_COMMAND_STR_CASE(QUERY_SRQ);
523 	MLX5_COMMAND_STR_CASE(ARM_RQ);
524 	MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
525 	MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
526 	MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
527 	MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
528 	MLX5_COMMAND_STR_CASE(CREATE_DCT);
529 	MLX5_COMMAND_STR_CASE(DESTROY_DCT);
530 	MLX5_COMMAND_STR_CASE(DRAIN_DCT);
531 	MLX5_COMMAND_STR_CASE(QUERY_DCT);
532 	MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
533 	MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
534 	MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
535 	MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
536 	MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
537 	MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
538 	MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
539 	MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
540 	MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
541 	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
542 	MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
543 	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
544 	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
545 	MLX5_COMMAND_STR_CASE(QUERY_VNIC_ENV);
546 	MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
547 	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
548 	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
549 	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
550 	MLX5_COMMAND_STR_CASE(SET_MONITOR_COUNTER);
551 	MLX5_COMMAND_STR_CASE(ARM_MONITOR_COUNTER);
552 	MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
553 	MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
554 	MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
555 	MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
556 	MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
557 	MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
558 	MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
559 	MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
560 	MLX5_COMMAND_STR_CASE(ALLOC_PD);
561 	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
562 	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
563 	MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
564 	MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
565 	MLX5_COMMAND_STR_CASE(ACCESS_REG);
566 	MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
567 	MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
568 	MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
569 	MLX5_COMMAND_STR_CASE(MAD_IFC);
570 	MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
571 	MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
572 	MLX5_COMMAND_STR_CASE(NOP);
573 	MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
574 	MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
575 	MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
576 	MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
577 	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
578 	MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
579 	MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
580 	MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
581 	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
582 	MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
583 	MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
584 	MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
585 	MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
586 	MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
587 	MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
588 	MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
589 	MLX5_COMMAND_STR_CASE(CREATE_LAG);
590 	MLX5_COMMAND_STR_CASE(MODIFY_LAG);
591 	MLX5_COMMAND_STR_CASE(QUERY_LAG);
592 	MLX5_COMMAND_STR_CASE(DESTROY_LAG);
593 	MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG);
594 	MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG);
595 	MLX5_COMMAND_STR_CASE(CREATE_TIR);
596 	MLX5_COMMAND_STR_CASE(MODIFY_TIR);
597 	MLX5_COMMAND_STR_CASE(DESTROY_TIR);
598 	MLX5_COMMAND_STR_CASE(QUERY_TIR);
599 	MLX5_COMMAND_STR_CASE(CREATE_SQ);
600 	MLX5_COMMAND_STR_CASE(MODIFY_SQ);
601 	MLX5_COMMAND_STR_CASE(DESTROY_SQ);
602 	MLX5_COMMAND_STR_CASE(QUERY_SQ);
603 	MLX5_COMMAND_STR_CASE(CREATE_RQ);
604 	MLX5_COMMAND_STR_CASE(MODIFY_RQ);
605 	MLX5_COMMAND_STR_CASE(DESTROY_RQ);
606 	MLX5_COMMAND_STR_CASE(QUERY_RQ);
607 	MLX5_COMMAND_STR_CASE(CREATE_RMP);
608 	MLX5_COMMAND_STR_CASE(MODIFY_RMP);
609 	MLX5_COMMAND_STR_CASE(DESTROY_RMP);
610 	MLX5_COMMAND_STR_CASE(QUERY_RMP);
611 	MLX5_COMMAND_STR_CASE(CREATE_TIS);
612 	MLX5_COMMAND_STR_CASE(MODIFY_TIS);
613 	MLX5_COMMAND_STR_CASE(DESTROY_TIS);
614 	MLX5_COMMAND_STR_CASE(QUERY_TIS);
615 	MLX5_COMMAND_STR_CASE(CREATE_RQT);
616 	MLX5_COMMAND_STR_CASE(MODIFY_RQT);
617 	MLX5_COMMAND_STR_CASE(DESTROY_RQT);
618 	MLX5_COMMAND_STR_CASE(QUERY_RQT);
619 	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
620 	MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
621 	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
622 	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
623 	MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
624 	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
625 	MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
626 	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
627 	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
628 	MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
629 	MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
630 	MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
631 	MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
632 	MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
633 	MLX5_COMMAND_STR_CASE(ALLOC_PACKET_REFORMAT_CONTEXT);
634 	MLX5_COMMAND_STR_CASE(DEALLOC_PACKET_REFORMAT_CONTEXT);
635 	MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT);
636 	MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT);
637 	MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP);
638 	MLX5_COMMAND_STR_CASE(FPGA_MODIFY_QP);
639 	MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP);
640 	MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS);
641 	MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP);
642 	MLX5_COMMAND_STR_CASE(CREATE_XRQ);
643 	MLX5_COMMAND_STR_CASE(DESTROY_XRQ);
644 	MLX5_COMMAND_STR_CASE(QUERY_XRQ);
645 	MLX5_COMMAND_STR_CASE(ARM_XRQ);
646 	MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJECT);
647 	MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJECT);
648 	MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT);
649 	MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT);
650 	MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
651 	MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
652 	MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
653 	MLX5_COMMAND_STR_CASE(QUERY_ESW_FUNCTIONS);
654 	MLX5_COMMAND_STR_CASE(CREATE_UCTX);
655 	MLX5_COMMAND_STR_CASE(DESTROY_UCTX);
656 	MLX5_COMMAND_STR_CASE(CREATE_UMEM);
657 	MLX5_COMMAND_STR_CASE(DESTROY_UMEM);
658 	MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR);
659 	MLX5_COMMAND_STR_CASE(MODIFY_XRQ);
660 	default: return "unknown command opcode";
661 	}
662 }
663 
664 static const char *cmd_status_str(u8 status)
665 {
666 	switch (status) {
667 	case MLX5_CMD_STAT_OK:
668 		return "OK";
669 	case MLX5_CMD_STAT_INT_ERR:
670 		return "internal error";
671 	case MLX5_CMD_STAT_BAD_OP_ERR:
672 		return "bad operation";
673 	case MLX5_CMD_STAT_BAD_PARAM_ERR:
674 		return "bad parameter";
675 	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
676 		return "bad system state";
677 	case MLX5_CMD_STAT_BAD_RES_ERR:
678 		return "bad resource";
679 	case MLX5_CMD_STAT_RES_BUSY:
680 		return "resource busy";
681 	case MLX5_CMD_STAT_LIM_ERR:
682 		return "limits exceeded";
683 	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
684 		return "bad resource state";
685 	case MLX5_CMD_STAT_IX_ERR:
686 		return "bad index";
687 	case MLX5_CMD_STAT_NO_RES_ERR:
688 		return "no resources";
689 	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
690 		return "bad input length";
691 	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
692 		return "bad output length";
693 	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
694 		return "bad QP state";
695 	case MLX5_CMD_STAT_BAD_PKT_ERR:
696 		return "bad packet (discarded)";
697 	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
698 		return "bad size too many outstanding CQEs";
699 	default:
700 		return "unknown status";
701 	}
702 }
703 
704 static int cmd_status_to_err(u8 status)
705 {
706 	switch (status) {
707 	case MLX5_CMD_STAT_OK:				return 0;
708 	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
709 	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
710 	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
711 	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
712 	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
713 	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
714 	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
715 	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
716 	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
717 	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
718 	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
719 	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
720 	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
721 	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
722 	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
723 	default:					return -EIO;
724 	}
725 }
726 
727 struct mlx5_ifc_mbox_out_bits {
728 	u8         status[0x8];
729 	u8         reserved_at_8[0x18];
730 
731 	u8         syndrome[0x20];
732 
733 	u8         reserved_at_40[0x40];
734 };
735 
736 struct mlx5_ifc_mbox_in_bits {
737 	u8         opcode[0x10];
738 	u8         uid[0x10];
739 
740 	u8         reserved_at_20[0x10];
741 	u8         op_mod[0x10];
742 
743 	u8         reserved_at_40[0x40];
744 };
745 
746 void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
747 {
748 	*status = MLX5_GET(mbox_out, out, status);
749 	*syndrome = MLX5_GET(mbox_out, out, syndrome);
750 }
751 
752 static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
753 {
754 	u32 syndrome;
755 	u8  status;
756 	u16 opcode;
757 	u16 op_mod;
758 	u16 uid;
759 
760 	mlx5_cmd_mbox_status(out, &status, &syndrome);
761 	if (!status)
762 		return 0;
763 
764 	opcode = MLX5_GET(mbox_in, in, opcode);
765 	op_mod = MLX5_GET(mbox_in, in, op_mod);
766 	uid    = MLX5_GET(mbox_in, in, uid);
767 
768 	if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
769 		mlx5_core_err_rl(dev,
770 			"%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
771 			mlx5_command_str(opcode), opcode, op_mod,
772 			cmd_status_str(status), status, syndrome);
773 	else
774 		mlx5_core_dbg(dev,
775 		      "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
776 		      mlx5_command_str(opcode),
777 		      opcode, op_mod,
778 		      cmd_status_str(status),
779 		      status,
780 		      syndrome);
781 
782 	return cmd_status_to_err(status);
783 }
784 
785 static void dump_command(struct mlx5_core_dev *dev,
786 			 struct mlx5_cmd_work_ent *ent, int input)
787 {
788 	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
789 	u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
790 	struct mlx5_cmd_mailbox *next = msg->next;
791 	int n = mlx5_calc_cmd_blocks(msg);
792 	int data_only;
793 	u32 offset = 0;
794 	int dump_len;
795 	int i;
796 
797 	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));
798 
799 	if (data_only)
800 		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
801 				   "dump command data %s(0x%x) %s\n",
802 				   mlx5_command_str(op), op,
803 				   input ? "INPUT" : "OUTPUT");
804 	else
805 		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
806 			      mlx5_command_str(op), op,
807 			      input ? "INPUT" : "OUTPUT");
808 
809 	if (data_only) {
810 		if (input) {
811 			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
812 			offset += sizeof(ent->lay->in);
813 		} else {
814 			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
815 			offset += sizeof(ent->lay->out);
816 		}
817 	} else {
818 		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
819 		offset += sizeof(*ent->lay);
820 	}
821 
822 	for (i = 0; i < n && next; i++)  {
823 		if (data_only) {
824 			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
825 			dump_buf(next->buf, dump_len, 1, offset);
826 			offset += MLX5_CMD_DATA_BLOCK_SIZE;
827 		} else {
828 			mlx5_core_dbg(dev, "command block:\n");
829 			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
830 			offset += sizeof(struct mlx5_cmd_prot_block);
831 		}
832 		next = next->next;
833 	}
834 
835 	if (data_only)
836 		pr_debug("\n");
837 }
838 
839 static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
840 {
841 	return MLX5_GET(mbox_in, in->first.data, opcode);
842 }
843 
844 static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
845 
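/*
 * Delayed work armed for callback (asynchronous) commands. If no
 * completion EQE arrived within the timeout, try to recover lost EQEs
 * first; only if the entry is still pending, force a timeout
 * completion through mlx5_cmd_comp_handler().
 */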
846 static void cb_timeout_handler(struct work_struct *work)
847 {
848 	struct delayed_work *dwork = container_of(work, struct delayed_work,
849 						  work);
850 	struct mlx5_cmd_work_ent *ent = container_of(dwork,
851 						     struct mlx5_cmd_work_ent,
852 						     cb_timeout_work);
853 	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
854 						 cmd);
855 
856 	mlx5_cmd_eq_recover(dev);
857 
858 	/* Maybe it was already handled by the EQ recovery flow? */
859 	if (!test_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) {
860 		mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, recovered after timeout\n", ent->idx,
861 			       mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
862 		goto out; /* phew, already handled */
863 	}
864 
865 	ent->ret = -ETIMEDOUT;
866 	mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n",
867 		       ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
868 	mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
869 
870 out:
871 	cmd_ent_put(ent); /* for the cmd_ent_get() taken when the delayed work was scheduled */
872 }
873 
874 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
875 static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
876 			      struct mlx5_cmd_msg *msg);
877 
878 static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode)
879 {
880 	if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL)
881 		return true;
882 
883 	return cmd->allowed_opcode == opcode;
884 }
885 
886 static int cmd_alloc_index_retry(struct mlx5_cmd *cmd)
887 {
888 	unsigned long alloc_end = jiffies + msecs_to_jiffies(1000);
889 	int idx;
890 
891 retry:
892 	idx = cmd_alloc_index(cmd);
893 	if (idx < 0 && time_before(jiffies, alloc_end)) {
894 		/* Index allocation can fail on heavy load of commands. This is a temporary
895 		 * situation as the current command already holds the semaphore, meaning that
896 		 * another command completion is being handled and it is expected to release
897 		 * the entry index soon.
898 		 */
899 		cpu_relax();
900 		goto retry;
901 	}
902 	return idx;
903 }
904 
905 bool mlx5_cmd_is_down(struct mlx5_core_dev *dev)
906 {
907 	return pci_channel_offline(dev->pdev) ||
908 	       dev->cmd.state != MLX5_CMDIF_STATE_UP ||
909 	       dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR;
910 }
911 
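/*
 * Work item that actually submits a command: take the (pages or
 * regular) semaphore, grab a slot index, build the HW descriptor from
 * ent->in/ent->out, sign it, and ring the doorbell. In polling mode
 * the completion is also consumed here via poll_timeout().
 */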
912 static void cmd_work_handler(struct work_struct *work)
913 {
914 	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
915 	struct mlx5_cmd *cmd = ent->cmd;
916 	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
917 	unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
918 	struct mlx5_cmd_layout *lay;
919 	struct semaphore *sem;
920 	unsigned long flags;
921 	bool poll_cmd = ent->polling;
922 	int alloc_ret;
923 	int cmd_mode;
924 
925 	complete(&ent->handling);
926 	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
927 	down(sem);
928 	if (!ent->page_queue) {
929 		alloc_ret = cmd_alloc_index_retry(cmd);
930 		if (alloc_ret < 0) {
931 			mlx5_core_err_rl(dev, "failed to allocate command entry\n");
932 			if (ent->callback) {
933 				ent->callback(-EAGAIN, ent->context);
934 				mlx5_free_cmd_msg(dev, ent->out);
935 				free_msg(dev, ent->in);
936 				cmd_ent_put(ent);
937 			} else {
938 				ent->ret = -EAGAIN;
939 				complete(&ent->done);
940 			}
941 			up(sem);
942 			return;
943 		}
944 		ent->idx = alloc_ret;
945 	} else {
946 		ent->idx = cmd->max_reg_cmds;
947 		spin_lock_irqsave(&cmd->alloc_lock, flags);
948 		clear_bit(ent->idx, &cmd->bitmask);
949 		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
950 	}
951 
952 	cmd->ent_arr[ent->idx] = ent;
953 	lay = get_inst(cmd, ent->idx);
954 	ent->lay = lay;
955 	memset(lay, 0, sizeof(*lay));
956 	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
957 	ent->op = be32_to_cpu(lay->in[0]) >> 16;
958 	if (ent->in->next)
959 		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
960 	lay->inlen = cpu_to_be32(ent->in->len);
961 	if (ent->out->next)
962 		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
963 	lay->outlen = cpu_to_be32(ent->out->len);
964 	lay->type = MLX5_PCI_CMD_XPORT;
965 	lay->token = ent->token;
966 	lay->status_own = CMD_OWNER_HW;
967 	set_signature(ent, !cmd->checksum_disabled);
968 	dump_command(dev, ent, 1);
969 	ent->ts1 = ktime_get_ns();
970 	cmd_mode = cmd->mode;
971 
972 	if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, cb_timeout))
973 		cmd_ent_get(ent);
974 	set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
975 
976 	/* Skip sending command to fw if internal error */
977 	if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) {
978 		u8 status = 0;
979 		u32 drv_synd;
980 
981 		ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
982 		MLX5_SET(mbox_out, ent->out, status, status);
983 		MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
984 
985 		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
986 		return;
987 	}
988 
989 	cmd_ent_get(ent); /* for the _real_ FW event on completion */
990 	/* ring doorbell after the descriptor is valid */
991 	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
992 	wmb();
993 	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
994 	/* if not in polling don't use ent after this point */
995 	if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
996 		poll_timeout(ent);
997 		/* make sure we read the descriptor after ownership is SW */
998 		rmb();
999 		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT));
1000 	}
1001 }
1002 
1003 static const char *deliv_status_to_str(u8 status)
1004 {
1005 	switch (status) {
1006 	case MLX5_CMD_DELIVERY_STAT_OK:
1007 		return "no errors";
1008 	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
1009 		return "signature error";
1010 	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
1011 		return "token error";
1012 	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
1013 		return "bad block number";
1014 	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
1015 		return "output pointer not aligned to block size";
1016 	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
1017 		return "input pointer not aligned to block size";
1018 	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
1019 		return "firmware internal error";
1020 	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
1021 		return "command input length error";
1022 	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
1023 		return "command output length error";
1024 	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
1025 		return "reserved fields not cleared";
1026 	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
1027 		return "bad command descriptor type";
1028 	default:
1029 		return "unknown status code";
1030 	}
1031 }
1032 
1033 enum {
1034 	MLX5_CMD_TIMEOUT_RECOVER_MSEC   = 5 * 1000,
1035 };
1036 
1037 static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
1038 					  struct mlx5_cmd_work_ent *ent)
1039 {
1040 	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_RECOVER_MSEC);
1041 
1042 	mlx5_cmd_eq_recover(dev);
1043 
1044 	/* Re-wait on the ent->done after executing the recovery flow. If the
1045 	 * recovery flow (or any other recovery flow running simultaneously)
1046 	 * has recovered an EQE, it should cause the entry to be completed by
1047 	 * the command interface.
1048 	 */
1049 	if (wait_for_completion_timeout(&ent->done, timeout)) {
1050 		mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx,
1051 			       mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
1052 		return;
1053 	}
1054 
1055 	mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx,
1056 		       mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
1057 
1058 	ent->ret = -ETIMEDOUT;
1059 	mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
1060 }
1061 
1062 static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
1063 {
1064 	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
1065 	struct mlx5_cmd *cmd = &dev->cmd;
1066 	int err;
1067 
1068 	if (!wait_for_completion_timeout(&ent->handling, timeout) &&
1069 	    cancel_work_sync(&ent->work)) {
1070 		ent->ret = -ECANCELED;
1071 		goto out_err;
1072 	}
1073 	if (cmd->mode == CMD_MODE_POLLING || ent->polling)
1074 		wait_for_completion(&ent->done);
1075 	else if (!wait_for_completion_timeout(&ent->done, timeout))
1076 		wait_func_handle_exec_timeout(dev, ent);
1077 
1078 out_err:
1079 	err = ent->ret;
1080 
1081 	if (err == -ETIMEDOUT) {
1082 		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
1083 			       mlx5_command_str(msg_to_opcode(ent->in)),
1084 			       msg_to_opcode(ent->in));
1085 	} else if (err == -ECANCELED) {
1086 		mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
1087 			       mlx5_command_str(msg_to_opcode(ent->in)),
1088 			       msg_to_opcode(ent->in));
1089 	}
1090 	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
1091 		      err, deliv_status_to_str(ent->status), ent->status);
1092 
1093 	return err;
1094 }
1095 
1096 /*  Notes:
1097  *    1. Callback functions may not sleep
1098  *    2. page queue commands do not support asynchronous completion
1099  */
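/*
 * Callers normally reach this through the mlx5_cmd_exec() wrappers
 * rather than invoking it directly. A minimal sketch of the typical
 * calling convention (assuming the nop_in/nop_out layouts from
 * mlx5_ifc.h):
 *
 *	u32 in[MLX5_ST_SZ_DW(nop_in)] = {};
 *	u32 out[MLX5_ST_SZ_DW(nop_out)] = {};
 *	int err;
 *
 *	MLX5_SET(nop_in, in, opcode, MLX5_CMD_OP_NOP);
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 */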
1100 static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
1101 			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
1102 			   mlx5_cmd_cbk_t callback,
1103 			   void *context, int page_queue, u8 *status,
1104 			   u8 token, bool force_polling)
1105 {
1106 	struct mlx5_cmd *cmd = &dev->cmd;
1107 	struct mlx5_cmd_work_ent *ent;
1108 	struct mlx5_cmd_stats *stats;
1109 	int err = 0;
1110 	s64 ds;
1111 	u16 op;
1112 
1113 	if (callback && page_queue)
1114 		return -EINVAL;
1115 
1116 	ent = cmd_alloc_ent(cmd, in, out, uout, uout_size,
1117 			    callback, context, page_queue);
1118 	if (IS_ERR(ent))
1119 		return PTR_ERR(ent);
1120 
1121 	/* The put for this ent happens when it is consumed, depending on the use case:
1122 	 * 1) (!callback) blocking flow: by caller after wait_func completes
1123 	 * 2) (callback) flow: by mlx5_cmd_comp_handler() when ent is handled
1124 	 */
1125 
1126 	ent->token = token;
1127 	ent->polling = force_polling;
1128 
1129 	init_completion(&ent->handling);
1130 	if (!callback)
1131 		init_completion(&ent->done);
1132 
1133 	INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
1134 	INIT_WORK(&ent->work, cmd_work_handler);
1135 	if (page_queue) {
1136 		cmd_work_handler(&ent->work);
1137 	} else if (!queue_work(cmd->wq, &ent->work)) {
1138 		mlx5_core_warn(dev, "failed to queue work\n");
1139 		err = -ENOMEM;
1140 		goto out_free;
1141 	}
1142 
1143 	if (callback)
1144 		goto out; /* mlx5_cmd_comp_handler() will put(ent) */
1145 
1146 	err = wait_func(dev, ent);
1147 	if (err == -ETIMEDOUT || err == -ECANCELED)
1148 		goto out_free;
1149 
1150 	ds = ent->ts2 - ent->ts1;
1151 	op = MLX5_GET(mbox_in, in->first.data, opcode);
1152 	if (op < MLX5_CMD_OP_MAX) {
1153 		stats = &cmd->stats[op];
1154 		spin_lock_irq(&stats->lock);
1155 		stats->sum += ds;
1156 		++stats->n;
1157 		spin_unlock_irq(&stats->lock);
1158 	}
1159 	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
1160 			   "fw exec time for %s is %lld nsec\n",
1161 			   mlx5_command_str(op), ds);
1162 	*status = ent->status;
1163 
1164 out_free:
1165 	cmd_ent_put(ent);
1166 out:
1167 	return err;
1168 }
1169 
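/*
 * debugfs "run" handler: writing the string "go" executes the command
 * currently staged in the "in"/"out" debugfs buffers via
 * mlx5_cmd_exec().
 */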
1170 static ssize_t dbg_write(struct file *filp, const char __user *buf,
1171 			 size_t count, loff_t *pos)
1172 {
1173 	struct mlx5_core_dev *dev = filp->private_data;
1174 	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1175 	char lbuf[3];
1176 	int err;
1177 
1178 	if (!dbg->in_msg || !dbg->out_msg)
1179 		return -ENOMEM;
1180 
1181 	if (count < sizeof(lbuf) - 1)
1182 		return -EINVAL;
1183 
1184 	if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1))
1185 		return -EFAULT;
1186 
1187 	lbuf[sizeof(lbuf) - 1] = 0;
1188 
1189 	if (strcmp(lbuf, "go"))
1190 		return -EINVAL;
1191 
1192 	err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);
1193 
1194 	return err ? err : count;
1195 }
1196 
1197 static const struct file_operations fops = {
1198 	.owner	= THIS_MODULE,
1199 	.open	= simple_open,
1200 	.write	= dbg_write,
1201 };
1202 
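/*
 * Command payloads are split across the descriptor and a chain of
 * mailboxes: the first chunk lives inline in msg->first.data, the
 * remainder is copied into linked mailbox blocks of
 * MLX5_CMD_DATA_BLOCK_SIZE bytes each.
 */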
1203 static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
1204 			    u8 token)
1205 {
1206 	struct mlx5_cmd_prot_block *block;
1207 	struct mlx5_cmd_mailbox *next;
1208 	int copy;
1209 
1210 	if (!to || !from)
1211 		return -ENOMEM;
1212 
1213 	copy = min_t(int, size, sizeof(to->first.data));
1214 	memcpy(to->first.data, from, copy);
1215 	size -= copy;
1216 	from += copy;
1217 
1218 	next = to->next;
1219 	while (size) {
1220 		if (!next) {
1221 			/* this is a BUG */
1222 			return -ENOMEM;
1223 		}
1224 
1225 		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
1226 		block = next->buf;
1227 		memcpy(block->data, from, copy);
1228 		from += copy;
1229 		size -= copy;
1230 		block->token = token;
1231 		next = next->next;
1232 	}
1233 
1234 	return 0;
1235 }
1236 
1237 static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
1238 {
1239 	struct mlx5_cmd_prot_block *block;
1240 	struct mlx5_cmd_mailbox *next;
1241 	int copy;
1242 
1243 	if (!to || !from)
1244 		return -ENOMEM;
1245 
1246 	copy = min_t(int, size, sizeof(from->first.data));
1247 	memcpy(to, from->first.data, copy);
1248 	size -= copy;
1249 	to += copy;
1250 
1251 	next = from->next;
1252 	while (size) {
1253 		if (!next) {
1254 			/* this is a BUG */
1255 			return -ENOMEM;
1256 		}
1257 
1258 		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
1259 		block = next->buf;
1260 
1261 		memcpy(to, block->data, copy);
1262 		to += copy;
1263 		size -= copy;
1264 		next = next->next;
1265 	}
1266 
1267 	return 0;
1268 }
1269 
1270 static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
1271 					      gfp_t flags)
1272 {
1273 	struct mlx5_cmd_mailbox *mailbox;
1274 
1275 	mailbox = kmalloc(sizeof(*mailbox), flags);
1276 	if (!mailbox)
1277 		return ERR_PTR(-ENOMEM);
1278 
1279 	mailbox->buf = dma_pool_zalloc(dev->cmd.pool, flags,
1280 				       &mailbox->dma);
1281 	if (!mailbox->buf) {
1282 		mlx5_core_dbg(dev, "failed allocation\n");
1283 		kfree(mailbox);
1284 		return ERR_PTR(-ENOMEM);
1285 	}
1286 	mailbox->next = NULL;
1287 
1288 	return mailbox;
1289 }
1290 
1291 static void free_cmd_box(struct mlx5_core_dev *dev,
1292 			 struct mlx5_cmd_mailbox *mailbox)
1293 {
1294 	dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
1295 	kfree(mailbox);
1296 }
1297 
1298 static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
1299 					       gfp_t flags, int size,
1300 					       u8 token)
1301 {
1302 	struct mlx5_cmd_mailbox *tmp, *head = NULL;
1303 	struct mlx5_cmd_prot_block *block;
1304 	struct mlx5_cmd_msg *msg;
1305 	int err;
1306 	int n;
1307 	int i;
1308 
1309 	msg = kzalloc(sizeof(*msg), flags);
1310 	if (!msg)
1311 		return ERR_PTR(-ENOMEM);
1312 
1313 	msg->len = size;
1314 	n = mlx5_calc_cmd_blocks(msg);
1315 
1316 	for (i = 0; i < n; i++) {
1317 		tmp = alloc_cmd_box(dev, flags);
1318 		if (IS_ERR(tmp)) {
1319 			mlx5_core_warn(dev, "failed allocating block\n");
1320 			err = PTR_ERR(tmp);
1321 			goto err_alloc;
1322 		}
1323 
1324 		block = tmp->buf;
1325 		tmp->next = head;
1326 		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
1327 		block->block_num = cpu_to_be32(n - i - 1);
1328 		block->token = token;
1329 		head = tmp;
1330 	}
1331 	msg->next = head;
1332 	return msg;
1333 
1334 err_alloc:
1335 	while (head) {
1336 		tmp = head->next;
1337 		free_cmd_box(dev, head);
1338 		head = tmp;
1339 	}
1340 	kfree(msg);
1341 
1342 	return ERR_PTR(err);
1343 }
1344 
1345 static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
1346 			      struct mlx5_cmd_msg *msg)
1347 {
1348 	struct mlx5_cmd_mailbox *head = msg->next;
1349 	struct mlx5_cmd_mailbox *next;
1350 
1351 	while (head) {
1352 		next = head->next;
1353 		free_cmd_box(dev, head);
1354 		head = next;
1355 	}
1356 	kfree(msg);
1357 }
1358 
1359 static ssize_t data_write(struct file *filp, const char __user *buf,
1360 			  size_t count, loff_t *pos)
1361 {
1362 	struct mlx5_core_dev *dev = filp->private_data;
1363 	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1364 	void *ptr;
1365 
1366 	if (*pos != 0)
1367 		return -EINVAL;
1368 
1369 	kfree(dbg->in_msg);
1370 	dbg->in_msg = NULL;
1371 	dbg->inlen = 0;
1372 	ptr = memdup_user(buf, count);
1373 	if (IS_ERR(ptr))
1374 		return PTR_ERR(ptr);
1375 	dbg->in_msg = ptr;
1376 	dbg->inlen = count;
1377 
1378 	*pos = count;
1379 
1380 	return count;
1381 }
1382 
1383 static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
1384 			 loff_t *pos)
1385 {
1386 	struct mlx5_core_dev *dev = filp->private_data;
1387 	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1388 
1389 	if (!dbg->out_msg)
1390 		return -ENOMEM;
1391 
1392 	return simple_read_from_buffer(buf, count, pos, dbg->out_msg,
1393 				       dbg->outlen);
1394 }
1395 
1396 static const struct file_operations dfops = {
1397 	.owner	= THIS_MODULE,
1398 	.open	= simple_open,
1399 	.write	= data_write,
1400 	.read	= data_read,
1401 };
1402 
1403 static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
1404 			   loff_t *pos)
1405 {
1406 	struct mlx5_core_dev *dev = filp->private_data;
1407 	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1408 	char outlen[8];
1409 	int err;
1410 
1411 	err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
1412 	if (err < 0)
1413 		return err;
1414 
1415 	return simple_read_from_buffer(buf, count, pos, outlen, err);
1416 }
1417 
1418 static ssize_t outlen_write(struct file *filp, const char __user *buf,
1419 			    size_t count, loff_t *pos)
1420 {
1421 	struct mlx5_core_dev *dev = filp->private_data;
1422 	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1423 	char outlen_str[8] = {0};
1424 	int outlen;
1425 	void *ptr;
1426 	int err;
1427 
1428 	if (*pos != 0 || count > 6)
1429 		return -EINVAL;
1430 
1431 	kfree(dbg->out_msg);
1432 	dbg->out_msg = NULL;
1433 	dbg->outlen = 0;
1434 
1435 	if (copy_from_user(outlen_str, buf, count))
1436 		return -EFAULT;
1437 
1438 	err = sscanf(outlen_str, "%d", &outlen);
1439 	if (err < 0)
1440 		return err;
1441 
1442 	ptr = kzalloc(outlen, GFP_KERNEL);
1443 	if (!ptr)
1444 		return -ENOMEM;
1445 
1446 	dbg->out_msg = ptr;
1447 	dbg->outlen = outlen;
1448 
1449 	*pos = count;
1450 
1451 	return count;
1452 }
1453 
1454 static const struct file_operations olfops = {
1455 	.owner	= THIS_MODULE,
1456 	.open	= simple_open,
1457 	.write	= outlen_write,
1458 	.read	= outlen_read,
1459 };
1460 
1461 static void set_wqname(struct mlx5_core_dev *dev)
1462 {
1463 	struct mlx5_cmd *cmd = &dev->cmd;
1464 
1465 	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
1466 		 dev_name(dev->device));
1467 }
1468 
1469 static void clean_debug_files(struct mlx5_core_dev *dev)
1470 {
1471 	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1472 
1473 	if (!mlx5_debugfs_root)
1474 		return;
1475 
1476 	mlx5_cmdif_debugfs_cleanup(dev);
1477 	debugfs_remove_recursive(dbg->dbg_root);
1478 }
1479 
1480 static void create_debugfs_files(struct mlx5_core_dev *dev)
1481 {
1482 	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1483 
1484 	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
1485 
1486 	debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops);
1487 	debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops);
1488 	debugfs_create_file("out_len", 0600, dbg->dbg_root, dev, &olfops);
1489 	debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status);
1490 	debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
1491 
1492 	mlx5_cmdif_debugfs_init(dev);
1493 }
1494 
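/*
 * Quiesce the command interface (drain all regular slots and the page
 * slot) and then restrict submission to a single allowed opcode, or
 * re-open it with CMD_ALLOWED_OPCODE_ALL.
 */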
1495 void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode)
1496 {
1497 	struct mlx5_cmd *cmd = &dev->cmd;
1498 	int i;
1499 
1500 	for (i = 0; i < cmd->max_reg_cmds; i++)
1501 		down(&cmd->sem);
1502 	down(&cmd->pages_sem);
1503 
1504 	cmd->allowed_opcode = opcode;
1505 
1506 	up(&cmd->pages_sem);
1507 	for (i = 0; i < cmd->max_reg_cmds; i++)
1508 		up(&cmd->sem);
1509 }
1510 
1511 static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
1512 {
1513 	struct mlx5_cmd *cmd = &dev->cmd;
1514 	int i;
1515 
1516 	for (i = 0; i < cmd->max_reg_cmds; i++)
1517 		down(&cmd->sem);
1518 	down(&cmd->pages_sem);
1519 
1520 	cmd->mode = mode;
1521 
1522 	up(&cmd->pages_sem);
1523 	for (i = 0; i < cmd->max_reg_cmds; i++)
1524 		up(&cmd->sem);
1525 }
1526 
1527 static int cmd_comp_notifier(struct notifier_block *nb,
1528 			     unsigned long type, void *data)
1529 {
1530 	struct mlx5_core_dev *dev;
1531 	struct mlx5_cmd *cmd;
1532 	struct mlx5_eqe *eqe;
1533 
1534 	cmd = mlx5_nb_cof(nb, struct mlx5_cmd, nb);
1535 	dev = container_of(cmd, struct mlx5_core_dev, cmd);
1536 	eqe = data;
1537 
1538 	mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
1539 
1540 	return NOTIFY_OK;
1541 }
1542 void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
1543 {
1544 	MLX5_NB_INIT(&dev->cmd.nb, cmd_comp_notifier, CMD);
1545 	mlx5_eq_notifier_register(dev, &dev->cmd.nb);
1546 	mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
1547 }
1548 
1549 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
1550 {
1551 	mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
1552 	mlx5_eq_notifier_unregister(dev, &dev->cmd.nb);
1553 }
1554 
1555 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
1556 {
1557 	unsigned long flags;
1558 
1559 	if (msg->parent) {
1560 		spin_lock_irqsave(&msg->parent->lock, flags);
1561 		list_add_tail(&msg->list, &msg->parent->head);
1562 		spin_unlock_irqrestore(&msg->parent->lock, flags);
1563 	} else {
1564 		mlx5_free_cmd_msg(dev, msg);
1565 	}
1566 }
1567 
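/*
 * Completion handler shared by the EQ path (forced == false) and the
 * driver-triggered error path (forced == true). Each set bit in @vec
 * identifies a command slot to complete; results are copied back,
 * signatures verified, and either the callback is run or wait_func()
 * is woken through ent->done.
 */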
1568 static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
1569 {
1570 	struct mlx5_cmd *cmd = &dev->cmd;
1571 	struct mlx5_cmd_work_ent *ent;
1572 	mlx5_cmd_cbk_t callback;
1573 	void *context;
1574 	int err;
1575 	int i;
1576 	s64 ds;
1577 	struct mlx5_cmd_stats *stats;
1578 	unsigned long flags;
1579 	unsigned long vector;
1580 
1581 	/* there can be at most 32 command queues */
1582 	vector = vec & 0xffffffff;
1583 	for (i = 0; i < (1 << cmd->log_sz); i++) {
1584 		if (test_bit(i, &vector)) {
1585 			struct semaphore *sem;
1586 
1587 			ent = cmd->ent_arr[i];
1588 
1589 			/* if we already completed the command, ignore it */
1590 			if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
1591 						&ent->state)) {
1592 				/* only real completion can free the cmd slot */
1593 				if (!forced) {
1594 					mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
1595 						      ent->idx);
1596 					cmd_ent_put(ent);
1597 				}
1598 				continue;
1599 			}
1600 
1601 			if (ent->callback && cancel_delayed_work(&ent->cb_timeout_work))
1602 				cmd_ent_put(ent); /* timeout work was canceled */
1603 
1604 			if (!forced || /* Real FW completion */
1605 			    pci_channel_offline(dev->pdev) || /* FW is inaccessible */
1606 			    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
1607 				cmd_ent_put(ent);
1608 
1609 			if (ent->page_queue)
1610 				sem = &cmd->pages_sem;
1611 			else
1612 				sem = &cmd->sem;
1613 			ent->ts2 = ktime_get_ns();
1614 			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
1615 			dump_command(dev, ent, 0);
1616 			if (!ent->ret) {
1617 				if (!cmd->checksum_disabled)
1618 					ent->ret = verify_signature(ent);
1619 				else
1620 					ent->ret = 0;
1621 				if (vec & MLX5_TRIGGERED_CMD_COMP)
1622 					ent->status = MLX5_DRIVER_STATUS_ABORTED;
1623 				else
1624 					ent->status = ent->lay->status_own >> 1;
1625 
1626 				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
1627 					      ent->ret, deliv_status_to_str(ent->status), ent->status);
1628 			}
1629 
1630 			if (ent->callback) {
1631 				ds = ent->ts2 - ent->ts1;
1632 				if (ent->op < MLX5_CMD_OP_MAX) {
1633 					stats = &cmd->stats[ent->op];
1634 					spin_lock_irqsave(&stats->lock, flags);
1635 					stats->sum += ds;
1636 					++stats->n;
1637 					spin_unlock_irqrestore(&stats->lock, flags);
1638 				}
1639 
1640 				callback = ent->callback;
1641 				context = ent->context;
1642 				err = ent->ret;
1643 				if (!err) {
1644 					err = mlx5_copy_from_msg(ent->uout,
1645 								 ent->out,
1646 								 ent->uout_size);
1647 
1648 					err = err ? err : mlx5_cmd_check(dev,
1649 									ent->in->first.data,
1650 									ent->uout);
1651 				}
1652 
1653 				mlx5_free_cmd_msg(dev, ent->out);
1654 				free_msg(dev, ent->in);
1655 
1656 				err = err ? err : ent->status;
1657 				/* final consumer is done, release ent */
1658 				cmd_ent_put(ent);
1659 				callback(err, context);
1660 			} else {
1661 				/* release wait_func() so mlx5_cmd_invoke()
1662 				 * can make the final ent_put()
1663 				 */
1664 				complete(&ent->done);
1665 			}
1666 			up(sem);
1667 		}
1668 	}
1669 }
1670 
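/* Force completion of every command that currently occupies a queue slot.
 * Used when firmware will not answer (internal error, PCI error) so that
 * waiters are released instead of timing out one by one.
 */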
void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	unsigned long bitmask;
	unsigned long flags;
	u64 vector;
	int i;

	/* wait for pending handlers to complete */
	mlx5_eq_synchronize_cmd_irq(dev);
	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
	vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
	if (!vector)
		goto no_trig;

	bitmask = vector;
	/* we must increment the allocated entries refcount before triggering the completions
	 * to guarantee pending commands will not get freed in the meanwhile.
	 * For that reason, it also has to be done inside the alloc_lock.
	 */
	for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
		cmd_ent_get(cmd->ent_arr[i]);
	vector |= MLX5_TRIGGERED_CMD_COMP;
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);

	mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
	mlx5_cmd_comp_handler(dev, vector, true);
	for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
		cmd_ent_put(cmd->ent_arr[i]);
	return;

no_trig:
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
}

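/* Drain the command interface: keep triggering completions until every
 * regular and page-request slot can be acquired, then release them all so
 * the cmdif is left unlocked and empty.
 */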
void mlx5_cmd_flush(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		while (down_trylock(&cmd->sem))
			mlx5_cmd_trigger_completions(dev);

	while (down_trylock(&cmd->pages_sem))
		mlx5_cmd_trigger_completions(dev);

	/* Unlock cmdif */
	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

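/* Translate a command delivery status into a negative errno value. */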
static int status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
	case MLX5_DRIVER_STATUS_ABORTED:
		return 0;
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return -EBADR;
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return -EFAULT; /* Bad address */
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return -ENOMSG;
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return -EIO;
	default:
		return -EINVAL;
	}
}

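/* Get an input mailbox: take one from the smallest cache bucket that fits
 * @in_size, or fall back to allocating a fresh message on a cache miss.
 */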
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct cmd_msg_cache *ch = NULL;
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	if (in_size <= 16)
		goto cache_miss;

	for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
		ch = &cmd->cache[i];
		if (in_size > ch->max_inbox_size)
			continue;
		spin_lock_irq(&ch->lock);
		if (list_empty(&ch->head)) {
			spin_unlock_irq(&ch->lock);
			continue;
		}
		msg = list_entry(ch->head.next, typeof(*msg), list);
		/* For cached lists, we must explicitly state what is
		 * the real size
		 */
		msg->len = in_size;
		list_del(&msg->list);
		spin_unlock_irq(&ch->lock);
		break;
	}

	if (!IS_ERR(msg))
		return msg;

cache_miss:
	msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
	return msg;
}

static int is_manage_pages(void *in)
{
	return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}

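/* Common entry point for command execution: build the input/output mailboxes
 * and hand the command to mlx5_cmd_invoke(). With a callback the command runs
 * asynchronously and the mailboxes are released from the completion handler;
 * without one they are released here before returning.
 */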
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context,
		    bool force_polling)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	gfp_t gfp;
	int err;
	u8 status = 0;
	u32 drv_synd;
	u16 opcode;
	u8 token;

	opcode = MLX5_GET(mbox_in, in, opcode);
	if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode)) {
		err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
		MLX5_SET(mbox_out, out, status, status);
		MLX5_SET(mbox_out, out, syndrome, drv_synd);
		return err;
	}

	pages_queue = is_manage_pages(in);
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	token = alloc_token(&dev->cmd);

	err = mlx5_copy_to_msg(inb, in, in_size, token);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, &status, token, force_polling);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	if (!callback)
		err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	if (!callback)
		mlx5_free_cmd_msg(dev, outb);

out_in:
	if (!callback)
		free_msg(dev, inb);
	return err;
}

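/**
 * mlx5_cmd_exec - Execute a firmware command and wait for its completion
 * @dev:      mlx5 core device
 * @in:       command input mailbox
 * @in_size:  size of @in in bytes
 * @out:      command output mailbox, filled on completion
 * @out_size: size of @out in bytes
 *
 * Sleeps until the command completes. Returns 0 on success, a negative errno
 * on delivery failure, or the result of mlx5_cmd_check() when firmware
 * reports a bad status.
 */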
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	int err;

	err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
	return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);

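/* Initialize a context for asynchronous command execution. The in-flight
 * counter starts at 1; the extra reference is dropped by
 * mlx5_cmd_cleanup_async_ctx() when the context is torn down.
 */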
void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
			     struct mlx5_async_ctx *ctx)
{
	ctx->dev = dev;
	/* Starts at 1 to avoid doing wake_up if we are not cleaning up */
	atomic_set(&ctx->num_inflight, 1);
	init_waitqueue_head(&ctx->wait);
}
EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);

/**
 * mlx5_cmd_cleanup_async_ctx - Clean up an async_ctx
 * @ctx: The ctx to clean
 *
 * Upon return all callbacks given to mlx5_cmd_exec_cb() have been called. The
 * caller must ensure that mlx5_cmd_exec_cb() is not called during or after
 * the call to mlx5_cmd_cleanup_async_ctx().
 */
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
{
	atomic_dec(&ctx->num_inflight);
	wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);

static void mlx5_cmd_exec_cb_handler(int status, void *_work)
{
	struct mlx5_async_work *work = _work;
	struct mlx5_async_ctx *ctx = work->ctx;

	work->user_callback(status, work);
	if (atomic_dec_and_test(&ctx->num_inflight))
		wake_up(&ctx->wait);
}

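/* Execute a firmware command asynchronously; @callback runs from the
 * completion path with the command status. Returns -EIO if the context is
 * already being cleaned up, otherwise the submission result from cmd_exec().
 */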
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
		     void *out, int out_size, mlx5_async_cbk_t callback,
		     struct mlx5_async_work *work)
{
	int ret;

	work->ctx = ctx;
	work->user_callback = callback;
	if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
		return -EIO;
	ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
		       mlx5_cmd_exec_cb_handler, work, false);
	if (ret && atomic_dec_and_test(&ctx->num_inflight))
		wake_up(&ctx->wait);

	return ret;
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);

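/* Same as mlx5_cmd_exec(), but forces polling mode for this command. */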
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size)
{
	int err;

	err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);

	return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec_polling);

static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct cmd_msg_cache *ch;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;
	int i;

	for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
		ch = &dev->cmd.cache[i];
		list_for_each_entry_safe(msg, n, &ch->head, list) {
			list_del(&msg->list);
			mlx5_free_cmd_msg(dev, msg);
		}
	}
}

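/* Number of preallocated entries and maximum inbox size per cache bucket */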
static unsigned cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = {
	512, 32, 16, 8, 2
};

static unsigned cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = {
	16 + MLX5_CMD_DATA_BLOCK_SIZE,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 2,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 16,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 256,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 512,
};

static void create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cmd_msg_cache *ch;
	struct mlx5_cmd_msg *msg;
	int i;
	int k;

	/* Initialize and fill the caches with initial entries */
	for (k = 0; k < MLX5_NUM_COMMAND_CACHES; k++) {
		ch = &cmd->cache[k];
		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->head);
		ch->num_ent = cmd_cache_num_ent[k];
		ch->max_inbox_size = cmd_cache_ent_size[k];
		for (i = 0; i < ch->num_ent; i++) {
			msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN,
						 ch->max_inbox_size, 0);
			if (IS_ERR(msg))
				break;
			msg->parent = ch;
			list_add_tail(&msg->list, &ch->head);
		}
	}
}

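/* Allocate the command queue buffer. If the coherent allocation is not
 * already 4K aligned, retry with extra room and align the CPU pointer and
 * DMA address manually.
 */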
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE,
						&cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	/* make sure it is aligned to 4K */
	if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
		cmd->cmd_buf = cmd->cmd_alloc_buf;
		cmd->dma = cmd->alloc_dma;
		cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
		return 0;
	}

	dma_free_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
	cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev),
						2 * MLX5_ADAPTER_PAGE_SIZE - 1,
						&cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
	cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
	cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
	return 0;
}

static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	dma_free_coherent(mlx5_core_dma_dev(dev), cmd->alloc_size, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
}

static u16 cmdif_rev(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}

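/* Set up the command interface: verify the cmdif revision, allocate the
 * command queue page and mailbox pool, read the queue geometry from the
 * initialization segment, program the queue address into firmware and
 * start out in polling mode.
 */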
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	memset(cmd, 0, sizeof(*cmd));
	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		mlx5_core_err(dev,
			      "Driver cmdif rev(%d) differs from firmware's(%d)\n",
			      CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->stats = kvzalloc(MLX5_CMD_OP_MAX * sizeof(*cmd->stats), GFP_KERNEL);
	if (!cmd->stats)
		return -ENOMEM;

	cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
	if (!cmd->pool) {
		err = -ENOMEM;
		goto dma_pool_err;
	}

	err = alloc_cmd_page(dev, cmd);
	if (err)
		goto err_free_pool;

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
			      1 << cmd->log_sz);
		err = -EINVAL;
		goto err_free_page;
	}

	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		mlx5_core_err(dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_free_page;
	}

	cmd->state = MLX5_CMDIF_STATE_DOWN;
	cmd->checksum_disabled = 1;
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		mlx5_core_err(dev, "driver does not support command interface version. driver %d, firmware %d\n",
			      CMD_IF_REV, cmd->cmdif_rev);
		err = -EOPNOTSUPP;
		goto err_free_page;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < MLX5_CMD_OP_MAX; i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		mlx5_core_err(dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_free_page;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;
	cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL;

	create_msg_cache(dev);

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		mlx5_core_err(dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	create_debugfs_files(dev);

	return 0;

err_cache:
	destroy_msg_cache(dev);

err_free_page:
	free_cmd_page(dev, cmd);

err_free_pool:
	dma_pool_destroy(cmd->pool);
dma_pool_err:
	kvfree(cmd->stats);
	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);

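/* Tear down everything set up by mlx5_cmd_init(), in reverse order. */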
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
	dma_pool_destroy(cmd->pool);
	kvfree(cmd->stats);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);

void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
			enum mlx5_cmdif_state cmdif_state)
{
	dev->cmd.state = cmdif_state;
}
EXPORT_SYMBOL(mlx5_cmd_set_state);