/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/mlx5.h"
#include "lib/pci_vsc.h"
#include "diag/fw_tracer.h"

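/* The health timer fires roughly every MLX5_HEALTH_POLL_INTERVAL (2s, plus
 * jitter, see get_next_poll_jiffies()). A device whose health counter does
 * not advance for MAX_MISSES consecutive polls is reported as compromised.
 */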
enum {
	MLX5_HEALTH_POLL_INTERVAL	= 2 * HZ,
	MAX_MISSES			= 3,
};

enum {
	MLX5_HEALTH_SYNDR_FW_ERR		= 0x1,
	MLX5_HEALTH_SYNDR_IRISC_ERR		= 0x7,
	MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR	= 0x8,
	MLX5_HEALTH_SYNDR_CRC_ERR		= 0x9,
	MLX5_HEALTH_SYNDR_FETCH_PCI_ERR		= 0xa,
	MLX5_HEALTH_SYNDR_HW_FTL_ERR		= 0xb,
	MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR	= 0xc,
	MLX5_HEALTH_SYNDR_EQ_ERR		= 0xd,
	MLX5_HEALTH_SYNDR_EQ_INV		= 0xe,
	MLX5_HEALTH_SYNDR_FFSER_ERR		= 0xf,
	MLX5_HEALTH_SYNDR_HIGH_TEMP		= 0x10
};

enum {
	MLX5_DROP_NEW_HEALTH_WORK,
};

enum {
	MLX5_SENSOR_NO_ERR		= 0,
	MLX5_SENSOR_PCI_COMM_ERR	= 1,
	MLX5_SENSOR_PCI_ERR		= 2,
	MLX5_SENSOR_NIC_DISABLED	= 3,
	MLX5_SENSOR_NIC_SW_RESET	= 4,
	MLX5_SENSOR_FW_SYND_RFR		= 5,
};

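/* The NIC interface (IFC) state is a small field that shares the
 * cmdq_addr_l_sz word of the initialization segment with command-queue
 * parameters, hence the masking and shifting below.
 */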
u8 mlx5_get_nic_state(struct mlx5_core_dev *dev)
{
	return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 7;
}

void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state)
{
	u32 cur_cmdq_addr_l_sz;

	cur_cmdq_addr_l_sz = ioread32be(&dev->iseg->cmdq_addr_l_sz);
	iowrite32be((cur_cmdq_addr_l_sz & 0xFFFFF000) |
		    state << MLX5_NIC_IFC_OFFSET,
		    &dev->iseg->cmdq_addr_l_sz);
}

static bool sensor_pci_not_working(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;

	/* Offline PCI reads return 0xffffffff */
	return (ioread32be(&h->fw_ver) == 0xffffffff);
}

static bool sensor_fw_synd_rfr(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;
	u32 rfr = ioread32be(&h->rfr) >> MLX5_RFR_OFFSET;
	u8 synd = ioread8(&h->synd);

	if (rfr && synd)
		mlx5_core_dbg(dev, "FW requests reset, synd: %d\n", synd);
	return rfr && synd;
}

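/* Sensors are checked from most to least severe; the first one that trips
 * determines the reported fatal error.
 */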
static u32 check_fatal_sensors(struct mlx5_core_dev *dev)
{
	if (sensor_pci_not_working(dev))
		return MLX5_SENSOR_PCI_COMM_ERR;
	if (pci_channel_offline(dev->pdev))
		return MLX5_SENSOR_PCI_ERR;
	if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
		return MLX5_SENSOR_NIC_DISABLED;
	if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_SW_RESET)
		return MLX5_SENSOR_NIC_SW_RESET;
	if (sensor_fw_synd_rfr(dev))
		return MLX5_SENSOR_FW_SYND_RFR;

	return MLX5_SENSOR_NO_ERR;
}

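/* Take (lock == true) or release (lock == false) the SW reset semaphore that
 * coordinates the reset flow between PFs. Access to the semaphore space goes
 * through the vendor-specific capability (VSC) gateway, which must itself be
 * locked around the operation.
 */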
static int lock_sem_sw_reset(struct mlx5_core_dev *dev, bool lock)
{
	enum mlx5_vsc_state state;
	int ret;

	if (!mlx5_core_is_pf(dev))
		return -EBUSY;

	/* Try to lock GW access. This stage doesn't return EBUSY because a
	 * locked GW does not mean that another PF has already started the
	 * reset.
	 */
	ret = mlx5_vsc_gw_lock(dev);
	if (ret == -EBUSY)
		return -EINVAL;
	if (ret)
		return ret;

	state = lock ? MLX5_VSC_LOCK : MLX5_VSC_UNLOCK;
	/* At this stage, if the return status is EBUSY, then we know for sure
	 * that another PF started the reset, so don't allow another reset.
	 */
	ret = mlx5_vsc_sem_set_space(dev, MLX5_SEMAPHORE_SW_RESET, state);
	if (ret)
		mlx5_core_warn(dev, "Failed to lock SW reset semaphore\n");

	/* Unlock GW access */
	mlx5_vsc_gw_unlock(dev);

	return ret;
}

static bool reset_fw_if_needed(struct mlx5_core_dev *dev)
{
	bool supported = (ioread32be(&dev->iseg->initializing) >>
			  MLX5_FW_RESET_SUPPORTED_OFFSET) & 1;
	u32 fatal_error;

	if (!supported)
		return false;

	/* The reset only needs to be issued by one PF. The health buffer is
	 * shared between all functions, and will be cleared during a reset.
	 * Check again to avoid a redundant second reset. If the fatal error
	 * was PCI related, a reset won't help.
	 */
	fatal_error = check_fatal_sensors(dev);
	if (fatal_error == MLX5_SENSOR_PCI_COMM_ERR ||
	    fatal_error == MLX5_SENSOR_NIC_DISABLED ||
	    fatal_error == MLX5_SENSOR_NIC_SW_RESET) {
		mlx5_core_warn(dev, "Not issuing FW reset. Either it's already done or won't help.");
		return false;
	}

	mlx5_core_warn(dev, "Issuing FW Reset\n");
	/* Write the NIC interface field to initiate the reset; the command
	 * interface address also resides here, don't overwrite it.
	 */
	mlx5_set_nic_state(dev, MLX5_NIC_IFC_SW_RESET);

	return true;
}

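/* Move the device into the internal error state: once a fatal sensor (or the
 * caller, via 'force') says the device is unusable, flush outstanding commands
 * so they complete with an error, and notify listeners with
 * MLX5_DEV_EVENT_SYS_ERROR.
 */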
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
{
	mutex_lock(&dev->intf_state_mutex);
	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		goto unlock;
	if (dev->state == MLX5_DEVICE_STATE_UNINITIALIZED) {
		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
		goto unlock;
	}

	if (check_fatal_sensors(dev) || force) {
		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
		mlx5_cmd_flush(dev);
	}

	mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1);
unlock:
	mutex_unlock(&dev->intf_state_mutex);
}

#define MLX5_CRDUMP_WAIT_MS	60000
#define MLX5_FW_RESET_WAIT_MS	1000
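/* Issue a FW-level SW reset (if this PF wins the SW reset semaphore) and wait
 * for the NIC interface to report DISABLED. If another PF already holds the
 * semaphore, it is presumably collecting a crdump and resetting the device
 * itself, so just wait longer (MLX5_CRDUMP_WAIT_MS) for that reset to land.
 */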
void mlx5_error_sw_reset(struct mlx5_core_dev *dev)
{
	unsigned long end, delay_ms = MLX5_FW_RESET_WAIT_MS;
	int lock = -EBUSY;

	mutex_lock(&dev->intf_state_mutex);
	if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
		goto unlock;

	mlx5_core_err(dev, "start\n");

	if (check_fatal_sensors(dev) == MLX5_SENSOR_FW_SYND_RFR) {
		/* Get cr-dump and reset FW semaphore */
		lock = lock_sem_sw_reset(dev, true);

		if (lock == -EBUSY) {
			delay_ms = MLX5_CRDUMP_WAIT_MS;
			goto recover_from_sw_reset;
		}
		/* Execute SW reset */
		reset_fw_if_needed(dev);
	}

recover_from_sw_reset:
	/* Recover from SW reset */
	end = jiffies + msecs_to_jiffies(delay_ms);
	do {
		if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
			break;

		cond_resched();
	} while (!time_after(jiffies, end));

	if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
		dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
			mlx5_get_nic_state(dev), delay_ms);
	}

	/* Release FW semaphore if you are the lock owner */
	if (!lock)
		lock_sem_sw_reset(dev, false);

	mlx5_core_err(dev, "end\n");

unlock:
	mutex_unlock(&dev->intf_state_mutex);
}

static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
{
	u8 nic_interface = mlx5_get_nic_state(dev);

	switch (nic_interface) {
	case MLX5_NIC_IFC_FULL:
		mlx5_core_warn(dev, "Expected to see disabled NIC but it is full driver\n");
		break;

	case MLX5_NIC_IFC_DISABLED:
		mlx5_core_warn(dev, "starting teardown\n");
		break;

	case MLX5_NIC_IFC_NO_DRAM_NIC:
		mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n");
		break;

	case MLX5_NIC_IFC_SW_RESET:
		/* The IFC mode field is 3 bits, so it will read 0x7 in 2 cases:
		 * 1. PCI has been disabled (i.e. PCI-AER, PF driver unloaded
		 *    and this is a VF); this is not recoverable by SW reset.
		 *    Logging of this is handled elsewhere.
		 * 2. FW reset has been issued by another function; the driver
		 *    can be reloaded to recover after the mode switches to
		 *    MLX5_NIC_IFC_DISABLED.
		 */
		if (dev->priv.health.fatal_error != MLX5_SENSOR_PCI_COMM_ERR)
			mlx5_core_warn(dev, "NIC SW reset in progress\n");
		break;

	default:
		mlx5_core_warn(dev, "Expected to see disabled NIC but it has invalid value %d\n",
			       nic_interface);
	}

	mlx5_disable_device(dev);
}

/* How long to wait for PCI reads to come back before aborting the health
 * recovery flow (in msecs).
 */
#define MLX5_RECOVERY_WAIT_MSECS 60000
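/* Tear the device down, wait (up to MLX5_RECOVERY_WAIT_MSECS) for PCI reads to
 * work again, then reload it. Recovery is considered successful only if the
 * interface comes back up and no fatal sensor is still tripped.
 */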
static int mlx5_health_try_recover(struct mlx5_core_dev *dev)
{
	unsigned long end;

	mlx5_core_warn(dev, "handling bad device here\n");
	mlx5_handle_bad_state(dev);
	end = jiffies + msecs_to_jiffies(MLX5_RECOVERY_WAIT_MSECS);
	while (sensor_pci_not_working(dev)) {
		if (time_after(jiffies, end)) {
			mlx5_core_err(dev,
				      "health recovery flow aborted, PCI reads still not working\n");
			return -EIO;
		}
		msleep(100);
	}

	mlx5_core_err(dev, "starting health recovery flow\n");
	mlx5_recover_device(dev);
	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state) ||
	    check_fatal_sensors(dev)) {
		mlx5_core_err(dev, "health recovery failed\n");
		return -EIO;
	}
	return 0;
}

static const char *hsynd_str(u8 synd)
{
	switch (synd) {
	case MLX5_HEALTH_SYNDR_FW_ERR:
		return "firmware internal error";
	case MLX5_HEALTH_SYNDR_IRISC_ERR:
		return "irisc not responding";
	case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR:
		return "unrecoverable hardware error";
	case MLX5_HEALTH_SYNDR_CRC_ERR:
		return "firmware CRC error";
	case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR:
		return "ICM fetch PCI error";
	case MLX5_HEALTH_SYNDR_HW_FTL_ERR:
		return "HW fatal error";
	case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR:
		return "async EQ buffer overrun";
	case MLX5_HEALTH_SYNDR_EQ_ERR:
		return "EQ error";
	case MLX5_HEALTH_SYNDR_EQ_INV:
		return "Invalid EQ referenced";
	case MLX5_HEALTH_SYNDR_FFSER_ERR:
		return "FFSER error";
	case MLX5_HEALTH_SYNDR_HIGH_TEMP:
		return "High temperature";
	default:
		return "unrecognized error";
	}
}

static void print_health_info(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;
	char fw_str[18];
	u32 fw;
	int i;

	/* If the syndrome is 0, the device is OK and no need to print buffer */
	if (!ioread8(&h->synd))
		return;

	for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
		mlx5_core_err(dev, "assert_var[%d] 0x%08x\n", i,
			      ioread32be(h->assert_var + i));

	mlx5_core_err(dev, "assert_exit_ptr 0x%08x\n",
		      ioread32be(&h->assert_exit_ptr));
	mlx5_core_err(dev, "assert_callra 0x%08x\n",
		      ioread32be(&h->assert_callra));
	sprintf(fw_str, "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
	mlx5_core_err(dev, "fw_ver %s\n", fw_str);
	mlx5_core_err(dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
	mlx5_core_err(dev, "irisc_index %d\n", ioread8(&h->irisc_index));
	mlx5_core_err(dev, "synd 0x%x: %s\n", ioread8(&h->synd),
		      hsynd_str(ioread8(&h->synd)));
	mlx5_core_err(dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
	fw = ioread32be(&h->fw_ver);
	mlx5_core_err(dev, "raw fw_ver 0x%08x\n", fw);
}

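/* devlink "fw" health reporter: .diagnose exposes the current health syndrome
 * and its description; .dump triggers a FW core dump and attaches the health
 * buffer contents plus any saved FW tracer traces.
 */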
static int
mlx5_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
			  struct devlink_fmsg *fmsg)
{
	struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;
	u8 synd;
	int err;

	synd = ioread8(&h->synd);
	err = devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
	if (err || !synd)
		return err;
	return devlink_fmsg_string_pair_put(fmsg, "Description", hsynd_str(synd));
}

struct mlx5_fw_reporter_ctx {
	u8 err_synd;
	int miss_counter;
};

static int
mlx5_fw_reporter_ctx_pairs_put(struct devlink_fmsg *fmsg,
			       struct mlx5_fw_reporter_ctx *fw_reporter_ctx)
{
	int err;

	err = devlink_fmsg_u8_pair_put(fmsg, "syndrome",
				       fw_reporter_ctx->err_synd);
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "fw_miss_counter",
					fw_reporter_ctx->miss_counter);
	if (err)
		return err;
	return 0;
}

static int
mlx5_fw_reporter_heath_buffer_data_put(struct mlx5_core_dev *dev,
				       struct devlink_fmsg *fmsg)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;
	int err;
	int i;

	if (!ioread8(&h->synd))
		return 0;

	err = devlink_fmsg_pair_nest_start(fmsg, "health buffer");
	if (err)
		return err;
	err = devlink_fmsg_obj_nest_start(fmsg);
	if (err)
		return err;
	err = devlink_fmsg_arr_pair_nest_start(fmsg, "assert_var");
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) {
		err = devlink_fmsg_u32_put(fmsg, ioread32be(h->assert_var + i));
		if (err)
			return err;
	}
	err = devlink_fmsg_arr_pair_nest_end(fmsg);
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "assert_exit_ptr",
					ioread32be(&h->assert_exit_ptr));
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "assert_callra",
					ioread32be(&h->assert_callra));
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "hw_id", ioread32be(&h->hw_id));
	if (err)
		return err;
	err = devlink_fmsg_u8_pair_put(fmsg, "irisc_index",
				       ioread8(&h->irisc_index));
	if (err)
		return err;
	err = devlink_fmsg_u8_pair_put(fmsg, "synd", ioread8(&h->synd));
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "ext_synd",
					ioread16be(&h->ext_synd));
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "raw_fw_ver",
					ioread32be(&h->fw_ver));
	if (err)
		return err;
	err = devlink_fmsg_obj_nest_end(fmsg);
	if (err)
		return err;
	return devlink_fmsg_pair_nest_end(fmsg);
}

static int
mlx5_fw_reporter_dump(struct devlink_health_reporter *reporter,
		      struct devlink_fmsg *fmsg, void *priv_ctx)
{
	struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
	int err;

	err = mlx5_fw_tracer_trigger_core_dump_general(dev);
	if (err)
		return err;

	if (priv_ctx) {
		struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;

		err = mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx);
		if (err)
			return err;
	}

	err = mlx5_fw_reporter_heath_buffer_data_put(dev, fmsg);
	if (err)
		return err;
	return mlx5_fw_tracer_get_saved_traces_objects(dev->tracer, fmsg);
}

static void mlx5_fw_reporter_err_work(struct work_struct *work)
{
	struct mlx5_fw_reporter_ctx fw_reporter_ctx;
	struct mlx5_core_health *health;

	health = container_of(work, struct mlx5_core_health, report_work);

	if (IS_ERR_OR_NULL(health->fw_reporter))
		return;

	fw_reporter_ctx.err_synd = health->synd;
	fw_reporter_ctx.miss_counter = health->miss_counter;
	if (fw_reporter_ctx.err_synd) {
		devlink_health_report(health->fw_reporter,
				      "FW syndrome reported", &fw_reporter_ctx);
		return;
	}
	if (fw_reporter_ctx.miss_counter)
		devlink_health_report(health->fw_reporter,
				      "FW miss counter reported",
				      &fw_reporter_ctx);
}

static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = {
		.name = "fw",
		.diagnose = mlx5_fw_reporter_diagnose,
		.dump = mlx5_fw_reporter_dump,
};

static int
mlx5_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter,
			       void *priv_ctx)
{
	struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);

	return mlx5_health_try_recover(dev);
}

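/* devlink "fw_fatal" reporter dump: collect a crdump (PF only) and stream it
 * into the fmsg as binary chunks of MLX5_CR_DUMP_CHUNK_SIZE bytes, preceded by
 * the syndrome/miss-counter context when the dump was triggered by an error.
 */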
#define MLX5_CR_DUMP_CHUNK_SIZE 256
static int
mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
			    struct devlink_fmsg *fmsg, void *priv_ctx)
{
	struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
	u32 crdump_size = dev->priv.health.crdump_size;
	u32 *cr_data;
	u32 data_size;
	u32 offset;
	int err;

	if (!mlx5_core_is_pf(dev))
		return -EPERM;

	cr_data = kvmalloc(crdump_size, GFP_KERNEL);
	if (!cr_data)
		return -ENOMEM;
	err = mlx5_crdump_collect(dev, cr_data);
	if (err)
		goto free_data;

	if (priv_ctx) {
		struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;

		err = mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx);
		if (err)
			goto free_data;
	}

	err = devlink_fmsg_arr_pair_nest_start(fmsg, "crdump_data");
	if (err)
		goto free_data;
	for (offset = 0; offset < crdump_size; offset += data_size) {
		if (crdump_size - offset < MLX5_CR_DUMP_CHUNK_SIZE)
			data_size = crdump_size - offset;
		else
			data_size = MLX5_CR_DUMP_CHUNK_SIZE;
		err = devlink_fmsg_binary_put(fmsg, (char *)cr_data + offset,
					      data_size);
		if (err)
			goto free_data;
	}
	err = devlink_fmsg_arr_pair_nest_end(fmsg);

free_data:
	kvfree(cr_data);
	return err;
}

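/* Fatal-error work: move the device into the error state and hand the event to
 * the devlink "fw_fatal" reporter, which drives dump and recovery. If the
 * reporter was never created, fall back to recovering directly.
 */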
static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
{
	struct mlx5_fw_reporter_ctx fw_reporter_ctx;
	struct mlx5_core_health *health;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;

	health = container_of(work, struct mlx5_core_health, fatal_report_work);
	priv = container_of(health, struct mlx5_priv, health);
	dev = container_of(priv, struct mlx5_core_dev, priv);

	mlx5_enter_error_state(dev, false);
	if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) {
		if (mlx5_health_try_recover(dev))
			mlx5_core_err(dev, "health recovery failed\n");
		return;
	}
	fw_reporter_ctx.err_synd = health->synd;
	fw_reporter_ctx.miss_counter = health->miss_counter;
	devlink_health_report(health->fw_fatal_reporter,
			      "FW fatal error reported", &fw_reporter_ctx);
}

static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
		.name = "fw_fatal",
		.recover = mlx5_fw_fatal_reporter_recover,
		.dump = mlx5_fw_fatal_reporter_dump,
};

#define MLX5_REPORTER_FW_GRACEFUL_PERIOD 1200000
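/* Register the "fw" and "fw_fatal" devlink health reporters. The fatal
 * reporter is created with MLX5_REPORTER_FW_GRACEFUL_PERIOD (1200000 ms,
 * i.e. 20 minutes) as its grace period. Reporter creation failures are
 * logged but not treated as fatal.
 */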
static void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct devlink *devlink = priv_to_devlink(dev);

	health->fw_reporter =
		devlink_health_reporter_create(devlink, &mlx5_fw_reporter_ops,
					       0, false, dev);
	if (IS_ERR(health->fw_reporter))
		mlx5_core_warn(dev, "Failed to create fw reporter, err = %ld\n",
			       PTR_ERR(health->fw_reporter));

	health->fw_fatal_reporter =
		devlink_health_reporter_create(devlink,
					       &mlx5_fw_fatal_reporter_ops,
					       MLX5_REPORTER_FW_GRACEFUL_PERIOD,
					       true, dev);
	if (IS_ERR(health->fw_fatal_reporter))
		mlx5_core_warn(dev, "Failed to create fw fatal reporter, err = %ld\n",
			       PTR_ERR(health->fw_fatal_reporter));
}

static void mlx5_fw_reporters_destroy(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	if (!IS_ERR_OR_NULL(health->fw_reporter))
		devlink_health_reporter_destroy(health->fw_reporter);

	if (!IS_ERR_OR_NULL(health->fw_fatal_reporter))
		devlink_health_reporter_destroy(health->fw_fatal_reporter);
}

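/* Spread the next poll across [MLX5_HEALTH_POLL_INTERVAL,
 * MLX5_HEALTH_POLL_INTERVAL + 1s) so health timers of multiple devices do not
 * all fire at the same time.
 */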
static unsigned long get_next_poll_jiffies(void)
{
	unsigned long next;

	get_random_bytes(&next, sizeof(next));
	next %= HZ;
	next += jiffies + MLX5_HEALTH_POLL_INTERVAL;

	return next;
}

void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	unsigned long flags;

	spin_lock_irqsave(&health->wq_lock, flags);
	if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
		queue_work(health->wq, &health->fatal_report_work);
	else
		mlx5_core_err(dev, "new health works are not permitted at this stage\n");
	spin_unlock_irqrestore(&health->wq_lock, flags);
}

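/* Periodic health poll. Order of checks:
 * 1. Fatal sensors - on the first detection, record the error, print the
 *    health buffer and queue the fatal report work.
 * 2. Health counter - if it hasn't advanced for MAX_MISSES polls, the device
 *    is reported as compromised via the "fw" reporter.
 * 3. Syndrome - a new, non-zero syndrome also triggers the "fw" reporter.
 */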
static void poll_health(struct timer_list *t)
{
	struct mlx5_core_dev *dev = from_timer(dev, t, priv.health.timer);
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;
	u32 fatal_error;
	u8 prev_synd;
	u32 count;

	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		goto out;

	fatal_error = check_fatal_sensors(dev);

	if (fatal_error && !health->fatal_error) {
		mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
		dev->priv.health.fatal_error = fatal_error;
		print_health_info(dev);
		mlx5_trigger_health_work(dev);
		goto out;
	}

	count = ioread32be(health->health_counter);
	if (count == health->prev)
		++health->miss_counter;
	else
		health->miss_counter = 0;

	health->prev = count;
	if (health->miss_counter == MAX_MISSES) {
		mlx5_core_err(dev, "device's health compromised - reached miss count\n");
		print_health_info(dev);
		queue_work(health->wq, &health->report_work);
	}

	prev_synd = health->synd;
	health->synd = ioread8(&h->synd);
	if (health->synd && health->synd != prev_synd)
		queue_work(health->wq, &health->report_work);

out:
	mod_timer(&health->timer, get_next_poll_jiffies());
}

void mlx5_start_health_poll(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	timer_setup(&health->timer, poll_health, 0);
	health->fatal_error = MLX5_SENSOR_NO_ERR;
	clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
	health->health = &dev->iseg->health;
	health->health_counter = &dev->iseg->health_counter;

	health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL);
	add_timer(&health->timer);
}

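/* Stop the health timer. When disable_health is set, also raise
 * MLX5_DROP_NEW_HEALTH_WORK so that no new health work can be queued after
 * this point (see mlx5_trigger_health_work()).
 */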
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
{
	struct mlx5_core_health *health = &dev->priv.health;
	unsigned long flags;

	if (disable_health) {
		spin_lock_irqsave(&health->wq_lock, flags);
		set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
		spin_unlock_irqrestore(&health->wq_lock, flags);
	}

	del_timer_sync(&health->timer);
}

void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	unsigned long flags;

	spin_lock_irqsave(&health->wq_lock, flags);
	set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
	spin_unlock_irqrestore(&health->wq_lock, flags);
	cancel_work_sync(&health->report_work);
	cancel_work_sync(&health->fatal_report_work);
}

void mlx5_health_flush(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	flush_workqueue(health->wq);
}

void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	destroy_workqueue(health->wq);
	mlx5_fw_reporters_destroy(dev);
}

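/* Create the devlink reporters and a dedicated single-threaded workqueue,
 * named "mlx5_health" plus the device name, that runs the deferred health
 * work items (report_work and fatal_report_work).
 */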
int mlx5_health_init(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health;
	char *name;

	mlx5_fw_reporters_create(dev);

	health = &dev->priv.health;
	name = kmalloc(64, GFP_KERNEL);
	if (!name)
		goto out_err;

	strcpy(name, "mlx5_health");
	strcat(name, dev_name(dev->device));
	health->wq = create_singlethread_workqueue(name);
	kfree(name);
	if (!health->wq)
		goto out_err;
	spin_lock_init(&health->wq_lock);
	INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work);
	INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work);

	return 0;

out_err:
	mlx5_fw_reporters_destroy(dev);
	return -ENOMEM;
}