// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2023 Advanced Micro Devices, Inc */

#include <linux/pci.h>
#include <linux/vmalloc.h>

#include "core.h"

static BLOCKING_NOTIFIER_HEAD(pds_notify_chain);

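/**
 * pdsc_register_notify - register for pds_core event notifications
 * @nb: notifier block to add to the chain
 *
 * Callbacks on the chain are invoked by pdsc_notify() when events
 * such as PDS_EVENT_RESET are posted.
 */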
int pdsc_register_notify(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&pds_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(pdsc_register_notify);

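/**
 * pdsc_unregister_notify - remove a notifier from event notifications
 * @nb: notifier block previously added with pdsc_register_notify()
 */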
void pdsc_unregister_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&pds_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(pdsc_unregister_notify);

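/* Post an event and its payload to all registered notifier callbacks */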
void pdsc_notify(unsigned long event, void *data)
{
	blocking_notifier_call_chain(&pds_notify_chain, event, data);
}

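/**
 * pdsc_intr_free - return an interrupt slot to the pool
 * @pdsc: core device struct
 * @index: interrupt index to free
 *
 * Masks and cleans the device's interrupt control for the slot,
 * frees the OS irq, and clears the intr_info entry for reuse.
 */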
void pdsc_intr_free(struct pdsc *pdsc, int index)
{
	struct pdsc_intr_info *intr_info;

	if (index >= pdsc->nintrs || index < 0) {
		WARN(true, "bad intr index %d\n", index);
		return;
	}

	intr_info = &pdsc->intr_info[index];
	if (!intr_info->vector)
		return;
	dev_dbg(pdsc->dev, "%s: idx %d vec %d name %s\n",
		__func__, index, intr_info->vector, intr_info->name);

	pds_core_intr_mask(&pdsc->intr_ctrl[index], PDS_CORE_INTR_MASK_SET);
	pds_core_intr_clean(&pdsc->intr_ctrl[index]);

	free_irq(intr_info->vector, intr_info->data);

	memset(intr_info, 0, sizeof(*intr_info));
}

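/**
 * pdsc_intr_alloc - claim the next free interrupt slot and request its irq
 * @pdsc: core device struct
 * @name: name to attach to the irq
 * @handler: isr to be run for this interrupt
 * @data: context to be passed to the handler
 *
 * The slot is left masked on the device side; the caller unmasks it
 * when ready (see pdsc_start()).
 *
 * Return: the interrupt index on success, or a negative errno.
 */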
int pdsc_intr_alloc(struct pdsc *pdsc, char *name,
		    irq_handler_t handler, void *data)
{
	struct pdsc_intr_info *intr_info;
	unsigned int index;
	int err;

	/* Find the first available interrupt */
	for (index = 0; index < pdsc->nintrs; index++)
		if (!pdsc->intr_info[index].vector)
			break;
	if (index >= pdsc->nintrs) {
		dev_warn(pdsc->dev, "%s: no intr, index=%d nintrs=%d\n",
			 __func__, index, pdsc->nintrs);
		return -ENOSPC;
	}

	pds_core_intr_clean_flags(&pdsc->intr_ctrl[index],
				  PDS_CORE_INTR_CRED_RESET_COALESCE);

	intr_info = &pdsc->intr_info[index];

	intr_info->index = index;
	intr_info->data = data;
	strscpy(intr_info->name, name, sizeof(intr_info->name));

	/* Get the OS vector number for the interrupt */
	err = pci_irq_vector(pdsc->pdev, index);
	if (err < 0) {
		dev_err(pdsc->dev, "failed to get intr vector index %d: %pe\n",
			index, ERR_PTR(err));
		goto err_out_free_intr;
	}
	intr_info->vector = err;

	/* Init the device's intr mask */
	pds_core_intr_clean(&pdsc->intr_ctrl[index]);
	pds_core_intr_mask_assert(&pdsc->intr_ctrl[index], 1);
	pds_core_intr_mask(&pdsc->intr_ctrl[index], PDS_CORE_INTR_MASK_SET);

	/* Register the isr with a name */
	err = request_irq(intr_info->vector, handler, 0, intr_info->name, data);
	if (err) {
		dev_err(pdsc->dev, "failed to get intr irq vector %d: %pe\n",
			intr_info->vector, ERR_PTR(err));
		goto err_out_free_intr;
	}

	return index;

err_out_free_intr:
	pdsc_intr_free(pdsc, index);
	return err;
}

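/* Release the qcq's interrupt, if one is assigned, and mark it unassigned */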
static void pdsc_qcq_intr_free(struct pdsc *pdsc, struct pdsc_qcq *qcq)
{
	if (!(qcq->flags & PDS_CORE_QCQ_F_INTR) ||
	    qcq->intx == PDS_CORE_INTR_INDEX_NOT_ASSIGNED)
		return;

	pdsc_intr_free(pdsc, qcq->intx);
	qcq->intx = PDS_CORE_INTR_INDEX_NOT_ASSIGNED;
}

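/* Allocate a named interrupt for the qcq if PDS_CORE_QCQ_F_INTR is set,
 * otherwise just mark the interrupt as not assigned.
 */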
static int pdsc_qcq_intr_alloc(struct pdsc *pdsc, struct pdsc_qcq *qcq)
{
	char name[PDSC_INTR_NAME_MAX_SZ];
	int index;

	if (!(qcq->flags & PDS_CORE_QCQ_F_INTR)) {
		qcq->intx = PDS_CORE_INTR_INDEX_NOT_ASSIGNED;
		return 0;
	}

	snprintf(name, sizeof(name), "%s-%d-%s",
		 PDS_CORE_DRV_NAME, pdsc->pdev->bus->number, qcq->q.name);
	index = pdsc_intr_alloc(pdsc, name, pdsc_adminq_isr, qcq);
	if (index < 0)
		return index;
	qcq->intx = index;

	return 0;
}

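/**
 * pdsc_qcq_free - tear down a queue/completion queue pair
 * @pdsc: core device struct
 * @qcq: qcq to be freed
 *
 * Reverses pdsc_qcq_alloc(): removes the debugfs entry, frees the
 * interrupt, the DMA rings, and the info arrays, then zeroes the qcq.
 */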
void pdsc_qcq_free(struct pdsc *pdsc, struct pdsc_qcq *qcq)
{
	struct device *dev = pdsc->dev;

	if (!(qcq && qcq->pdsc))
		return;

	pdsc_debugfs_del_qcq(qcq);

	pdsc_qcq_intr_free(pdsc, qcq);

	if (qcq->q_base)
		dma_free_coherent(dev, qcq->q_size,
				  qcq->q_base, qcq->q_base_pa);

	if (qcq->cq_base)
		dma_free_coherent(dev, qcq->cq_size,
				  qcq->cq_base, qcq->cq_base_pa);

	vfree(qcq->cq.info);
	vfree(qcq->q.info);

	memset(qcq, 0, sizeof(*qcq));
}

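/* Point each queue info entry at its descriptor in the DMA ring */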
static void pdsc_q_map(struct pdsc_queue *q, void *base, dma_addr_t base_pa)
{
	struct pdsc_q_info *cur;
	unsigned int i;

	q->base = base;
	q->base_pa = base_pa;

	for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
		cur->desc = base + (i * q->desc_size);
}

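/* Point each completion info entry at its slot in the cq DMA ring */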
static void pdsc_cq_map(struct pdsc_cq *cq, void *base, dma_addr_t base_pa)
{
	struct pdsc_cq_info *cur;
	unsigned int i;

	cq->base = base;
	cq->base_pa = base_pa;

	for (i = 0, cur = cq->info; i < cq->num_descs; i++, cur++)
		cur->comp = base + (i * cq->desc_size);
}

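/**
 * pdsc_qcq_alloc - allocate and initialize a queue/completion queue pair
 * @pdsc: core device struct
 * @type: device queue type
 * @index: queue index
 * @name: base name for the queue
 * @flags: PDS_CORE_QCQ_F_* flags controlling the setup
 * @num_descs: number of descriptors in the q and cq rings
 * @desc_size: size of a queue descriptor
 * @cq_desc_size: size of a completion descriptor
 * @pid: pid value to record in the q struct
 * @qcq: qcq struct to be filled out
 *
 * A notifyq gets a single contiguous DMA allocation covering both
 * rings; other queue types get separate q and cq allocations.  All
 * rings are page aligned.  On failure, everything set up here is
 * unwound before returning.
 *
 * Return: 0 on success, or a negative errno.
 */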
int pdsc_qcq_alloc(struct pdsc *pdsc, unsigned int type, unsigned int index,
		   const char *name, unsigned int flags, unsigned int num_descs,
		   unsigned int desc_size, unsigned int cq_desc_size,
		   unsigned int pid, struct pdsc_qcq *qcq)
{
	struct device *dev = pdsc->dev;
	void *q_base, *cq_base;
	dma_addr_t cq_base_pa;
	dma_addr_t q_base_pa;
	int err;

	qcq->q.info = vcalloc(num_descs, sizeof(*qcq->q.info));
	if (!qcq->q.info) {
		err = -ENOMEM;
		goto err_out;
	}

	qcq->pdsc = pdsc;
	qcq->flags = flags;
	INIT_WORK(&qcq->work, pdsc_work_thread);

	qcq->q.type = type;
	qcq->q.index = index;
	qcq->q.num_descs = num_descs;
	qcq->q.desc_size = desc_size;
	qcq->q.tail_idx = 0;
	qcq->q.head_idx = 0;
	qcq->q.pid = pid;
	snprintf(qcq->q.name, sizeof(qcq->q.name), "%s%u", name, index);

	err = pdsc_qcq_intr_alloc(pdsc, qcq);
	if (err)
		goto err_out_free_q_info;

	qcq->cq.info = vcalloc(num_descs, sizeof(*qcq->cq.info));
	if (!qcq->cq.info) {
		err = -ENOMEM;
		goto err_out_free_irq;
	}

	qcq->cq.bound_intr = &pdsc->intr_info[qcq->intx];
	qcq->cq.num_descs = num_descs;
	qcq->cq.desc_size = cq_desc_size;
	qcq->cq.tail_idx = 0;
	qcq->cq.done_color = 1;

	if (flags & PDS_CORE_QCQ_F_NOTIFYQ) {
		/* q & cq need to be contiguous in case of notifyq */
		qcq->q_size = PDS_PAGE_SIZE +
			      ALIGN(num_descs * desc_size, PDS_PAGE_SIZE) +
			      ALIGN(num_descs * cq_desc_size, PDS_PAGE_SIZE);
		qcq->q_base = dma_alloc_coherent(dev,
						 qcq->q_size + qcq->cq_size,
						 &qcq->q_base_pa,
						 GFP_KERNEL);
		if (!qcq->q_base) {
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		q_base = PTR_ALIGN(qcq->q_base, PDS_PAGE_SIZE);
		q_base_pa = ALIGN(qcq->q_base_pa, PDS_PAGE_SIZE);
		pdsc_q_map(&qcq->q, q_base, q_base_pa);

		cq_base = PTR_ALIGN(q_base +
				    ALIGN(num_descs * desc_size, PDS_PAGE_SIZE),
				    PDS_PAGE_SIZE);
		cq_base_pa = ALIGN(qcq->q_base_pa +
				   ALIGN(num_descs * desc_size, PDS_PAGE_SIZE),
				   PDS_PAGE_SIZE);

	} else {
		/* q DMA descriptors */
		qcq->q_size = PDS_PAGE_SIZE + (num_descs * desc_size);
		qcq->q_base = dma_alloc_coherent(dev, qcq->q_size,
						 &qcq->q_base_pa,
						 GFP_KERNEL);
		if (!qcq->q_base) {
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		q_base = PTR_ALIGN(qcq->q_base, PDS_PAGE_SIZE);
		q_base_pa = ALIGN(qcq->q_base_pa, PDS_PAGE_SIZE);
		pdsc_q_map(&qcq->q, q_base, q_base_pa);

		/* cq DMA descriptors */
		qcq->cq_size = PDS_PAGE_SIZE + (num_descs * cq_desc_size);
		qcq->cq_base = dma_alloc_coherent(dev, qcq->cq_size,
						  &qcq->cq_base_pa,
						  GFP_KERNEL);
		if (!qcq->cq_base) {
			err = -ENOMEM;
			goto err_out_free_q;
		}
		cq_base = PTR_ALIGN(qcq->cq_base, PDS_PAGE_SIZE);
		cq_base_pa = ALIGN(qcq->cq_base_pa, PDS_PAGE_SIZE);
	}

	pdsc_cq_map(&qcq->cq, cq_base, cq_base_pa);
	qcq->cq.bound_q = &qcq->q;

	pdsc_debugfs_add_qcq(pdsc, qcq);

	return 0;

err_out_free_q:
	dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
err_out_free_cq_info:
	vfree(qcq->cq.info);
err_out_free_irq:
	pdsc_qcq_intr_free(pdsc, qcq);
err_out_free_q_info:
	vfree(qcq->q.info);
	memset(qcq, 0, sizeof(*qcq));
err_out:
	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
	return err;
}

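/* Send the INIT devcmd with the adminq/notifyq ring details, then use
 * the returned data to finish the queue setup and map the kernel
 * doorbell page.
 */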
static int pdsc_core_init(struct pdsc *pdsc)
{
	union pds_core_dev_comp comp = {};
	union pds_core_dev_cmd cmd = {
		.init.opcode = PDS_CORE_CMD_INIT,
	};
	struct pds_core_dev_init_data_out cido;
	struct pds_core_dev_init_data_in cidi;
	u32 dbid_count;
	u32 dbpage_num;
	size_t sz;
	int err;

	cidi.adminq_q_base = cpu_to_le64(pdsc->adminqcq.q_base_pa);
	cidi.adminq_cq_base = cpu_to_le64(pdsc->adminqcq.cq_base_pa);
	cidi.notifyq_cq_base = cpu_to_le64(pdsc->notifyqcq.cq.base_pa);
	cidi.flags = cpu_to_le32(PDS_CORE_QINIT_F_IRQ | PDS_CORE_QINIT_F_ENA);
	cidi.intr_index = cpu_to_le16(pdsc->adminqcq.intx);
	cidi.adminq_ring_size = ilog2(pdsc->adminqcq.q.num_descs);
	cidi.notifyq_ring_size = ilog2(pdsc->notifyqcq.q.num_descs);

	mutex_lock(&pdsc->devcmd_lock);

	sz = min_t(size_t, sizeof(cidi), sizeof(pdsc->cmd_regs->data));
	memcpy_toio(&pdsc->cmd_regs->data, &cidi, sz);

	err = pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
	if (!err) {
		sz = min_t(size_t, sizeof(cido), sizeof(pdsc->cmd_regs->data));
		memcpy_fromio(&cido, &pdsc->cmd_regs->data, sz);
	}

	mutex_unlock(&pdsc->devcmd_lock);
	if (err) {
		dev_err(pdsc->dev, "Device init command failed: %pe\n",
			ERR_PTR(err));
		return err;
	}

	pdsc->hw_index = le32_to_cpu(cido.core_hw_index);

	dbid_count = le32_to_cpu(pdsc->dev_ident.ndbpgs_per_lif);
	dbpage_num = pdsc->hw_index * dbid_count;
	pdsc->kern_dbpage = pdsc_map_dbpage(pdsc, dbpage_num);
	if (!pdsc->kern_dbpage) {
		dev_err(pdsc->dev, "Cannot map dbpage, aborting\n");
		return -ENOMEM;
	}

	pdsc->adminqcq.q.hw_type = cido.adminq_hw_type;
	pdsc->adminqcq.q.hw_index = le32_to_cpu(cido.adminq_hw_index);
	pdsc->adminqcq.q.dbval = PDS_CORE_DBELL_QID(pdsc->adminqcq.q.hw_index);

	pdsc->notifyqcq.q.hw_type = cido.notifyq_hw_type;
	pdsc->notifyqcq.q.hw_index = le32_to_cpu(cido.notifyq_hw_index);
	pdsc->notifyqcq.q.dbval = PDS_CORE_DBELL_QID(pdsc->notifyqcq.q.hw_index);

	pdsc->last_eid = 0;

	return err;
}

static struct pdsc_viftype pdsc_viftype_defaults[] = {
	[PDS_DEV_TYPE_VDPA] = { .name = PDS_DEV_TYPE_VDPA_STR,
				.vif_id = PDS_DEV_TYPE_VDPA,
				.dl_id = DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET },
	[PDS_DEV_TYPE_MAX] = {}
};

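/* Copy the supported VIF type defaults and record which of them the
 * device says it supports in dev_ident.vif_types.
 */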
static int pdsc_viftypes_init(struct pdsc *pdsc)
{
	enum pds_core_vif_types vt;

	pdsc->viftype_status = kzalloc(sizeof(pdsc_viftype_defaults),
				       GFP_KERNEL);
	if (!pdsc->viftype_status)
		return -ENOMEM;

	for (vt = 0; vt < PDS_DEV_TYPE_MAX; vt++) {
		bool vt_support;

		if (!pdsc_viftype_defaults[vt].name)
			continue;

		/* Grab the defaults */
		pdsc->viftype_status[vt] = pdsc_viftype_defaults[vt];

		/* See what the Core device has for support */
		vt_support = !!le16_to_cpu(pdsc->dev_ident.vif_types[vt]);
		dev_dbg(pdsc->dev, "VIF %s is %ssupported\n",
			pdsc->viftype_status[vt].name,
			vt_support ? "" : "not ");

		pdsc->viftype_status[vt].supported = vt_support;
	}

	return 0;
}

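/**
 * pdsc_setup - build the basic device resources
 * @pdsc: core device struct
 * @init: true for first-time init, false for a reinit during recovery
 *
 * Initializes (or reinitializes) the device, allocates the adminq and
 * notifyq pairs, runs the core INIT sequence, and sets up the VIF type
 * table.  On success the PDSC_S_FW_DEAD state bit is cleared.
 *
 * Return: 0 on success, or a negative errno.
 */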
int pdsc_setup(struct pdsc *pdsc, bool init)
{
	int numdescs;
	int err;

	if (init)
		err = pdsc_dev_init(pdsc);
	else
		err = pdsc_dev_reinit(pdsc);
	if (err)
		return err;

	/* Scale the descriptor ring length based on number of CPUs and VFs */
	numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus());
	numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev);
	numdescs = roundup_pow_of_two(numdescs);
	err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq",
			     PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR,
			     numdescs,
			     sizeof(union pds_core_adminq_cmd),
			     sizeof(union pds_core_adminq_comp),
			     0, &pdsc->adminqcq);
	if (err)
		goto err_out_teardown;

	err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_NOTIFYQ, 0, "notifyq",
			     PDS_CORE_QCQ_F_NOTIFYQ,
			     PDSC_NOTIFYQ_LENGTH,
			     sizeof(struct pds_core_notifyq_cmd),
			     sizeof(union pds_core_notifyq_comp),
			     0, &pdsc->notifyqcq);
	if (err)
		goto err_out_teardown;

	/* NotifyQ rides on the AdminQ interrupt */
	pdsc->notifyqcq.intx = pdsc->adminqcq.intx;

	/* Set up the Core with the AdminQ and NotifyQ info */
	err = pdsc_core_init(pdsc);
	if (err)
		goto err_out_teardown;

	/* Set up the VIFs */
	err = pdsc_viftypes_init(pdsc);
	if (err)
		goto err_out_teardown;

	if (init)
		pdsc_debugfs_add_viftype(pdsc);

	clear_bit(PDSC_S_FW_DEAD, &pdsc->state);
	return 0;

err_out_teardown:
	pdsc_teardown(pdsc, init);
	return err;
}

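/**
 * pdsc_teardown - release the resources built by pdsc_setup()
 * @pdsc: core device struct
 * @removing: true when the driver is being removed; false in recovery,
 *            where the intr_info array is kept for reuse
 */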
void pdsc_teardown(struct pdsc *pdsc, bool removing)
{
	int i;

	if (!pdsc->pdev->is_virtfn)
		pdsc_devcmd_reset(pdsc);
	pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
	pdsc_qcq_free(pdsc, &pdsc->adminqcq);

	kfree(pdsc->viftype_status);
	pdsc->viftype_status = NULL;

	if (pdsc->intr_info) {
		for (i = 0; i < pdsc->nintrs; i++)
			pdsc_intr_free(pdsc, i);

		if (removing) {
			kfree(pdsc->intr_info);
			pdsc->intr_info = NULL;
		}
	}

	if (pdsc->kern_dbpage) {
		iounmap(pdsc->kern_dbpage);
		pdsc->kern_dbpage = NULL;
	}

	set_bit(PDSC_S_FW_DEAD, &pdsc->state);
}

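/* Unmask the adminq interrupt so device interactions can begin */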
int pdsc_start(struct pdsc *pdsc)
{
	pds_core_intr_mask(&pdsc->intr_ctrl[pdsc->adminqcq.intx],
			   PDS_CORE_INTR_MASK_CLEAR);

	return 0;
}

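/* Mask every interrupt that is in use, quiescing the device */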
void pdsc_stop(struct pdsc *pdsc)
{
	int i;

	if (!pdsc->intr_info)
		return;

	/* Mask interrupts that are in use */
	for (i = 0; i < pdsc->nintrs; i++)
		if (pdsc->intr_info[i].vector)
			pds_core_intr_mask(&pdsc->intr_ctrl[i],
					   PDS_CORE_INTR_MASK_SET);
}

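/* Handle a firmware-down transition: report through devlink health,
 * send clients a PDS_EVENT_RESET (state 0) notification, then stop
 * and tear down in recovery mode.
 */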
static void pdsc_fw_down(struct pdsc *pdsc)
{
	union pds_core_notifyq_comp reset_event = {
		.reset.ecode = cpu_to_le16(PDS_EVENT_RESET),
		.reset.state = 0,
	};

	if (test_and_set_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
		dev_err(pdsc->dev, "%s: already happening\n", __func__);
		return;
	}

	/* Notify clients of fw_down */
	if (pdsc->fw_reporter)
		devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc);
	pdsc_notify(PDS_EVENT_RESET, &reset_event);

	pdsc_stop(pdsc);
	pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
}

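/* Handle firmware recovery: rebuild and restart the device, then send
 * clients a PDS_EVENT_RESET (state 1) notification.
 */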
static void pdsc_fw_up(struct pdsc *pdsc)
{
	union pds_core_notifyq_comp reset_event = {
		.reset.ecode = cpu_to_le16(PDS_EVENT_RESET),
		.reset.state = 1,
	};
	int err;

	if (!test_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
		dev_err(pdsc->dev, "%s: fw not dead\n", __func__);
		return;
	}

	err = pdsc_setup(pdsc, PDSC_SETUP_RECOVERY);
	if (err)
		goto err_out;

	err = pdsc_start(pdsc);
	if (err)
		goto err_out;

	/* Notify clients of fw_up */
	pdsc->fw_recoveries++;
	if (pdsc->fw_reporter)
		devlink_health_reporter_state_update(pdsc->fw_reporter,
						     DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
	pdsc_notify(PDS_EVENT_RESET, &reset_event);

	return;

err_out:
	pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
}

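/**
 * pdsc_health_thread - work function for checking firmware health
 * @work: the health_work member of the struct pdsc
 *
 * Compares the current firmware health against the PDSC_S_FW_DEAD
 * state bit and drives the fw_down/fw_up transitions as needed,
 * skipping the check entirely while the driver is starting or
 * stopping.
 */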
void pdsc_health_thread(struct work_struct *work)
{
	struct pdsc *pdsc = container_of(work, struct pdsc, health_work);
	unsigned long mask;
	bool healthy;

	mutex_lock(&pdsc->config_lock);

	/* Don't do a check when in a transition state */
	mask = BIT_ULL(PDSC_S_INITING_DRIVER) |
	       BIT_ULL(PDSC_S_STOPPING_DRIVER);
	if (pdsc->state & mask)
		goto out_unlock;

	healthy = pdsc_is_fw_good(pdsc);
	dev_dbg(pdsc->dev, "%s: health %d fw_status %#02x fw_heartbeat %d\n",
		__func__, healthy, pdsc->fw_status, pdsc->last_hb);

	if (test_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
		if (healthy)
			pdsc_fw_up(pdsc);
	} else {
		if (!healthy)
			pdsc_fw_down(pdsc);
	}

	pdsc->fw_generation = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION;

out_unlock:
	mutex_unlock(&pdsc->config_lock);
}