// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#define pr_fmt(fmt)			"habanalabs: " fmt

#include "habanalabs.h"

#include <linux/pci.h>
#include <linux/sched/signal.h>
#include <linux/hwmon.h>
#include <uapi/misc/habanalabs.h>

#define HL_PLDM_PENDING_RESET_PER_SEC	(HL_PENDING_RESET_PER_SEC * 10)
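/* hdev->pldm marks a simulation platform, where everything runs far slower
 * than on real silicon; hence the 10x larger reset-pending budget above.
 */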

bool hl_device_disabled_or_in_reset(struct hl_device *hdev)
{
	return hdev->disabled || atomic_read(&hdev->in_reset);
}

enum hl_device_status hl_device_status(struct hl_device *hdev)
{
	enum hl_device_status status;

	if (hdev->disabled)
		status = HL_DEVICE_STATUS_MALFUNCTION;
	else if (atomic_read(&hdev->in_reset))
		status = HL_DEVICE_STATUS_IN_RESET;
	else
		status = HL_DEVICE_STATUS_OPERATIONAL;

	return status;
}

static void hpriv_release(struct kref *ref)
{
	struct hl_fpriv *hpriv;
	struct hl_device *hdev;

	hpriv = container_of(ref, struct hl_fpriv, refcount);

	hdev = hpriv->hdev;

	put_pid(hpriv->taskpid);

	hl_debugfs_remove_file(hpriv);

	mutex_destroy(&hpriv->restore_phase_mutex);

	mutex_lock(&hdev->fpriv_list_lock);
	list_del(&hpriv->dev_node);
	hdev->compute_ctx = NULL;
	mutex_unlock(&hdev->fpriv_list_lock);

	kfree(hpriv);
}

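/* The private file data is reference-counted because work submitted through
 * the FD can outlive the FD itself: hpriv_release() above runs only when the
 * last reference is dropped, which may be after hl_device_release().
 */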
void hl_hpriv_get(struct hl_fpriv *hpriv)
{
	kref_get(&hpriv->refcount);
}

void hl_hpriv_put(struct hl_fpriv *hpriv)
{
	kref_put(&hpriv->refcount, hpriv_release);
}

/*
 * hl_device_release - release function for the habanalabs device
 *
 * @inode: pointer to inode structure
 * @filp: pointer to file structure
 *
 * Called when a process closes a habanalabs device
 */
static int hl_device_release(struct inode *inode, struct file *filp)
{
	struct hl_fpriv *hpriv = filp->private_data;

	hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr);
	hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);

	filp->private_data = NULL;

	hl_hpriv_put(hpriv);

	return 0;
}

static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
{
	struct hl_fpriv *hpriv = filp->private_data;
	struct hl_device *hdev;

	filp->private_data = NULL;

	hdev = hpriv->hdev;

	mutex_lock(&hdev->fpriv_list_lock);
	list_del(&hpriv->dev_node);
	mutex_unlock(&hdev->fpriv_list_lock);

	kfree(hpriv);

	return 0;
}

/*
 * hl_mmap - mmap function for the habanalabs device
 *
 * @filp: pointer to file structure
 * @vma: pointer to vm_area_struct of the process
 *
 * Called when a process does an mmap on a habanalabs device. Call the
 * device's mmap function at the end of the common code.
 */
static int hl_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct hl_fpriv *hpriv = filp->private_data;
	unsigned long vm_pgoff;

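	/* The mmap offset is overloaded: the high bits of vm_pgoff select the
	 * mmap type (e.g. HL_MMAP_TYPE_CB) and HL_MMAP_OFFSET_VALUE_GET()
	 * strips them off, leaving the per-type handle for the low-level
	 * mmap handler.
	 */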
	vm_pgoff = vma->vm_pgoff;
	vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);

	switch (vm_pgoff & HL_MMAP_TYPE_MASK) {
	case HL_MMAP_TYPE_CB:
		return hl_cb_mmap(hpriv, vma);
	}

	return -EINVAL;
}

static const struct file_operations hl_ops = {
	.owner = THIS_MODULE,
	.open = hl_device_open,
	.release = hl_device_release,
	.mmap = hl_mmap,
	.unlocked_ioctl = hl_ioctl,
	.compat_ioctl = hl_ioctl
};

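/* The control device deliberately exposes a reduced interface: no mmap and a
 * separate ioctl handler (hl_ioctl_control), so monitoring tools can query
 * the device without being able to submit work to it.
 */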
static const struct file_operations hl_ctrl_ops = {
	.owner = THIS_MODULE,
	.open = hl_device_open_ctrl,
	.release = hl_device_release_ctrl,
	.unlocked_ioctl = hl_ioctl_control,
	.compat_ioctl = hl_ioctl_control
};

static void device_release_func(struct device *dev)
{
	kfree(dev);
}

/*
 * device_init_cdev - Initialize cdev and device for habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 * @hclass: pointer to the class object of the device
 * @minor: minor number of the specific device
 * @fops: file operations to install for this device
 * @name: name of the device as it will appear in the filesystem
 * @cdev: pointer to the char device object that will be initialized
 * @dev: pointer to the device object that will be initialized
 *
 * Initialize a cdev and a Linux device for the habanalabs device.
 */
static int device_init_cdev(struct hl_device *hdev, struct class *hclass,
				int minor, const struct file_operations *fops,
				char *name, struct cdev *cdev,
				struct device **dev)
{
	cdev_init(cdev, fops);
	cdev->owner = THIS_MODULE;

	*dev = kzalloc(sizeof(**dev), GFP_KERNEL);
	if (!*dev)
		return -ENOMEM;

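	/* From device_initialize() onwards the struct device is
	 * reference-counted: it must be released with put_device(), which
	 * ends up calling device_release_func() above, and must never be
	 * freed with a bare kfree().
	 */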
	device_initialize(*dev);
	(*dev)->devt = MKDEV(hdev->major, minor);
	(*dev)->class = hclass;
	(*dev)->release = device_release_func;
	dev_set_drvdata(*dev, hdev);
	dev_set_name(*dev, "%s", name);

	return 0;
}

static int device_cdev_sysfs_add(struct hl_device *hdev)
{
	int rc;

	rc = cdev_device_add(&hdev->cdev, hdev->dev);
	if (rc) {
		dev_err(hdev->dev,
			"failed to add a char device to the system\n");
		return rc;
	}

	rc = cdev_device_add(&hdev->cdev_ctrl, hdev->dev_ctrl);
	if (rc) {
		dev_err(hdev->dev,
			"failed to add a control char device to the system\n");
		goto delete_cdev_device;
	}

	/* hl_sysfs_init() must be done after adding the device to the system */
	rc = hl_sysfs_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize sysfs\n");
		goto delete_ctrl_cdev_device;
	}

	hdev->cdev_sysfs_created = true;

	return 0;

delete_ctrl_cdev_device:
	cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
delete_cdev_device:
	cdev_device_del(&hdev->cdev, hdev->dev);
	return rc;
}

static void device_cdev_sysfs_del(struct hl_device *hdev)
{
	if (!hdev->cdev_sysfs_created)
		goto put_devices;

	hl_sysfs_fini(hdev);
	cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
	cdev_device_del(&hdev->cdev, hdev->dev);

put_devices:
	put_device(hdev->dev);
	put_device(hdev->dev_ctrl);
}

/*
 * device_early_init - do some early initialization for the habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Install the relevant function pointers and call the early_init function,
 * if such a function exists
 */
static int device_early_init(struct hl_device *hdev)
{
	int i, rc;
	char workq_name[32];

	switch (hdev->asic_type) {
	case ASIC_GOYA:
		goya_set_asic_funcs(hdev);
		strlcpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name));
		break;
	case ASIC_GAUDI:
		gaudi_set_asic_funcs(hdev);
		strlcpy(hdev->asic_name, "GAUDI", sizeof(hdev->asic_name));
		break;
	default:
		dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
			hdev->asic_type);
		return -EINVAL;
	}

	rc = hdev->asic_funcs->early_init(hdev);
	if (rc)
		return rc;

	rc = hl_asid_init(hdev);
	if (rc)
		goto early_fini;

	if (hdev->asic_prop.completion_queues_count) {
		hdev->cq_wq = kcalloc(hdev->asic_prop.completion_queues_count,
				sizeof(*hdev->cq_wq),
				GFP_ATOMIC);
		if (!hdev->cq_wq) {
			rc = -ENOMEM;
			goto asid_fini;
		}
	}

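	/* One single-threaded workqueue per completion queue, so that the
	 * job-completion work items of a given CQ are processed strictly in
	 * order.
	 */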
	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
		snprintf(workq_name, sizeof(workq_name), "hl-free-jobs-%u",
				(u32) i);
		hdev->cq_wq[i] = create_singlethread_workqueue(workq_name);
		if (!hdev->cq_wq[i]) {
			dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
			rc = -ENOMEM;
			goto free_cq_wq;
		}
	}

	hdev->eq_wq = alloc_workqueue("hl-events", WQ_UNBOUND, 0);
	if (!hdev->eq_wq) {
		dev_err(hdev->dev, "Failed to allocate EQ workqueue\n");
		rc = -ENOMEM;
		goto free_cq_wq;
	}

	hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info),
					GFP_KERNEL);
	if (!hdev->hl_chip_info) {
		rc = -ENOMEM;
		goto free_eq_wq;
	}

	hdev->idle_busy_ts_arr = kmalloc_array(HL_IDLE_BUSY_TS_ARR_SIZE,
					sizeof(struct hl_device_idle_busy_ts),
					GFP_KERNEL | __GFP_ZERO);
	if (!hdev->idle_busy_ts_arr) {
		rc = -ENOMEM;
		goto free_chip_info;
	}

	rc = hl_mmu_if_set_funcs(hdev);
	if (rc)
		goto free_idle_busy_ts_arr;

	hl_cb_mgr_init(&hdev->kernel_cb_mgr);

	mutex_init(&hdev->send_cpu_message_lock);
	mutex_init(&hdev->debug_lock);
	mutex_init(&hdev->mmu_cache_lock);
	INIT_LIST_HEAD(&hdev->hw_queues_mirror_list);
	spin_lock_init(&hdev->hw_queues_mirror_lock);
	INIT_LIST_HEAD(&hdev->fpriv_list);
	mutex_init(&hdev->fpriv_list_lock);
	atomic_set(&hdev->in_reset, 0);

	return 0;

free_idle_busy_ts_arr:
	kfree(hdev->idle_busy_ts_arr);
free_chip_info:
	kfree(hdev->hl_chip_info);
free_eq_wq:
	destroy_workqueue(hdev->eq_wq);
free_cq_wq:
	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		if (hdev->cq_wq[i])
			destroy_workqueue(hdev->cq_wq[i]);
	kfree(hdev->cq_wq);
asid_fini:
	hl_asid_fini(hdev);
early_fini:
	if (hdev->asic_funcs->early_fini)
		hdev->asic_funcs->early_fini(hdev);

	return rc;
}

/*
 * device_early_fini - finalize all that was done in device_early_init
 *
 * @hdev: pointer to habanalabs device structure
 *
 */
static void device_early_fini(struct hl_device *hdev)
{
	int i;

	mutex_destroy(&hdev->mmu_cache_lock);
	mutex_destroy(&hdev->debug_lock);
	mutex_destroy(&hdev->send_cpu_message_lock);

	mutex_destroy(&hdev->fpriv_list_lock);

	hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);

	kfree(hdev->idle_busy_ts_arr);
	kfree(hdev->hl_chip_info);

	destroy_workqueue(hdev->eq_wq);

	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		destroy_workqueue(hdev->cq_wq[i]);
	kfree(hdev->cq_wq);

	hl_asid_fini(hdev);

	if (hdev->asic_funcs->early_fini)
		hdev->asic_funcs->early_fini(hdev);
}

static void set_freq_to_low_job(struct work_struct *work)
{
	struct hl_device *hdev = container_of(work, struct hl_device,
						work_freq.work);

	mutex_lock(&hdev->fpriv_list_lock);

	if (!hdev->compute_ctx)
		hl_device_set_frequency(hdev, PLL_LOW);

	mutex_unlock(&hdev->fpriv_list_lock);

	schedule_delayed_work(&hdev->work_freq,
			usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
}

static void hl_device_heartbeat(struct work_struct *work)
{
	struct hl_device *hdev = container_of(work, struct hl_device,
						work_heartbeat.work);

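	/* send_heartbeat() returns 0 when the device CPU answered the
	 * heartbeat packet; any other value means the F/W stopped responding,
	 * in which case we escalate to a hard reset instead of rescheduling.
	 */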
	if (hl_device_disabled_or_in_reset(hdev))
		goto reschedule;

	if (!hdev->asic_funcs->send_heartbeat(hdev))
		goto reschedule;

	dev_err(hdev->dev, "Device heartbeat failed!\n");
	hl_device_reset(hdev, true, false);

	return;

reschedule:
	schedule_delayed_work(&hdev->work_heartbeat,
			usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
}

/*
 * device_late_init - do late initialization for the habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Handle initialization that either needs the device H/W queues to be active
 * or must happen after all the rest of the initialization is finished
 */
static int device_late_init(struct hl_device *hdev)
{
	int rc;

	if (hdev->asic_funcs->late_init) {
		rc = hdev->asic_funcs->late_init(hdev);
		if (rc) {
			dev_err(hdev->dev,
				"failed late initialization for the H/W\n");
			return rc;
		}
	}

	hdev->high_pll = hdev->asic_prop.high_pll;

	/* force setting to low frequency */
	hdev->curr_pll_profile = PLL_LOW;

	if (hdev->pm_mng_profile == PM_AUTO)
		hdev->asic_funcs->set_pll_profile(hdev, PLL_LOW);
	else
		hdev->asic_funcs->set_pll_profile(hdev, PLL_LAST);

	INIT_DELAYED_WORK(&hdev->work_freq, set_freq_to_low_job);
	schedule_delayed_work(&hdev->work_freq,
			usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));

	if (hdev->heartbeat) {
		INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);
		schedule_delayed_work(&hdev->work_heartbeat,
				usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
	}

	hdev->late_init_done = true;

	return 0;
}

/*
 * device_late_fini - finalize all that was done in device_late_init
 *
 * @hdev: pointer to habanalabs device structure
 *
 */
static void device_late_fini(struct hl_device *hdev)
{
	if (!hdev->late_init_done)
		return;

	cancel_delayed_work_sync(&hdev->work_freq);
	if (hdev->heartbeat)
		cancel_delayed_work_sync(&hdev->work_heartbeat);

	if (hdev->asic_funcs->late_fini)
		hdev->asic_funcs->late_fini(hdev);

	hdev->late_init_done = false;
}

uint32_t hl_device_utilization(struct hl_device *hdev, uint32_t period_ms)
{
	struct hl_device_idle_busy_ts *ts;
	ktime_t zero_ktime, curr = ktime_get();
	u32 overlap_cnt = 0, last_index = hdev->idle_busy_ts_idx;
	s64 period_us, last_start_us, last_end_us, last_busy_time_us,
		total_busy_time_us = 0, total_busy_time_ms;

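	/* Walk backwards through the circular idle/busy timestamp array,
	 * summing the busy intervals that overlap the last period_ms of wall
	 * time; the oldest overlapping interval is clipped at the window
	 * start. E.g. for period_ms = 1000 and a single 300ms busy span
	 * inside the window, the function returns 30 (percent).
	 */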
	zero_ktime = ktime_set(0, 0);
	period_us = period_ms * USEC_PER_MSEC;
	ts = &hdev->idle_busy_ts_arr[last_index];

	/* Check the case where the device is currently idle */
	if (!ktime_compare(ts->busy_to_idle_ts, zero_ktime) &&
			!ktime_compare(ts->idle_to_busy_ts, zero_ktime)) {

		last_index--;
		/* last_index is unsigned, so decrementing 0 wraps around;
		 * detect that and move to the end of the circular array
		 */
		if (last_index > HL_IDLE_BUSY_TS_ARR_SIZE)
			last_index = HL_IDLE_BUSY_TS_ARR_SIZE - 1;

		ts = &hdev->idle_busy_ts_arr[last_index];
	}

	while (overlap_cnt < HL_IDLE_BUSY_TS_ARR_SIZE) {
		/* Check if we are in the last-sample case, i.e. if the sample
		 * began before the sampling period. This could be a real
		 * sample or 0, so we need to handle both cases
		 */
		last_start_us = ktime_to_us(
				ktime_sub(curr, ts->idle_to_busy_ts));

		if (last_start_us > period_us) {

			/* First check two cases:
			 * 1. If the device is currently busy
			 * 2. If the device was idle during the whole sampling
			 *    period
			 */

			if (!ktime_compare(ts->busy_to_idle_ts, zero_ktime)) {
				/* Check if the device is currently busy */
				if (ktime_compare(ts->idle_to_busy_ts,
						zero_ktime))
					return 100;

				/* We either didn't have any activity or we
				 * reached an entry which is 0. Either way,
				 * exit and return what was accumulated so far
				 */
				break;
			}

			/* If the sample has finished, check that it is
			 * relevant
			 */
			last_end_us = ktime_to_us(
					ktime_sub(curr, ts->busy_to_idle_ts));

			if (last_end_us > period_us)
				break;

			/* It is relevant, so add it, but count only the part
			 * that overlaps the sampling window
			 */
			last_busy_time_us = ktime_to_us(
						ktime_sub(ts->busy_to_idle_ts,
						ts->idle_to_busy_ts));
			total_busy_time_us += last_busy_time_us -
					(last_start_us - period_us);
			break;
		}

		/* Check if the sample is finished or still open */
		if (ktime_compare(ts->busy_to_idle_ts, zero_ktime))
			last_busy_time_us = ktime_to_us(
						ktime_sub(ts->busy_to_idle_ts,
						ts->idle_to_busy_ts));
		else
			last_busy_time_us = ktime_to_us(
					ktime_sub(curr, ts->idle_to_busy_ts));

		total_busy_time_us += last_busy_time_us;

		last_index--;
		/* last_index is unsigned, so decrementing 0 wraps around;
		 * detect that and move to the end of the circular array
		 */
		if (last_index > HL_IDLE_BUSY_TS_ARR_SIZE)
			last_index = HL_IDLE_BUSY_TS_ARR_SIZE - 1;

		ts = &hdev->idle_busy_ts_arr[last_index];

		overlap_cnt++;
	}

	total_busy_time_ms = DIV_ROUND_UP_ULL(total_busy_time_us,
						USEC_PER_MSEC);

	return DIV_ROUND_UP_ULL(total_busy_time_ms * 100, period_ms);
}

/*
 * hl_device_set_frequency - set the frequency of the device
 *
 * @hdev: pointer to habanalabs device structure
 * @freq: the new frequency value
 *
 * Change the frequency if needed. This function provides no protection
 * against concurrency, so the caller must ensure it is not invoked from
 * multiple threads with different values at the same time
 *
 * Returns 0 if no change was done, otherwise returns 1
 */
int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq)
{
	if ((hdev->pm_mng_profile == PM_MANUAL) ||
			(hdev->curr_pll_profile == freq))
		return 0;

	dev_dbg(hdev->dev, "Changing device frequency to %s\n",
		freq == PLL_HIGH ? "high" : "low");

	hdev->asic_funcs->set_pll_profile(hdev, freq);

	hdev->curr_pll_profile = freq;

	return 1;
}

int hl_device_set_debug_mode(struct hl_device *hdev, bool enable)
{
	int rc = 0;

	mutex_lock(&hdev->debug_lock);

	if (!enable) {
		if (!hdev->in_debug) {
			dev_err(hdev->dev,
				"Failed to disable debug mode because device was not in debug mode\n");
			rc = -EFAULT;
			goto out;
		}

		if (!hdev->hard_reset_pending)
			hdev->asic_funcs->halt_coresight(hdev);

		hdev->in_debug = 0;

		if (!hdev->hard_reset_pending)
			hdev->asic_funcs->set_clock_gating(hdev);

		goto out;
	}

	if (hdev->in_debug) {
		dev_err(hdev->dev,
			"Failed to enable debug mode because device is already in debug mode\n");
		rc = -EFAULT;
		goto out;
	}

	hdev->asic_funcs->disable_clock_gating(hdev);
	hdev->in_debug = 1;

out:
	mutex_unlock(&hdev->debug_lock);

	return rc;
}

/*
 * hl_device_suspend - initiate device suspend
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Puts the H/W in the suspend state (all ASICs).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int hl_device_suspend(struct hl_device *hdev)
{
	int rc;

	pci_save_state(hdev->pdev);

	/* Block future CS/VM/JOB completion operations */
	rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
	if (rc) {
		dev_err(hdev->dev, "Can't suspend while in reset\n");
		return -EIO;
	}

	/* This blocks everything else that is not blocked by in_reset */
	hdev->disabled = true;

	/*
	 * Flush anyone that is inside the critical section of enqueue
	 * jobs to the H/W
	 */
	hdev->asic_funcs->hw_queues_lock(hdev);
	hdev->asic_funcs->hw_queues_unlock(hdev);

	/* Flush processes that are sending message to CPU */
	mutex_lock(&hdev->send_cpu_message_lock);
	mutex_unlock(&hdev->send_cpu_message_lock);

	rc = hdev->asic_funcs->suspend(hdev);
	if (rc)
		dev_err(hdev->dev,
			"Failed to disable PCI access of device CPU\n");

	/* Shut down the device */
	pci_disable_device(hdev->pdev);
	pci_set_power_state(hdev->pdev, PCI_D3hot);

	return 0;
}

/*
 * hl_device_resume - initiate device resume
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Bring the H/W back to operating state (all ASICs).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int hl_device_resume(struct hl_device *hdev)
{
	int rc;

	pci_set_power_state(hdev->pdev, PCI_D0);
	pci_restore_state(hdev->pdev);
	rc = pci_enable_device_mem(hdev->pdev);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to enable PCI device in resume\n");
		return rc;
	}

	pci_set_master(hdev->pdev);

	rc = hdev->asic_funcs->resume(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to resume device after suspend\n");
		goto disable_device;
	}

	hdev->disabled = false;
	atomic_set(&hdev->in_reset, 0);

	rc = hl_device_reset(hdev, true, false);
	if (rc) {
		dev_err(hdev->dev, "Failed to reset device during resume\n");
		goto disable_device;
	}

	return 0;

disable_device:
	pci_clear_master(hdev->pdev);
	pci_disable_device(hdev->pdev);

	return rc;
}

static int device_kill_open_processes(struct hl_device *hdev)
{
	u16 pending_total, pending_cnt;
	struct hl_fpriv	*hpriv;
	struct task_struct *task = NULL;

	if (hdev->pldm)
		pending_total = HL_PLDM_PENDING_RESET_PER_SEC;
	else
		pending_total = HL_PENDING_RESET_PER_SEC;

	/* Give the user time to close the FD, and let processes that are
	 * inside hl_device_open finish
	 */
	if (!list_empty(&hdev->fpriv_list))
		ssleep(1);

	mutex_lock(&hdev->fpriv_list_lock);

	/* This section must be protected because we are dereferencing
	 * pointers that are freed if the process exits
	 */
	list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) {
		task = get_pid_task(hpriv->taskpid, PIDTYPE_PID);
		if (task) {
			dev_info(hdev->dev, "Killing user process pid=%d\n",
				task_pid_nr(task));
			send_sig(SIGKILL, task, 1);
			usleep_range(1000, 10000);

			put_task_struct(task);
		}
	}

	mutex_unlock(&hdev->fpriv_list_lock);

	/* We killed the open users, but because the driver cleans up after the
	 * user contexts are closed (e.g. mmu mappings), we need to wait again
	 * to make sure the cleaning phase is finished before continuing with
	 * the reset
	 */

	pending_cnt = pending_total;

	while ((!list_empty(&hdev->fpriv_list)) && (pending_cnt)) {
		dev_info(hdev->dev,
			"Waiting for all unmap operations to finish before hard reset\n");

		pending_cnt--;

		ssleep(1);
	}

	return list_empty(&hdev->fpriv_list) ? 0 : -EBUSY;
}

static void device_hard_reset_pending(struct work_struct *work)
{
	struct hl_device_reset_work *device_reset_work =
		container_of(work, struct hl_device_reset_work, reset_work);
	struct hl_device *hdev = device_reset_work->hdev;

	hl_device_reset(hdev, true, true);

	kfree(device_reset_work);
}

/*
 * hl_device_reset - reset the device
 *
 * @hdev: pointer to habanalabs device structure
 * @hard_reset: should we do a hard reset of all engines, or only reset the
 *              compute/DMA engines
 * @from_hard_reset_thread: is the caller the hard-reset thread
 *
 * Block future CS and wait for pending CS to be enqueued
 * Call ASIC H/W fini
 * Flush all completions
 * Re-initialize all internal data structures
 * Call ASIC H/W init, late_init
 * Test queues
 * Enable device
 *
 * Returns 0 for success or an error on failure.
 */
int hl_device_reset(struct hl_device *hdev, bool hard_reset,
			bool from_hard_reset_thread)
{
	int i, rc;

	if (!hdev->init_done) {
		dev_err(hdev->dev,
			"Can't reset before initialization is done\n");
		return 0;
	}

	if ((!hard_reset) && (!hdev->supports_soft_reset)) {
		dev_dbg(hdev->dev, "Doing hard-reset instead of soft-reset\n");
		hard_reset = true;
	}

	/*
	 * Prevent concurrency in this function - only one reset should be
	 * done at any given time. We only need to perform this if we weren't
	 * called from the dedicated hard-reset thread
	 */
	if (!from_hard_reset_thread) {
		/* Block future CS/VM/JOB completion operations */
		rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
		if (rc)
			return 0;

		if (hard_reset) {
			/* Disable PCI access from device F/W so it won't send
			 * us additional interrupts. We disable MSI/MSI-X at
			 * the halt_engines function and we can't have the F/W
			 * sending us interrupts after that. We need to disable
			 * the access here because if the device is marked
			 * disabled, the message won't be sent. Also, in case
			 * of heartbeat, the device CPU is marked as disabled
			 * so this message won't be sent
			 */
			if (hl_fw_send_pci_access_msg(hdev,
					CPUCP_PACKET_DISABLE_PCI_ACCESS))
				dev_warn(hdev->dev,
					"Failed to disable PCI access by F/W\n");
		}

		/* This also blocks future CS/VM/JOB completion operations */
		hdev->disabled = true;

		/* Flush anyone that is inside the critical section of enqueue
		 * jobs to the H/W
		 */
		hdev->asic_funcs->hw_queues_lock(hdev);
		hdev->asic_funcs->hw_queues_unlock(hdev);

		/* Flush anyone that is inside device open */
		mutex_lock(&hdev->fpriv_list_lock);
		mutex_unlock(&hdev->fpriv_list_lock);

		dev_err(hdev->dev, "Going to RESET device!\n");
	}

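	/* A failed soft-reset jumps back here from the out_err path below,
	 * with hard_reset forced to true, so the escalation re-runs the full
	 * flow
	 */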
again:
	if ((hard_reset) && (!from_hard_reset_thread)) {
		struct hl_device_reset_work *device_reset_work;

		hdev->hard_reset_pending = true;

		device_reset_work = kzalloc(sizeof(*device_reset_work),
						GFP_ATOMIC);
		if (!device_reset_work) {
			rc = -ENOMEM;
			goto out_err;
		}

		/*
		 * Because the reset function can't run from interrupt context
		 * or from the heartbeat work, we need to call the reset
		 * function from a dedicated work item
		 */
		INIT_WORK(&device_reset_work->reset_work,
				device_hard_reset_pending);
		device_reset_work->hdev = hdev;
		schedule_work(&device_reset_work->reset_work);

		return 0;
	}

	if (hard_reset) {
		device_late_fini(hdev);

		/*
		 * Now that the heartbeat thread is closed, flush processes
		 * which are sending messages to CPU
		 */
		mutex_lock(&hdev->send_cpu_message_lock);
		mutex_unlock(&hdev->send_cpu_message_lock);
	}

	/*
	 * Halt the engines and disable interrupts so we won't get any more
	 * completions from H/W and we won't have any accesses from the
	 * H/W to the host machine
	 */
	hdev->asic_funcs->halt_engines(hdev, hard_reset);

	/* Go over all the queues, release all CS and their jobs */
	hl_cs_rollback_all(hdev);

	if (hard_reset) {
		/* Kill processes here after CS rollback. This is because the
		 * process can't really exit until all its CSs are done, which
		 * is what we do in cs rollback
		 */
		rc = device_kill_open_processes(hdev);
		if (rc) {
			dev_crit(hdev->dev,
				"Failed to kill all open processes, stopping hard reset\n");
			goto out_err;
		}

		/* Flush the Event queue workers to make sure no other thread is
		 * reading or writing to registers during the reset
		 */
		flush_workqueue(hdev->eq_wq);
	}

	/* Reset the H/W. It will be in idle state after this returns */
	hdev->asic_funcs->hw_fini(hdev, hard_reset);

	if (hard_reset) {
		/* Release kernel context */
		if (hl_ctx_put(hdev->kernel_ctx) == 1)
			hdev->kernel_ctx = NULL;
		hl_vm_fini(hdev);
		hl_mmu_fini(hdev);
		hl_eq_reset(hdev, &hdev->event_queue);
	}

	/* Re-initialize PI,CI to 0 in all queues (hw queue, cq) */
	hl_hw_queue_reset(hdev, hard_reset);
	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		hl_cq_reset(hdev, &hdev->completion_queue[i]);

	hdev->idle_busy_ts_idx = 0;
	hdev->idle_busy_ts_arr[0].busy_to_idle_ts = ktime_set(0, 0);
	hdev->idle_busy_ts_arr[0].idle_to_busy_ts = ktime_set(0, 0);

	if (hdev->cs_active_cnt)
		dev_crit(hdev->dev, "CS active cnt %d is not 0 during reset\n",
			hdev->cs_active_cnt);

	mutex_lock(&hdev->fpriv_list_lock);

	/* Make sure the context switch phase will run again */
	if (hdev->compute_ctx) {
		atomic_set(&hdev->compute_ctx->thread_ctx_switch_token, 1);
		hdev->compute_ctx->thread_ctx_switch_wait_token = 0;
	}

	mutex_unlock(&hdev->fpriv_list_lock);

	/* Finished tear-down, starting to re-initialize */

	if (hard_reset) {
		hdev->device_cpu_disabled = false;
		hdev->hard_reset_pending = false;

		if (hdev->kernel_ctx) {
			dev_crit(hdev->dev,
				"kernel ctx was alive during hard reset, something is terribly wrong\n");
			rc = -EBUSY;
			goto out_err;
		}

		rc = hl_mmu_init(hdev);
		if (rc) {
			dev_err(hdev->dev,
				"Failed to initialize MMU S/W after hard reset\n");
			goto out_err;
		}

		/* Allocate the kernel context */
		hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx),
						GFP_KERNEL);
		if (!hdev->kernel_ctx) {
			rc = -ENOMEM;
			goto out_err;
		}

		hdev->compute_ctx = NULL;

		rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
		if (rc) {
			dev_err(hdev->dev,
				"failed to init kernel ctx in hard reset\n");
			kfree(hdev->kernel_ctx);
			hdev->kernel_ctx = NULL;
			goto out_err;
		}
	}

	/* The device is re-enabled now, because part of the initialization
	 * requires communicating with the device firmware to get information
	 * that is needed for the initialization itself
	 */
	hdev->disabled = false;

	rc = hdev->asic_funcs->hw_init(hdev);
	if (rc) {
		dev_err(hdev->dev,
			"failed to initialize the H/W after reset\n");
		goto out_err;
	}

	/* Check that the communication with the device is working */
	rc = hdev->asic_funcs->test_queues(hdev);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to detect if device is alive after reset\n");
		goto out_err;
	}

	if (hard_reset) {
		rc = device_late_init(hdev);
		if (rc) {
			dev_err(hdev->dev,
				"Failed late init after hard reset\n");
			goto out_err;
		}

		rc = hl_vm_init(hdev);
		if (rc) {
			dev_err(hdev->dev,
				"Failed to init memory module after hard reset\n");
			goto out_err;
		}

		hl_set_max_power(hdev);
	} else {
		rc = hdev->asic_funcs->soft_reset_late_init(hdev);
		if (rc) {
			dev_err(hdev->dev,
				"Failed late init after soft reset\n");
			goto out_err;
		}
	}

	atomic_set(&hdev->in_reset, 0);

	if (hard_reset)
		hdev->hard_reset_cnt++;
	else
		hdev->soft_reset_cnt++;

	dev_warn(hdev->dev, "Successfully finished resetting the device\n");

	return 0;

out_err:
	hdev->disabled = true;

	if (hard_reset) {
		dev_err(hdev->dev,
			"Failed to reset! Device is NOT usable\n");
		hdev->hard_reset_cnt++;
	} else {
		dev_err(hdev->dev,
			"Failed to do soft-reset, trying hard reset\n");
		hdev->soft_reset_cnt++;
		hard_reset = true;
		goto again;
	}

	atomic_set(&hdev->in_reset, 0);

	return rc;
}

/*
 * hl_device_init - main initialization function for the habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Allocate an id for the device, do early initialization and then call the
 * ASIC specific initialization functions. Finally, create the cdev and the
 * Linux device to expose it to the user
 */
int hl_device_init(struct hl_device *hdev, struct class *hclass)
{
	int i, rc, cq_cnt, cq_ready_cnt;
	char *name;
	bool add_cdev_sysfs_on_err = false;

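	/* Each accelerator consumes two consecutive device ids (compute +
	 * control), so the user-visible index of the node is id / 2
	 */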
	name = kasprintf(GFP_KERNEL, "hl%d", hdev->id / 2);
	if (!name) {
		rc = -ENOMEM;
		goto out_disabled;
	}

	/* Initialize cdev and device structures */
	rc = device_init_cdev(hdev, hclass, hdev->id, &hl_ops, name,
				&hdev->cdev, &hdev->dev);

	kfree(name);

	if (rc)
		goto out_disabled;

	name = kasprintf(GFP_KERNEL, "hl_controlD%d", hdev->id / 2);
	if (!name) {
		rc = -ENOMEM;
		goto free_dev;
	}

	/* Initialize cdev and device structures for control device */
	rc = device_init_cdev(hdev, hclass, hdev->id_control, &hl_ctrl_ops,
				name, &hdev->cdev_ctrl, &hdev->dev_ctrl);

	kfree(name);

	if (rc)
		goto free_dev;

	/* Initialize ASIC function pointers and perform early init */
	rc = device_early_init(hdev);
	if (rc)
		goto free_dev_ctrl;

	/*
	 * Start calling ASIC initialization. First S/W then H/W and finally
	 * late init
	 */
	rc = hdev->asic_funcs->sw_init(hdev);
	if (rc)
		goto early_fini;

	/*
	 * Initialize the H/W queues. Must be done before hw_init, because
	 * there the addresses of the kernel queue are being written to the
	 * registers of the device
	 */
	rc = hl_hw_queues_create(hdev);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize kernel queues\n");
		goto sw_fini;
	}

	cq_cnt = hdev->asic_prop.completion_queues_count;

	/*
	 * Initialize the completion queues. Must be done before hw_init,
	 * because there the addresses of the completion queues are being
	 * passed as arguments to request_irq
	 */
	if (cq_cnt) {
		hdev->completion_queue = kcalloc(cq_cnt,
				sizeof(*hdev->completion_queue),
				GFP_KERNEL);

		if (!hdev->completion_queue) {
			dev_err(hdev->dev,
				"failed to allocate completion queues\n");
			rc = -ENOMEM;
			goto hw_queues_destroy;
		}
	}

	for (i = 0, cq_ready_cnt = 0 ; i < cq_cnt ; i++, cq_ready_cnt++) {
		rc = hl_cq_init(hdev, &hdev->completion_queue[i],
				hdev->asic_funcs->get_queue_id_for_cq(hdev, i));
		if (rc) {
			dev_err(hdev->dev,
				"failed to initialize completion queue\n");
			goto cq_fini;
		}
		hdev->completion_queue[i].cq_idx = i;
	}

	/*
	 * Initialize the event queue. Must be done before hw_init,
	 * because there the address of the event queue is being
	 * passed as argument to request_irq
	 */
	rc = hl_eq_init(hdev, &hdev->event_queue);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize event queue\n");
		goto cq_fini;
	}

	/* MMU S/W must be initialized before kernel context is created */
	rc = hl_mmu_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to initialize MMU S/W structures\n");
		goto eq_fini;
	}

	/* Allocate the kernel context */
	hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx), GFP_KERNEL);
	if (!hdev->kernel_ctx) {
		rc = -ENOMEM;
		goto mmu_fini;
	}

	hdev->compute_ctx = NULL;

	rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize kernel context\n");
		kfree(hdev->kernel_ctx);
		goto mmu_fini;
	}

	rc = hl_cb_pool_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize CB pool\n");
		goto release_ctx;
	}

	hl_debugfs_add_device(hdev);

	if (hdev->asic_funcs->get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
		dev_info(hdev->dev,
			"H/W state is dirty, must reset before initializing\n");
		hdev->asic_funcs->halt_engines(hdev, true);
		hdev->asic_funcs->hw_fini(hdev, true);
	}

	/*
	 * From this point, in case of an error, add char devices and create
	 * sysfs nodes as part of the error flow, to allow debugging.
	 */
	add_cdev_sysfs_on_err = true;

	/* The device is enabled now, because part of the initialization
	 * requires communicating with the device firmware to get information
	 * that is needed for the initialization itself
	 */
	hdev->disabled = false;

	rc = hdev->asic_funcs->hw_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize the H/W\n");
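		/* From here on, failures are deliberately swallowed (rc is
		 * zeroed): the char devices and sysfs nodes are still created
		 * on the error path so the broken device can be inspected and
		 * debugged; see add_cdev_sysfs_on_err above
		 */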
		rc = 0;
		goto out_disabled;
	}

	/* Check that the communication with the device is working */
	rc = hdev->asic_funcs->test_queues(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to detect if device is alive\n");
		rc = 0;
		goto out_disabled;
	}

	rc = device_late_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed late initialization\n");
		rc = 0;
		goto out_disabled;
	}

	dev_info(hdev->dev, "Found %s device with %lluGB DRAM\n",
		hdev->asic_name,
		hdev->asic_prop.dram_size / 1024 / 1024 / 1024);

	rc = hl_vm_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to initialize memory module\n");
		rc = 0;
		goto out_disabled;
	}

	/*
	 * Expose devices and sysfs nodes to user.
	 * From here there is no need to add char devices and create sysfs nodes
	 * in case of an error.
	 */
	add_cdev_sysfs_on_err = false;
	rc = device_cdev_sysfs_add(hdev);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add char devices and sysfs nodes\n");
		rc = 0;
		goto out_disabled;
	}

	/* Need to call this again because the max power might change,
	 * depending on card type for certain ASICs
	 */
	hl_set_max_power(hdev);

	/*
	 * hl_hwmon_init() must be called after device_late_init(), because
	 * only then do we get the information from the device about which
	 * hwmon-related sensors the device supports.
	 * Furthermore, it must be done after adding the device to the system.
	 */
	rc = hl_hwmon_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to initialize hwmon\n");
		rc = 0;
		goto out_disabled;
	}

	dev_notice(hdev->dev,
		"Successfully added device to habanalabs driver\n");

	hdev->init_done = true;

	return 0;

release_ctx:
	if (hl_ctx_put(hdev->kernel_ctx) != 1)
		dev_err(hdev->dev,
			"kernel ctx is still alive on initialization failure\n");
mmu_fini:
	hl_mmu_fini(hdev);
eq_fini:
	hl_eq_fini(hdev, &hdev->event_queue);
cq_fini:
	for (i = 0 ; i < cq_ready_cnt ; i++)
		hl_cq_fini(hdev, &hdev->completion_queue[i]);
	kfree(hdev->completion_queue);
hw_queues_destroy:
	hl_hw_queues_destroy(hdev);
sw_fini:
	hdev->asic_funcs->sw_fini(hdev);
early_fini:
	device_early_fini(hdev);
free_dev_ctrl:
	put_device(hdev->dev_ctrl);
free_dev:
	put_device(hdev->dev);
out_disabled:
	hdev->disabled = true;
	if (add_cdev_sysfs_on_err)
		device_cdev_sysfs_add(hdev);
	if (hdev->pdev)
		dev_err(&hdev->pdev->dev,
			"Failed to initialize hl%d. Device is NOT usable!\n",
			hdev->id / 2);
	else
		pr_err("Failed to initialize hl%d. Device is NOT usable!\n",
			hdev->id / 2);

	return rc;
}

/*
 * hl_device_fini - main tear-down function for the habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Destroy the device, call ASIC fini functions and release the id
 */
void hl_device_fini(struct hl_device *hdev)
{
	int i, rc;
	ktime_t timeout;

	dev_info(hdev->dev, "Removing device\n");

	/*
	 * This function is competing with the reset function, so try to
	 * take the reset atomic and if we are already in middle of reset,
	 * wait until reset function is finished. Reset function is designed
	 * to always finish. However, in Gaudi, because of all the network
	 * ports, the hard reset could take between 10-30 seconds
	 */

	timeout = ktime_add_us(ktime_get(),
				HL_HARD_RESET_MAX_TIMEOUT * 1000 * 1000);
	rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
	while (rc) {
		usleep_range(50, 200);
		rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
		if (ktime_compare(ktime_get(), timeout) > 0) {
			WARN(1, "Failed to remove device because reset function did not finish\n");
			return;
		}
	}

	/* Mark device as disabled */
	hdev->disabled = true;

	/* Flush anyone that is inside the critical section of enqueue
	 * jobs to the H/W
	 */
	hdev->asic_funcs->hw_queues_lock(hdev);
	hdev->asic_funcs->hw_queues_unlock(hdev);

	/* Flush anyone that is inside device open */
	mutex_lock(&hdev->fpriv_list_lock);
	mutex_unlock(&hdev->fpriv_list_lock);

	hdev->hard_reset_pending = true;

	hl_hwmon_fini(hdev);

	device_late_fini(hdev);

	hl_debugfs_remove_device(hdev);

	/*
	 * Halt the engines and disable interrupts so we won't get any more
	 * completions from H/W and we won't have any accesses from the
	 * H/W to the host machine
	 */
	hdev->asic_funcs->halt_engines(hdev, true);

	/* Go over all the queues, release all CS and their jobs */
	hl_cs_rollback_all(hdev);

	/* Kill processes here after CS rollback. This is because the process
	 * can't really exit until all its CSs are done, which is what we
	 * do in cs rollback
	 */
	rc = device_kill_open_processes(hdev);
	if (rc)
		dev_crit(hdev->dev, "Failed to kill all open processes\n");

	hl_cb_pool_fini(hdev);

	/* Reset the H/W. It will be in idle state after this returns */
	hdev->asic_funcs->hw_fini(hdev, true);

	/* Release kernel context */
	if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1))
		dev_err(hdev->dev, "kernel ctx is still alive\n");

	hl_vm_fini(hdev);

	hl_mmu_fini(hdev);

	hl_eq_fini(hdev, &hdev->event_queue);

	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		hl_cq_fini(hdev, &hdev->completion_queue[i]);
	kfree(hdev->completion_queue);

	hl_hw_queues_destroy(hdev);

	/* Call ASIC S/W finalize function */
	hdev->asic_funcs->sw_fini(hdev);

	device_early_fini(hdev);

	/* Hide devices and sysfs nodes from user */
	device_cdev_sysfs_del(hdev);

	pr_info("removed device successfully\n");
}

/*
 * MMIO register access helper functions.
 */

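/* ASIC code is expected to go through the RREG32()/WREG32() convenience
 * macros in habanalabs.h rather than calling these helpers directly.
 */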
/*
 * hl_rreg - Read an MMIO register
 *
 * @hdev: pointer to habanalabs device structure
 * @reg: MMIO register offset (in bytes)
 *
 * Returns the value of the MMIO register we are asked to read
 *
 */
inline u32 hl_rreg(struct hl_device *hdev, u32 reg)
{
	return readl(hdev->rmmio + reg);
}

/*
 * hl_wreg - Write to an MMIO register
 *
 * @hdev: pointer to habanalabs device structure
 * @reg: MMIO register offset (in bytes)
 * @val: 32-bit value
 *
 * Writes the 32-bit value into the MMIO register
 *
 */
inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val)
{
	writel(val, hdev->rmmio + reg);
}