/*
 * Copyright 2014 Cisco Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "snic.h"
#include "snic_fwint.h"

#define PCI_DEVICE_ID_CISCO_SNIC	0x0046

/* Supported devices by snic module */
static struct pci_device_id snic_id_table[] = {
	{PCI_DEVICE(0x1137, PCI_DEVICE_ID_CISCO_SNIC) },
	{ 0, }	/* end of table */
};

unsigned int snic_log_level = 0x0;
module_param(snic_log_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(snic_log_level, "bitmask for snic logging levels");

#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
unsigned int snic_trace_max_pages = 16;
module_param(snic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(snic_trace_max_pages,
		"Total allocated memory pages for snic trace buffer");

#endif
unsigned int snic_max_qdepth = SNIC_DFLT_QUEUE_DEPTH;
module_param(snic_max_qdepth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(snic_max_qdepth, "Queue depth to report for each LUN");

/*
 * snic_slave_alloc : callback function to SCSI Mid Layer, called on
 * scsi device initialization.
 */
static int
snic_slave_alloc(struct scsi_device *sdev)
{
	struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));

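	/* Reject the device if its target has gone away or is not yet ready. */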
	if (!tgt || snic_tgt_chkready(tgt))
		return -ENXIO;

	return 0;
}

/*
 * snic_slave_configure : callback function to SCSI Mid Layer, called on
 * scsi device initialization.
 */
static int
snic_slave_configure(struct scsi_device *sdev)
{
	struct snic *snic = shost_priv(sdev->host);
	u32 qdepth = 0, max_ios = 0;
	int tmo = SNIC_DFLT_CMD_TIMEOUT * HZ;

	/* Set Queue Depth */
	max_ios = snic_max_qdepth;
	qdepth = min_t(u32, max_ios, SNIC_MAX_QUEUE_DEPTH);
	scsi_change_queue_depth(sdev, qdepth);

	if (snic->fwinfo.io_tmo > 1)
		tmo = snic->fwinfo.io_tmo * HZ;

	/* FW requires extended timeouts */
	blk_queue_rq_timeout(sdev->request_queue, tmo);

	return 0;
}

static int
snic_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct snic *snic = shost_priv(sdev->host);
	int qsz = 0;

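	/*
	 * Clamp the requested depth to the driver maximum and record
	 * whether this is a ramp-up or ramp-down in the misc statistics.
	 */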
	qsz = min_t(u32, qdepth, SNIC_MAX_QUEUE_DEPTH);
	if (qsz < sdev->queue_depth)
		atomic64_inc(&snic->s_stats.misc.qsz_rampdown);
	else if (qsz > sdev->queue_depth)
		atomic64_inc(&snic->s_stats.misc.qsz_rampup);

	atomic64_set(&snic->s_stats.misc.last_qsz, sdev->queue_depth);

	scsi_change_queue_depth(sdev, qsz);

	return sdev->queue_depth;
}

static struct scsi_host_template snic_host_template = {
	.module = THIS_MODULE,
	.name = SNIC_DRV_NAME,
	.queuecommand = snic_queuecommand,
	.eh_abort_handler = snic_abort_cmd,
	.eh_device_reset_handler = snic_device_reset,
	.eh_host_reset_handler = snic_host_reset,
	.slave_alloc = snic_slave_alloc,
	.slave_configure = snic_slave_configure,
	.change_queue_depth = snic_change_queue_depth,
	.this_id = -1,
	.cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH,
	.can_queue = SNIC_MAX_IO_REQ,
	.use_clustering = ENABLE_CLUSTERING,
	.sg_tablesize = SNIC_MAX_SG_DESC_CNT,
	.max_sectors = 0x800,
	.shost_attrs = snic_attrs,
	.track_queue_depth = 1,
	.cmd_size = sizeof(struct snic_internal_io_state),
	.proc_name = "snic_scsi",
};

/*
 * snic_handle_link_event : Handles link events such as link up/down/error
 */
void
snic_handle_link_event(struct snic *snic)
{
	unsigned long flags;

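	/*
	 * Drop the event if link event handling has been stopped (for
	 * example during remove); otherwise defer it to the global event
	 * workqueue.
	 */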
	spin_lock_irqsave(&snic->snic_lock, flags);
	if (snic->stop_link_events) {
		spin_unlock_irqrestore(&snic->snic_lock, flags);

		return;
	}
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	queue_work(snic_glob->event_q, &snic->link_work);
} /* end of snic_handle_link_event */

/*
 * snic_notify_set : sets up the notification area used to receive events
 * from the firmware.
 * Note: snic supports only MSI-X interrupts, so svnic_dev_notify_set can
 * be called directly.
 */
static int
snic_notify_set(struct snic *snic)
{
	int ret = 0;
	enum vnic_dev_intr_mode intr_mode;

	intr_mode = svnic_dev_get_intr_mode(snic->vdev);

	if (intr_mode == VNIC_DEV_INTR_MODE_MSIX) {
		ret = svnic_dev_notify_set(snic->vdev, SNIC_MSIX_ERR_NOTIFY);
	} else {
		SNIC_HOST_ERR(snic->shost,
			      "Interrupt mode should be setup before devcmd notify set %d\n",
			      intr_mode);
		ret = -1;
	}

	return ret;
} /* end of snic_notify_set */

/*
 * snic_dev_wait : polls vnic open status.
 */
static int
snic_dev_wait(struct vnic_dev *vdev,
		int (*start)(struct vnic_dev *, int),
		int (*finished)(struct vnic_dev *, int *),
		int arg)
{
	unsigned long time;
	int ret, done;
	int retry_cnt = 0;

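	/* Kick off the operation; bail out if the devcmd itself fails. */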
	ret = start(vdev, arg);
	if (ret)
		return ret;

	/*
	 * Wait for func to complete...2 seconds max.
	 *
	 * Sometimes schedule_timeout_uninterruptible() takes a long time
	 * to wake up, which results in skipping the retry. The retry
	 * counter ensures at least two retries.
	 */
	time = jiffies + (HZ * 2);
	do {
		ret = finished(vdev, &done);
		if (ret)
			return ret;

		if (done)
			return 0;
		schedule_timeout_uninterruptible(HZ/10);
		++retry_cnt;
	} while (time_after(time, jiffies) || (retry_cnt < 3));

	return -ETIMEDOUT;
} /* end of snic_dev_wait */

/*
 * snic_cleanup : called by snic_remove.
 * Stops the snic device, masks all interrupts, and drains completed CQ
 * entries. Posted WQ/RQ/Copy-WQ entries are cleaned up.
 */
static int
snic_cleanup(struct snic *snic)
{
	unsigned int i;
	int ret;

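	/* Quiesce the device and mask all interrupts before draining queues. */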
	svnic_dev_disable(snic->vdev);
	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_mask(&snic->intr[i]);

	for (i = 0; i < snic->wq_count; i++) {
		ret = svnic_wq_disable(&snic->wq[i]);
		if (ret)
			return ret;
	}

	/* Clean up completed IOs */
	snic_fwcq_cmpl_handler(snic, -1);

	snic_wq_cmpl_handler(snic, -1);

	/* Clean up the IOs that have not completed */
	for (i = 0; i < snic->wq_count; i++)
		svnic_wq_clean(&snic->wq[i], snic_free_wq_buf);

	for (i = 0; i < snic->cq_count; i++)
		svnic_cq_clean(&snic->cq[i]);

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_clean(&snic->intr[i]);

	/* Cleanup snic specific requests */
	snic_free_all_untagged_reqs(snic);

	/* Cleanup Pending SCSI commands */
	snic_shutdown_scsi_cleanup(snic);

	for (i = 0; i < SNIC_REQ_MAX_CACHES; i++)
		mempool_destroy(snic->req_pool[i]);

	return 0;
} /* end of snic_cleanup */


static void
snic_iounmap(struct snic *snic)
{
	if (snic->bar0.vaddr)
		iounmap(snic->bar0.vaddr);
}

/*
 * snic_vdev_open_done : polls for svnic_dev_open cmd completion.
 */
static int
snic_vdev_open_done(struct vnic_dev *vdev, int *done)
{
	struct snic *snic = svnic_dev_priv(vdev);
	int ret;
	int nretries = 5;

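	/*
	 * svnic_dev_open_done() may report a timeout; retry a bounded
	 * number of times before giving up.
	 */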
	do {
		ret = svnic_dev_open_done(vdev, done);
		if (ret == 0)
			break;

		SNIC_HOST_INFO(snic->shost, "VNIC_DEV_OPEN Timedout.\n");
	} while (nretries--);

	return ret;
} /* end of snic_vdev_open_done */

/*
 * snic_add_host : registers scsi host with ML
 */
static int
snic_add_host(struct Scsi_Host *shost, struct pci_dev *pdev)
{
	int ret = 0;

	ret = scsi_add_host(shost, &pdev->dev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "snic: scsi_add_host failed. %d\n",
			      ret);

		return ret;
	}

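	/* Create a per-host single threaded workqueue for deferred SCSI work. */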
	SNIC_BUG_ON(shost->work_q != NULL);
	snprintf(shost->work_q_name, sizeof(shost->work_q_name), "scsi_wq_%d",
		 shost->host_no);
	shost->work_q = create_singlethread_workqueue(shost->work_q_name);
	if (!shost->work_q) {
		SNIC_HOST_ERR(shost, "Failed to Create ScsiHost wq.\n");

		ret = -ENOMEM;
	}

	return ret;
} /* end of snic_add_host */

static void
snic_del_host(struct Scsi_Host *shost)
{
	if (!shost->work_q)
		return;

	destroy_workqueue(shost->work_q);
	shost->work_q = NULL;
	scsi_remove_host(shost);
}

int
snic_get_state(struct snic *snic)
{
	return atomic_read(&snic->state);
}

void
snic_set_state(struct snic *snic, enum snic_state state)
{
	SNIC_HOST_INFO(snic->shost, "snic state change from %s to %s\n",
		       snic_state_to_str(snic_get_state(snic)),
		       snic_state_to_str(state));

	atomic_set(&snic->state, state);
}

/*
 * snic_probe : Initialize the snic interface.
 */
static int
snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct Scsi_Host *shost;
	struct snic *snic;
	mempool_t *pool;
	unsigned long flags;
	u32 max_ios = 0;
	int ret, i;

	/* Device Information */
	SNIC_INFO("snic device %4x:%4x:%4x:%4x: ",
		  pdev->vendor, pdev->device, pdev->subsystem_vendor,
		  pdev->subsystem_device);

	SNIC_INFO("snic device bus %x: slot %x: fn %x\n",
		  pdev->bus->number, PCI_SLOT(pdev->devfn),
		  PCI_FUNC(pdev->devfn));

	/*
	 * Allocate SCSI Host and setup association between host and snic
	 */
	shost = scsi_host_alloc(&snic_host_template, sizeof(struct snic));
	if (!shost) {
		SNIC_ERR("Unable to alloc scsi_host\n");
		ret = -ENOMEM;

		goto prob_end;
	}
	snic = shost_priv(shost);
	snic->shost = shost;

	snprintf(snic->name, sizeof(snic->name) - 1, "%s%d", SNIC_DRV_NAME,
		 shost->host_no);

	SNIC_HOST_INFO(shost,
		       "snic%d = %p shost = %p device bus %x: slot %x: fn %x\n",
		       shost->host_no, snic, shost, pdev->bus->number,
		       PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	/* Per snic debugfs init */
	ret = snic_stats_debugfs_init(snic);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "Failed to initialize debugfs stats\n");
		snic_stats_debugfs_remove(snic);
	}
#endif

	/* Setup PCI Resources */
	pci_set_drvdata(pdev, snic);
	snic->pdev = pdev;

	ret = pci_enable_device(pdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Cannot enable PCI Resources, aborting : %d\n",
			      ret);

		goto err_free_snic;
	}

	ret = pci_request_regions(pdev, SNIC_DRV_NAME);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Cannot obtain PCI Resources, aborting : %d\n",
			      ret);

		goto err_pci_disable;
	}

	pci_set_master(pdev);

	/*
	 * Query PCI Controller on system for DMA addressing
	 * limitation for the device. Try 43-bit first, and
	 * fall back to 32-bit.
	 */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(43));
	if (ret) {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret) {
			SNIC_HOST_ERR(shost,
				      "No Usable DMA Configuration, aborting %d\n",
				      ret);

			goto err_rel_regions;
		}

		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret) {
			SNIC_HOST_ERR(shost,
				      "Unable to obtain 32-bit DMA for consistent allocations, aborting: %d\n",
				      ret);

			goto err_rel_regions;
		}
	} else {
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(43));
		if (ret) {
			SNIC_HOST_ERR(shost,
				      "Unable to obtain 43-bit DMA for consistent allocations. aborting: %d\n",
				      ret);

			goto err_rel_regions;
		}
	}


	/* Map vNIC resources from BAR0 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n");

		ret = -ENODEV;
		goto err_rel_regions;
	}

	snic->bar0.vaddr = pci_iomap(pdev, 0, 0);
	if (!snic->bar0.vaddr) {
		SNIC_HOST_ERR(shost,
			      "Cannot memory map BAR0 res hdr aborting.\n");

		ret = -ENODEV;
		goto err_rel_regions;
	}

	snic->bar0.bus_addr = pci_resource_start(pdev, 0);
	snic->bar0.len = pci_resource_len(pdev, 0);
	SNIC_BUG_ON(snic->bar0.bus_addr == 0);

	/* Devcmd2 Resource Allocation and Initialization */
	snic->vdev = svnic_dev_alloc_discover(NULL, snic, pdev, &snic->bar0, 1);
	if (!snic->vdev) {
		SNIC_HOST_ERR(shost, "vNIC Resource Discovery Failed.\n");

		ret = -ENODEV;
		goto err_iounmap;
	}

	ret = svnic_dev_cmd_init(snic->vdev, 0);
	if (ret) {
		SNIC_HOST_INFO(shost, "Devcmd2 Init Failed. err = %d\n", ret);

		goto err_vnic_unreg;
	}

	ret = snic_dev_wait(snic->vdev, svnic_dev_open, snic_vdev_open_done, 0);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev open failed, aborting. %d\n",
			      ret);

		goto err_vnic_unreg;
	}

	ret = svnic_dev_init(snic->vdev, 0);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev init failed. aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	/* Get vNIC information */
	ret = snic_get_vnic_config(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Get vNIC configuration failed, aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	/* Configure Maximum Outstanding IO reqs */
	max_ios = snic->config.io_throttle_count;
	if (max_ios != SNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
		shost->can_queue = min_t(u32, SNIC_MAX_IO_REQ,
					 max_t(u32, SNIC_MIN_IO_REQ, max_ios));

	snic->max_tag_id = shost->can_queue;

	shost->max_lun = snic->config.luns_per_tgt;
	shost->max_id = SNIC_MAX_TARGET;

	shost->max_cmd_len = MAX_COMMAND_SIZE; /*defined in scsi_cmnd.h*/

	snic_get_res_counts(snic);

	/*
	 * Assumption: Only MSIx is supported
	 */
	ret = snic_set_intr_mode(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to set intr mode aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	ret = snic_alloc_vnic_res(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to alloc vNIC resources aborting. %d\n",
			      ret);

		goto err_clear_intr;
	}

	/* Initialize specific lists */
	INIT_LIST_HEAD(&snic->list);

	/*
	 * spl_cmd_list for maintaining snic specific cmds
	 * such as EXCH_VER_REQ, REPORT_TARGETS etc
	 */
	INIT_LIST_HEAD(&snic->spl_cmd_list);
	spin_lock_init(&snic->spl_cmd_lock);

	/* initialize all snic locks */
	spin_lock_init(&snic->snic_lock);

	for (i = 0; i < SNIC_WQ_MAX; i++)
		spin_lock_init(&snic->wq_lock[i]);

	for (i = 0; i < SNIC_IO_LOCKS; i++)
		spin_lock_init(&snic->io_req_lock[i]);

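	/*
	 * Pre-allocate the request mempools with a small reserve (two
	 * elements each) so requests can make forward progress under
	 * memory pressure.
	 */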
	pool = mempool_create_slab_pool(2,
				snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n");

		ret = -ENOMEM;
		goto err_free_res;
	}

	snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL] = pool;

	pool = mempool_create_slab_pool(2,
				snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "max sgl pool creation failed\n");

		ret = -ENOMEM;
		goto err_free_dflt_sgl_pool;
	}

	snic->req_pool[SNIC_REQ_CACHE_MAX_SGL] = pool;

	pool = mempool_create_slab_pool(2,
				snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n");

		ret = -ENOMEM;
		goto err_free_max_sgl_pool;
	}

	snic->req_pool[SNIC_REQ_TM_CACHE] = pool;

	/* Initialize snic state */
	atomic_set(&snic->state, SNIC_INIT);

	atomic_set(&snic->ios_inflight, 0);

	/* Setup notification buffer area */
	ret = snic_notify_set(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to alloc notify buffer aborting. %d\n",
			      ret);

		goto err_free_tmreq_pool;
	}

	spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
	list_add_tail(&snic->list, &snic_glob->snic_list);
	spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);

	snic_disc_init(&snic->disc);
	INIT_WORK(&snic->tgt_work, snic_handle_tgt_disc);
	INIT_WORK(&snic->disc_work, snic_handle_disc);
	INIT_WORK(&snic->link_work, snic_handle_link);

	/* Enable all queues */
	for (i = 0; i < snic->wq_count; i++)
		svnic_wq_enable(&snic->wq[i]);

	ret = svnic_dev_enable_wait(snic->vdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev enable failed w/ error %d\n",
			      ret);

		goto err_vdev_enable;
	}

	ret = snic_request_intr(snic);
	if (ret) {
		SNIC_HOST_ERR(shost, "Unable to request irq. %d\n", ret);

		goto err_req_intr;
	}

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_unmask(&snic->intr[i]);

	/* Get snic params */
	ret = snic_get_conf(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to get snic io config from FW w err %d\n",
			      ret);

		goto err_get_conf;
	}

	/*
	 * Initialization done with PCI system, hardware, firmware.
	 * Add shost to SCSI
	 */
	ret = snic_add_host(shost, pdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Adding scsi host Failed ... exiting. %d\n",
			      ret);

		goto err_get_conf;
	}

	snic_set_state(snic, SNIC_ONLINE);

	ret = snic_disc_start(snic);
	if (ret) {
		SNIC_HOST_ERR(shost, "snic_probe:Discovery Failed w err = %d\n",
			      ret);

		goto err_get_conf;
	}

	SNIC_HOST_INFO(shost, "SNIC Device Probe Successful.\n");

	return 0;

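	/*
	 * Error unwind: undo the initialization above in reverse order
	 * of setup.
	 */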
err_get_conf:
	snic_free_all_untagged_reqs(snic);

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_mask(&snic->intr[i]);

	snic_free_intr(snic);

err_req_intr:
	svnic_dev_disable(snic->vdev);

err_vdev_enable:
	svnic_dev_notify_unset(snic->vdev);

	for (i = 0; i < snic->wq_count; i++) {
		int rc = 0;

		rc = svnic_wq_disable(&snic->wq[i]);
		if (rc) {
			SNIC_HOST_ERR(shost,
				      "WQ Disable Failed w/ err = %d\n", rc);

			break;
		}
	}
	snic_del_host(snic->shost);

err_free_tmreq_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_TM_CACHE]);

err_free_max_sgl_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_MAX_SGL]);

err_free_dflt_sgl_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL]);

err_free_res:
	snic_free_vnic_res(snic);

err_clear_intr:
	snic_clear_intr_mode(snic);

err_dev_close:
	svnic_dev_close(snic->vdev);

err_vnic_unreg:
	svnic_dev_unregister(snic->vdev);

err_iounmap:
	snic_iounmap(snic);

err_rel_regions:
	pci_release_regions(pdev);

err_pci_disable:
	pci_disable_device(pdev);

err_free_snic:
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_stats_debugfs_remove(snic);
#endif
	scsi_host_put(shost);
	pci_set_drvdata(pdev, NULL);

prob_end:
	SNIC_INFO("sNIC device : bus %d: slot %d: fn %d Registration Failed.\n",
		  pdev->bus->number, PCI_SLOT(pdev->devfn),
		  PCI_FUNC(pdev->devfn));

	return ret;
} /* end of snic_probe */


/*
 * snic_remove : invoked on unbinding the interface, to clean up the
 * resources allocated in snic_probe during initialization.
 */
static void
snic_remove(struct pci_dev *pdev)
{
	struct snic *snic = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!snic) {
		SNIC_INFO("sNIC dev: bus %d slot %d fn %d snic inst is null.\n",
			  pdev->bus->number, PCI_SLOT(pdev->devfn),
			  PCI_FUNC(pdev->devfn));

		return;
	}

	/*
	 * Mark the state so that the workqueue thread stops forwarding
	 * received frames and link events. ISRs and other threads that
	 * can queue work items will also stop creating work items on the
	 * snic workqueue.
	 */
	snic_set_state(snic, SNIC_OFFLINE);
	spin_lock_irqsave(&snic->snic_lock, flags);
	snic->stop_link_events = 1;
	spin_unlock_irqrestore(&snic->snic_lock, flags);

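	/* Wait for already queued event work to finish, then stop discovery. */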
	flush_workqueue(snic_glob->event_q);
	snic_disc_term(snic);

	spin_lock_irqsave(&snic->snic_lock, flags);
	snic->in_remove = 1;
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	/*
	 * This stops the snic device, masks all interrupts, and drains
	 * completed CQ entries. Posted WQ/RQ/Copy-WQ entries are cleaned
	 * up.
	 */
	snic_cleanup(snic);

	spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
	list_del(&snic->list);
	spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);

	snic_tgt_del_all(snic);
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_stats_debugfs_remove(snic);
#endif
	snic_del_host(snic->shost);

	svnic_dev_notify_unset(snic->vdev);
	snic_free_intr(snic);
	snic_free_vnic_res(snic);
	snic_clear_intr_mode(snic);
	svnic_dev_close(snic->vdev);
	svnic_dev_unregister(snic->vdev);
	snic_iounmap(snic);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	/* this frees the Scsi_Host and snic memory (one contiguous chunk) */
	scsi_host_put(snic->shost);
} /* end of snic_remove */


struct snic_global *snic_glob;

/*
 * snic_global_data_init : initializes SNIC global data.
 * Note: all global lists and variables should be part of the global
 * data; this helps in debugging.
 */
static int
snic_global_data_init(void)
{
	int ret = 0;
	struct kmem_cache *cachep;
	ssize_t len = 0;

	snic_glob = kzalloc(sizeof(*snic_glob), GFP_KERNEL);

	if (!snic_glob) {
		SNIC_ERR("Failed to allocate Global Context.\n");

		ret = -ENOMEM;
		goto gdi_end;
	}

#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	/* Debugfs related Initialization */
	/* Create debugfs entries for snic */
	ret = snic_debugfs_init();
	if (ret < 0) {
		SNIC_ERR("Failed to create sysfs dir for tracing and stats.\n");
		snic_debugfs_term();
		/* continue even if it fails */
	}

	/* Trace related Initialization */
	/* Allocate memory for trace buffer */
	ret = snic_trc_init();
	if (ret < 0) {
		SNIC_ERR("Trace buffer init failed, SNIC tracing disabled\n");
		snic_trc_free();
		/* continue even if it fails */
	}

#endif
	INIT_LIST_HEAD(&snic_glob->snic_list);
	spin_lock_init(&snic_glob->snic_list_lock);

	/* Create a cache for allocation of snic_host_req+default size ESGLs */
	len = sizeof(struct snic_req_info);
	len += sizeof(struct snic_host_req) + sizeof(struct snic_dflt_sgl);
	cachep = kmem_cache_create("snic_req_dfltsgl", len, SNIC_SG_DESC_ALIGN,
				   SLAB_HWCACHE_ALIGN, NULL);
	if (!cachep) {
		SNIC_ERR("Failed to create snic default sgl slab\n");
		ret = -ENOMEM;

		goto err_dflt_req_slab;
	}
	snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL] = cachep;

	/* Create a cache for allocation of max size Extended SGLs */
	len = sizeof(struct snic_req_info);
	len += sizeof(struct snic_host_req) + sizeof(struct snic_max_sgl);
	cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN,
				   SLAB_HWCACHE_ALIGN, NULL);
	if (!cachep) {
		SNIC_ERR("Failed to create snic max sgl slab\n");
		ret = -ENOMEM;

		goto err_max_req_slab;
	}
	snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL] = cachep;

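	/* Create a cache for TM (task management) requests; these need only a bare snic_host_req. */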
	len = sizeof(struct snic_host_req);
	cachep = kmem_cache_create("snic_req_tm", len, SNIC_SG_DESC_ALIGN,
				   SLAB_HWCACHE_ALIGN, NULL);
	if (!cachep) {
		SNIC_ERR("Failed to create snic tm req slab\n");
		ret = -ENOMEM;

		goto err_tmreq_slab;
	}
	snic_glob->req_cache[SNIC_REQ_TM_CACHE] = cachep;

	/* snic_event queue */
	snic_glob->event_q = create_singlethread_workqueue("snic_event_wq");
	if (!snic_glob->event_q) {
		SNIC_ERR("snic event queue create failed\n");
		ret = -ENOMEM;

		goto err_eventq;
	}

	return ret;

err_eventq:
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);

err_tmreq_slab:
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);

err_max_req_slab:
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);

err_dflt_req_slab:
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_trc_free();
	snic_debugfs_term();
#endif
	kfree(snic_glob);
	snic_glob = NULL;

gdi_end:
	return ret;
} /* end of snic_glob_init */

/*
 * snic_global_data_cleanup : Frees SNIC Global Data
 */
static void
snic_global_data_cleanup(void)
{
	SNIC_BUG_ON(snic_glob == NULL);

	destroy_workqueue(snic_glob->event_q);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);

#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	/* Freeing Trace Resources */
	snic_trc_free();

	/* Freeing Debugfs Resources */
	snic_debugfs_term();
#endif
	kfree(snic_glob);
	snic_glob = NULL;
} /* end of snic_glob_cleanup */

static struct pci_driver snic_driver = {
	.name = SNIC_DRV_NAME,
	.id_table = snic_id_table,
	.probe = snic_probe,
	.remove = snic_remove,
};

static int __init
snic_init_module(void)
{
	int ret = 0;

#ifndef __x86_64__
	SNIC_INFO("SNIC Driver is supported only for x86_64 platforms!\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
#endif

	SNIC_INFO("%s, ver %s\n", SNIC_DRV_DESCRIPTION, SNIC_DRV_VERSION);

	ret = snic_global_data_init();
	if (ret) {
		SNIC_ERR("Failed to Initialize Global Data.\n");

		return ret;
	}

	ret = pci_register_driver(&snic_driver);
	if (ret < 0) {
		SNIC_ERR("PCI driver register error\n");

		goto err_pci_reg;
	}

	return ret;

err_pci_reg:
	snic_global_data_cleanup();

	return ret;
}

static void __exit
snic_cleanup_module(void)
{
	pci_unregister_driver(&snic_driver);
	snic_global_data_cleanup();
}

module_init(snic_init_module);
module_exit(snic_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION(SNIC_DRV_DESCRIPTION);
MODULE_VERSION(SNIC_DRV_VERSION);
MODULE_DEVICE_TABLE(pci, snic_id_table);
MODULE_AUTHOR("Narsimhulu Musini <nmusini@cisco.com>, "
	      "Sesidhar Baddela <sebaddel@cisco.com>");