1 /*
2  *  Linux MegaRAID driver for SAS based RAID controllers
3  *
4  *  Copyright (c) 2003-2013  LSI Corporation
5  *  Copyright (c) 2013-2014  Avago Technologies
6  *
7  *  This program is free software; you can redistribute it and/or
8  *  modify it under the terms of the GNU General Public License
9  *  as published by the Free Software Foundation; either version 2
10  *  of the License, or (at your option) any later version.
11  *
12  *  This program is distributed in the hope that it will be useful,
13  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *  GNU General Public License for more details.
16  *
17  *  You should have received a copy of the GNU General Public License
18  *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  *
20  *  Authors: Avago Technologies
21  *           Sreenivas Bagalkote
22  *           Sumant Patro
23  *           Bo Yang
24  *           Adam Radford
25  *           Kashyap Desai <kashyap.desai@avagotech.com>
26  *           Sumit Saxena <sumit.saxena@avagotech.com>
27  *
28  *  Send feedback to: megaraidlinux.pdl@avagotech.com
29  *
30  *  Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
31  *  San Jose, California 95131
32  */
33 
34 #include <linux/kernel.h>
35 #include <linux/types.h>
36 #include <linux/pci.h>
37 #include <linux/list.h>
38 #include <linux/moduleparam.h>
39 #include <linux/module.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/delay.h>
43 #include <linux/uio.h>
44 #include <linux/slab.h>
45 #include <linux/uaccess.h>
46 #include <asm/unaligned.h>
47 #include <linux/fs.h>
48 #include <linux/compat.h>
49 #include <linux/blkdev.h>
50 #include <linux/mutex.h>
51 #include <linux/poll.h>
52 #include <linux/vmalloc.h>
53 
54 #include <scsi/scsi.h>
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_device.h>
57 #include <scsi/scsi_host.h>
58 #include <scsi/scsi_tcq.h>
59 #include "megaraid_sas_fusion.h"
60 #include "megaraid_sas.h"
61 
62 /*
63  * Number of sectors per IO command
64  * Will be set in megasas_init_mfi if user does not provide
65  */
66 static unsigned int max_sectors;
67 module_param_named(max_sectors, max_sectors, int, 0);
68 MODULE_PARM_DESC(max_sectors,
69 	"Maximum number of sectors per IO command");
70 
71 static int msix_disable;
72 module_param(msix_disable, int, S_IRUGO);
73 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
74 
75 static unsigned int msix_vectors;
76 module_param(msix_vectors, int, S_IRUGO);
77 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
78 
79 static int allow_vf_ioctls;
80 module_param(allow_vf_ioctls, int, S_IRUGO);
81 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
82 
83 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
84 module_param(throttlequeuedepth, int, S_IRUGO);
85 MODULE_PARM_DESC(throttlequeuedepth,
86 	"Adapter queue depth when throttled due to I/O timeout. Default: 16");
87 
88 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
89 module_param(resetwaittime, int, S_IRUGO);
90 MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
91 		 "before resetting adapter. Default: 180");
92 
93 int smp_affinity_enable = 1;
94 module_param(smp_affinity_enable, int, S_IRUGO);
95 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable. Default: enable(1)");
96 
97 int rdpq_enable = 1;
98 module_param(rdpq_enable, int, S_IRUGO);
99 MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable. Default: enable(1)");
100 
101 unsigned int dual_qdepth_disable;
102 module_param(dual_qdepth_disable, int, S_IRUGO);
103 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
104 
105 unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
106 module_param(scmd_timeout, int, S_IRUGO);
107 MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
108 
109 MODULE_LICENSE("GPL");
110 MODULE_VERSION(MEGASAS_VERSION);
111 MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com");
112 MODULE_DESCRIPTION("Avago MegaRAID SAS Driver");
113 
114 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
115 static int megasas_get_pd_list(struct megasas_instance *instance);
116 static int megasas_ld_list_query(struct megasas_instance *instance,
117 				 u8 query_type);
118 static int megasas_issue_init_mfi(struct megasas_instance *instance);
119 static int megasas_register_aen(struct megasas_instance *instance,
120 				u32 seq_num, u32 class_locale_word);
121 static void megasas_get_pd_info(struct megasas_instance *instance,
122 				struct scsi_device *sdev);
123 
124 /*
125  * PCI ID table for all supported controllers
126  */
127 static struct pci_device_id megasas_pci_table[] = {
128 
129 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
130 	/* xscale IOP */
131 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
132 	/* ppc IOP */
133 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
134 	/* ppc IOP */
135 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
136 	/* gen2*/
137 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
138 	/* gen2*/
139 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
140 	/* skinny*/
141 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
142 	/* skinny*/
143 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
144 	/* xscale IOP, vega */
145 	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
146 	/* xscale IOP */
147 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
148 	/* Fusion */
149 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
150 	/* Plasma */
151 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
152 	/* Invader */
153 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
154 	/* Fury */
155 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
156 	/* Intruder */
157 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
158 	/* Intruder 24 port*/
159 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
160 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
161 	/* VENTURA */
162 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
163 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
164 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
165 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
166 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
167 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
168 	{}
169 };
170 
171 MODULE_DEVICE_TABLE(pci, megasas_pci_table);
172 
173 static int megasas_mgmt_majorno;
174 struct megasas_mgmt_info megasas_mgmt_info;
175 static struct fasync_struct *megasas_async_queue;
176 static DEFINE_MUTEX(megasas_async_queue_mutex);
177 
178 static int megasas_poll_wait_aen;
179 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
180 static u32 support_poll_for_event;
181 u32 megasas_dbg_lvl;
182 static u32 support_device_change;
183 static bool support_nvme_encapsulation;
184 
185 /* define lock for aen poll */
186 spinlock_t poll_aen_lock;
187 
188 void
189 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
190 		     u8 alt_status);
191 static u32
192 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs);
193 static int
194 megasas_adp_reset_gen2(struct megasas_instance *instance,
195 		       struct megasas_register_set __iomem *reg_set);
196 static irqreturn_t megasas_isr(int irq, void *devp);
197 static u32
198 megasas_init_adapter_mfi(struct megasas_instance *instance);
199 u32
200 megasas_build_and_issue_cmd(struct megasas_instance *instance,
201 			    struct scsi_cmnd *scmd);
202 static void megasas_complete_cmd_dpc(unsigned long instance_addr);
203 int
204 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
205 	int seconds);
206 void megasas_fusion_ocr_wq(struct work_struct *work);
207 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
208 					 int initial);
209 static int
210 megasas_set_dma_mask(struct megasas_instance *instance);
211 static int
212 megasas_alloc_ctrl_mem(struct megasas_instance *instance);
213 static inline void
214 megasas_free_ctrl_mem(struct megasas_instance *instance);
215 static inline int
216 megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
217 static inline void
218 megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
219 static inline void
220 megasas_init_ctrl_params(struct megasas_instance *instance);
221 
222 /**
223  * megasas_set_dma_settings -	Populate DMA address, length and flags for DCMDs
224  * @instance:			Adapter soft state
225  * @dcmd:			DCMD frame inside MFI command
226  * @dma_addr:			DMA address of buffer to be passed to FW
227  * @dma_len:			Length of DMA buffer to be passed to FW
228  * @return:			void
229  */
230 void megasas_set_dma_settings(struct megasas_instance *instance,
231 			      struct megasas_dcmd_frame *dcmd,
232 			      dma_addr_t dma_addr, u32 dma_len)
233 {
234 	if (instance->consistent_mask_64bit) {
235 		dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
236 		dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
237 		dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);
238 
239 	} else {
240 		dcmd->sgl.sge32[0].phys_addr =
241 				cpu_to_le32(lower_32_bits(dma_addr));
242 		dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
243 		dcmd->flags = cpu_to_le16(dcmd->flags);
244 	}
245 }
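/*
 * Illustrative usage note (editorial, not from the original source): a caller
 * building a DCMD, e.g. a controller-info query, points dma_addr/dma_len at
 * its DMA-coherent reply buffer; the SGL64 flag is OR-ed in only when the
 * 64-bit consistent DMA mask was negotiated, otherwise a 32-bit SGE is filled
 * and the existing flags are left untouched.
 */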
246 
247 void
248 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
249 {
250 	instance->instancet->fire_cmd(instance,
251 		cmd->frame_phys_addr, 0, instance->reg_set);
252 	return;
253 }
254 
255 /**
256  * megasas_get_cmd -	Get a command from the free pool
257  * @instance:		Adapter soft state
258  *
259  * Returns a free command from the pool
260  */
261 struct megasas_cmd *megasas_get_cmd(struct megasas_instance
262 						  *instance)
263 {
264 	unsigned long flags;
265 	struct megasas_cmd *cmd = NULL;
266 
267 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
268 
269 	if (!list_empty(&instance->cmd_pool)) {
270 		cmd = list_entry((&instance->cmd_pool)->next,
271 				 struct megasas_cmd, list);
272 		list_del_init(&cmd->list);
273 	} else {
274 		dev_err(&instance->pdev->dev, "Command pool empty!\n");
275 	}
276 
277 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
278 	return cmd;
279 }
280 
281 /**
282  * megasas_return_cmd -	Return a cmd to free command pool
283  * @instance:		Adapter soft state
284  * @cmd:		Command packet to be returned to free command pool
285  */
286 void
287 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
288 {
289 	unsigned long flags;
290 	u32 blk_tags;
291 	struct megasas_cmd_fusion *cmd_fusion;
292 	struct fusion_context *fusion = instance->ctrl_context;
293 
294 	/* This flag is used only for fusion adapter.
295 	 * Wait for Interrupt for Polled mode DCMD
296 	 */
297 	if (cmd->flags & DRV_DCMD_POLLED_MODE)
298 		return;
299 
300 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
301 
302 	if (fusion) {
303 		blk_tags = instance->max_scsi_cmds + cmd->index;
304 		cmd_fusion = fusion->cmd_list[blk_tags];
305 		megasas_return_cmd_fusion(instance, cmd_fusion);
306 	}
307 	cmd->scmd = NULL;
308 	cmd->frame_count = 0;
309 	cmd->flags = 0;
310 	memset(cmd->frame, 0, instance->mfi_frame_size);
311 	cmd->frame->io.context = cpu_to_le32(cmd->index);
312 	if (!fusion && reset_devices)
313 		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
314 	list_add(&cmd->list, (&instance->cmd_pool)->next);
315 
316 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
317 
318 }
319 
320 static const char *
321 format_timestamp(uint32_t timestamp)
322 {
323 	static char buffer[32];
324 
325 	if ((timestamp & 0xff000000) == 0xff000000)
326 		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
327 		0x00ffffff);
328 	else
329 		snprintf(buffer, sizeof(buffer), "%us", timestamp);
330 	return buffer;
331 }
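/*
 * Example (illustrative): firmware encodes relative timestamps with 0xff in
 * the top byte, so 0xff00012c is printed as "boot + 300s"; any other value,
 * e.g. 300, is printed as an absolute "300s".
 */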
332 
333 static const char *
334 format_class(int8_t class)
335 {
336 	static char buffer[6];
337 
338 	switch (class) {
339 	case MFI_EVT_CLASS_DEBUG:
340 		return "debug";
341 	case MFI_EVT_CLASS_PROGRESS:
342 		return "progress";
343 	case MFI_EVT_CLASS_INFO:
344 		return "info";
345 	case MFI_EVT_CLASS_WARNING:
346 		return "WARN";
347 	case MFI_EVT_CLASS_CRITICAL:
348 		return "CRIT";
349 	case MFI_EVT_CLASS_FATAL:
350 		return "FATAL";
351 	case MFI_EVT_CLASS_DEAD:
352 		return "DEAD";
353 	default:
354 		snprintf(buffer, sizeof(buffer), "%d", class);
355 		return buffer;
356 	}
357 }
358 
359 /**
360  * megasas_decode_evt -	Decode FW AEN event and print critical event
361  *			for information.
362  * @instance:		Adapter soft state
363  */
364 static void
365 megasas_decode_evt(struct megasas_instance *instance)
366 {
367 	struct megasas_evt_detail *evt_detail = instance->evt_detail;
368 	union megasas_evt_class_locale class_locale;
369 	class_locale.word = le32_to_cpu(evt_detail->cl.word);
370 
371 	if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
372 		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
373 			le32_to_cpu(evt_detail->seq_num),
374 			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
375 			(class_locale.members.locale),
376 			format_class(class_locale.members.class),
377 			evt_detail->description);
378 }
379 
380 /**
381 *	The following functions are defined for xscale
382 *	(deviceid : 1064R, PERC5) controllers
383 */
384 
385 /**
386  * megasas_enable_intr_xscale -	Enables interrupts
387  * @instance:			Adapter soft state
388  */
389 static inline void
390 megasas_enable_intr_xscale(struct megasas_instance *instance)
391 {
392 	struct megasas_register_set __iomem *regs;
393 
394 	regs = instance->reg_set;
395 	writel(0, &(regs)->outbound_intr_mask);
396 
397 	/* Dummy readl to force pci flush */
398 	readl(&regs->outbound_intr_mask);
399 }
400 
401 /**
402  * megasas_disable_intr_xscale -Disables interrupt
403  * @instance:			Adapter soft state
404  */
405 static inline void
406 megasas_disable_intr_xscale(struct megasas_instance *instance)
407 {
408 	struct megasas_register_set __iomem *regs;
409 	u32 mask = 0x1f;
410 
411 	regs = instance->reg_set;
412 	writel(mask, &regs->outbound_intr_mask);
413 	/* Dummy readl to force pci flush */
414 	readl(&regs->outbound_intr_mask);
415 }
416 
417 /**
418  * megasas_read_fw_status_reg_xscale - returns the current FW status value
419  * @regs:			MFI register set
420  */
421 static u32
422 megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs)
423 {
424 	return readl(&(regs)->outbound_msg_0);
425 }
426 /**
427  * megasas_clear_interrupt_xscale -	Check & clear interrupt
428  * @regs:				MFI register set
429  */
430 static int
431 megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
432 {
433 	u32 status;
434 	u32 mfiStatus = 0;
435 
436 	/*
437 	 * Check if it is our interrupt
438 	 */
439 	status = readl(&regs->outbound_intr_status);
440 
441 	if (status & MFI_OB_INTR_STATUS_MASK)
442 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
443 	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
444 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
445 
446 	/*
447 	 * Clear the interrupt by writing back the same value
448 	 */
449 	if (mfiStatus)
450 		writel(status, &regs->outbound_intr_status);
451 
452 	/* Dummy readl to force pci flush */
453 	readl(&regs->outbound_intr_status);
454 
455 	return mfiStatus;
456 }
457 
458 /**
459  * megasas_fire_cmd_xscale -	Sends command to the FW
460  * @frame_phys_addr :		Physical address of cmd
461  * @frame_count :		Number of frames for the command
462  * @regs :			MFI register set
463  */
464 static inline void
465 megasas_fire_cmd_xscale(struct megasas_instance *instance,
466 		dma_addr_t frame_phys_addr,
467 		u32 frame_count,
468 		struct megasas_register_set __iomem *regs)
469 {
470 	unsigned long flags;
471 
472 	spin_lock_irqsave(&instance->hba_lock, flags);
473 	writel((frame_phys_addr >> 3)|(frame_count),
474 	       &(regs)->inbound_queue_port);
475 	spin_unlock_irqrestore(&instance->hba_lock, flags);
476 }
477 
478 /**
479  * megasas_adp_reset_xscale -  For controller reset
480  * @regs:                              MFI register set
481  */
482 static int
483 megasas_adp_reset_xscale(struct megasas_instance *instance,
484 	struct megasas_register_set __iomem *regs)
485 {
486 	u32 i;
487 	u32 pcidata;
488 
489 	writel(MFI_ADP_RESET, &regs->inbound_doorbell);
490 
491 	for (i = 0; i < 3; i++)
492 		msleep(1000); /* sleep for 3 secs */
493 	pcidata  = 0;
494 	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
495 	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
496 	if (pcidata & 0x2) {
497 		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
498 		pcidata &= ~0x2;
499 		pci_write_config_dword(instance->pdev,
500 				MFI_1068_PCSR_OFFSET, pcidata);
501 
502 		for (i = 0; i < 2; i++)
503 			msleep(1000); /* need to wait 2 secs again */
504 
505 		pcidata  = 0;
506 		pci_read_config_dword(instance->pdev,
507 				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
508 		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
509 		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
510 			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
511 			pcidata = 0;
512 			pci_write_config_dword(instance->pdev,
513 				MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
514 		}
515 	}
516 	return 0;
517 }
518 
519 /**
520  * megasas_check_reset_xscale -	For controller reset check
521  * @regs:				MFI register set
522  */
523 static int
524 megasas_check_reset_xscale(struct megasas_instance *instance,
525 		struct megasas_register_set __iomem *regs)
526 {
527 	if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
528 	    (le32_to_cpu(*instance->consumer) ==
529 		MEGASAS_ADPRESET_INPROG_SIGN))
530 		return 1;
531 	return 0;
532 }
533 
534 static struct megasas_instance_template megasas_instance_template_xscale = {
535 
536 	.fire_cmd = megasas_fire_cmd_xscale,
537 	.enable_intr = megasas_enable_intr_xscale,
538 	.disable_intr = megasas_disable_intr_xscale,
539 	.clear_intr = megasas_clear_intr_xscale,
540 	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
541 	.adp_reset = megasas_adp_reset_xscale,
542 	.check_reset = megasas_check_reset_xscale,
543 	.service_isr = megasas_isr,
544 	.tasklet = megasas_complete_cmd_dpc,
545 	.init_adapter = megasas_init_adapter_mfi,
546 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
547 	.issue_dcmd = megasas_issue_dcmd,
548 };
549 
550 /**
551 *	This is the end of set of functions & definitions specific
552 *	to xscale (deviceid : 1064R, PERC5) controllers
553 */
554 
555 /**
556 *	The following functions are defined for ppc (deviceid : 0x60)
557 *	controllers
558 */
559 
560 /**
561  * megasas_enable_intr_ppc -	Enables interrupts
562  * @instance:			Adapter soft state
563  */
564 static inline void
565 megasas_enable_intr_ppc(struct megasas_instance *instance)
566 {
567 	struct megasas_register_set __iomem *regs;
568 
569 	regs = instance->reg_set;
570 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
571 
572 	writel(~0x80000000, &(regs)->outbound_intr_mask);
573 
574 	/* Dummy readl to force pci flush */
575 	readl(&regs->outbound_intr_mask);
576 }
577 
578 /**
579  * megasas_disable_intr_ppc -	Disable interrupt
580  * @instance:			Adapter soft state
581  */
582 static inline void
583 megasas_disable_intr_ppc(struct megasas_instance *instance)
584 {
585 	struct megasas_register_set __iomem *regs;
586 	u32 mask = 0xFFFFFFFF;
587 
588 	regs = instance->reg_set;
589 	writel(mask, &regs->outbound_intr_mask);
590 	/* Dummy readl to force pci flush */
591 	readl(&regs->outbound_intr_mask);
592 }
593 
594 /**
595  * megasas_read_fw_status_reg_ppc - returns the current FW status value
596  * @regs:			MFI register set
597  */
598 static u32
599 megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
600 {
601 	return readl(&(regs)->outbound_scratch_pad);
602 }
603 
604 /**
605  * megasas_clear_interrupt_ppc -	Check & clear interrupt
606  * @regs:				MFI register set
607  */
608 static int
609 megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
610 {
611 	u32 status, mfiStatus = 0;
612 
613 	/*
614 	 * Check if it is our interrupt
615 	 */
616 	status = readl(&regs->outbound_intr_status);
617 
618 	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
619 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
620 
621 	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
622 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
623 
624 	/*
625 	 * Clear the interrupt by writing back the same value
626 	 */
627 	writel(status, &regs->outbound_doorbell_clear);
628 
629 	/* Dummy readl to force pci flush */
630 	readl(&regs->outbound_doorbell_clear);
631 
632 	return mfiStatus;
633 }
634 
635 /**
636  * megasas_fire_cmd_ppc -	Sends command to the FW
637  * @frame_phys_addr :		Physical address of cmd
638  * @frame_count :		Number of frames for the command
639  * @regs :			MFI register set
640  */
641 static inline void
642 megasas_fire_cmd_ppc(struct megasas_instance *instance,
643 		dma_addr_t frame_phys_addr,
644 		u32 frame_count,
645 		struct megasas_register_set __iomem *regs)
646 {
647 	unsigned long flags;
648 
649 	spin_lock_irqsave(&instance->hba_lock, flags);
650 	writel((frame_phys_addr | (frame_count<<1))|1,
651 			&(regs)->inbound_queue_port);
652 	spin_unlock_irqrestore(&instance->hba_lock, flags);
653 }
654 
655 /**
656  * megasas_check_reset_ppc -	For controller reset check
657  * @regs:				MFI register set
658  */
659 static int
660 megasas_check_reset_ppc(struct megasas_instance *instance,
661 			struct megasas_register_set __iomem *regs)
662 {
663 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
664 		return 1;
665 
666 	return 0;
667 }
668 
669 static struct megasas_instance_template megasas_instance_template_ppc = {
670 
671 	.fire_cmd = megasas_fire_cmd_ppc,
672 	.enable_intr = megasas_enable_intr_ppc,
673 	.disable_intr = megasas_disable_intr_ppc,
674 	.clear_intr = megasas_clear_intr_ppc,
675 	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
676 	.adp_reset = megasas_adp_reset_xscale,
677 	.check_reset = megasas_check_reset_ppc,
678 	.service_isr = megasas_isr,
679 	.tasklet = megasas_complete_cmd_dpc,
680 	.init_adapter = megasas_init_adapter_mfi,
681 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
682 	.issue_dcmd = megasas_issue_dcmd,
683 };
684 
685 /**
686  * megasas_enable_intr_skinny -	Enables interrupts
687  * @instance:			Adapter soft state
688  */
689 static inline void
690 megasas_enable_intr_skinny(struct megasas_instance *instance)
691 {
692 	struct megasas_register_set __iomem *regs;
693 
694 	regs = instance->reg_set;
695 	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
696 
697 	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
698 
699 	/* Dummy readl to force pci flush */
700 	readl(&regs->outbound_intr_mask);
701 }
702 
703 /**
704  * megasas_disable_intr_skinny -	Disables interrupt
705  * @instance:			Adapter soft state
706  */
707 static inline void
708 megasas_disable_intr_skinny(struct megasas_instance *instance)
709 {
710 	struct megasas_register_set __iomem *regs;
711 	u32 mask = 0xFFFFFFFF;
712 
713 	regs = instance->reg_set;
714 	writel(mask, &regs->outbound_intr_mask);
715 	/* Dummy readl to force pci flush */
716 	readl(&regs->outbound_intr_mask);
717 }
718 
719 /**
720  * megasas_read_fw_status_reg_skinny - returns the current FW status value
721  * @regs:			MFI register set
722  */
723 static u32
724 megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
725 {
726 	return readl(&(regs)->outbound_scratch_pad);
727 }
728 
729 /**
730  * megasas_clear_interrupt_skinny -	Check & clear interrupt
731  * @regs:				MFI register set
732  */
733 static int
734 megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
735 {
736 	u32 status;
737 	u32 mfiStatus = 0;
738 
739 	/*
740 	 * Check if it is our interrupt
741 	 */
742 	status = readl(&regs->outbound_intr_status);
743 
744 	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
745 		return 0;
746 	}
747 
748 	/*
749 	 * Check if it is our interrupt
750 	 */
751 	if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) ==
752 	    MFI_STATE_FAULT) {
753 		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
754 	} else
755 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
756 
757 	/*
758 	 * Clear the interrupt by writing back the same value
759 	 */
760 	writel(status, &regs->outbound_intr_status);
761 
762 	/*
763 	 * dummy read to flush PCI
764 	 */
765 	readl(&regs->outbound_intr_status);
766 
767 	return mfiStatus;
768 }
769 
770 /**
771  * megasas_fire_cmd_skinny -	Sends command to the FW
772  * @frame_phys_addr :		Physical address of cmd
773  * @frame_count :		Number of frames for the command
774  * @regs :			MFI register set
775  */
776 static inline void
777 megasas_fire_cmd_skinny(struct megasas_instance *instance,
778 			dma_addr_t frame_phys_addr,
779 			u32 frame_count,
780 			struct megasas_register_set __iomem *regs)
781 {
782 	unsigned long flags;
783 
784 	spin_lock_irqsave(&instance->hba_lock, flags);
785 	writel(upper_32_bits(frame_phys_addr),
786 	       &(regs)->inbound_high_queue_port);
787 	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
788 	       &(regs)->inbound_low_queue_port);
789 	mmiowb();
790 	spin_unlock_irqrestore(&instance->hba_lock, flags);
791 }
792 
793 /**
794  * megasas_check_reset_skinny -	For controller reset check
795  * @regs:				MFI register set
796  */
797 static int
798 megasas_check_reset_skinny(struct megasas_instance *instance,
799 				struct megasas_register_set __iomem *regs)
800 {
801 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
802 		return 1;
803 
804 	return 0;
805 }
806 
807 static struct megasas_instance_template megasas_instance_template_skinny = {
808 
809 	.fire_cmd = megasas_fire_cmd_skinny,
810 	.enable_intr = megasas_enable_intr_skinny,
811 	.disable_intr = megasas_disable_intr_skinny,
812 	.clear_intr = megasas_clear_intr_skinny,
813 	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
814 	.adp_reset = megasas_adp_reset_gen2,
815 	.check_reset = megasas_check_reset_skinny,
816 	.service_isr = megasas_isr,
817 	.tasklet = megasas_complete_cmd_dpc,
818 	.init_adapter = megasas_init_adapter_mfi,
819 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
820 	.issue_dcmd = megasas_issue_dcmd,
821 };
822 
823 
824 /**
825 *	The following functions are defined for gen2 (deviceid : 0x78 0x79)
826 *	controllers
827 */
828 
829 /**
830  * megasas_enable_intr_gen2 -  Enables interrupts
831  * @instance:                   Adapter soft state
832  */
833 static inline void
834 megasas_enable_intr_gen2(struct megasas_instance *instance)
835 {
836 	struct megasas_register_set __iomem *regs;
837 
838 	regs = instance->reg_set;
839 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
840 
841 	/* write ~0x00000005 (bits 0x4 and 0x1) to the intr mask */
842 	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
843 
844 	/* Dummy readl to force pci flush */
845 	readl(&regs->outbound_intr_mask);
846 }
847 
848 /**
849  * megasas_disable_intr_gen2 - Disables interrupt
850  * @instance:                   Adapter soft state
851  */
852 static inline void
853 megasas_disable_intr_gen2(struct megasas_instance *instance)
854 {
855 	struct megasas_register_set __iomem *regs;
856 	u32 mask = 0xFFFFFFFF;
857 
858 	regs = instance->reg_set;
859 	writel(mask, &regs->outbound_intr_mask);
860 	/* Dummy readl to force pci flush */
861 	readl(&regs->outbound_intr_mask);
862 }
863 
864 /**
865  * megasas_read_fw_status_reg_gen2 - returns the current FW status value
866  * @regs:                      MFI register set
867  */
868 static u32
869 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
870 {
871 	return readl(&(regs)->outbound_scratch_pad);
872 }
873 
874 /**
875  * megasas_clear_interrupt_gen2 -      Check & clear interrupt
876  * @regs:                              MFI register set
877  */
878 static int
879 megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
880 {
881 	u32 status;
882 	u32 mfiStatus = 0;
883 
884 	/*
885 	 * Check if it is our interrupt
886 	 */
887 	status = readl(&regs->outbound_intr_status);
888 
889 	if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
890 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
891 	}
892 	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
893 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
894 	}
895 
896 	/*
897 	 * Clear the interrupt by writing back the same value
898 	 */
899 	if (mfiStatus)
900 		writel(status, &regs->outbound_doorbell_clear);
901 
902 	/* Dummy readl to force pci flush */
903 	readl(&regs->outbound_intr_status);
904 
905 	return mfiStatus;
906 }
907 /**
908  * megasas_fire_cmd_gen2 -     Sends command to the FW
909  * @frame_phys_addr :          Physical address of cmd
910  * @frame_count :              Number of frames for the command
911  * @regs :                     MFI register set
912  */
913 static inline void
914 megasas_fire_cmd_gen2(struct megasas_instance *instance,
915 			dma_addr_t frame_phys_addr,
916 			u32 frame_count,
917 			struct megasas_register_set __iomem *regs)
918 {
919 	unsigned long flags;
920 
921 	spin_lock_irqsave(&instance->hba_lock, flags);
922 	writel((frame_phys_addr | (frame_count<<1))|1,
923 			&(regs)->inbound_queue_port);
924 	spin_unlock_irqrestore(&instance->hba_lock, flags);
925 }
926 
927 /**
928  * megasas_adp_reset_gen2 -	For controller reset
929  * @regs:				MFI register set
930  */
931 static int
932 megasas_adp_reset_gen2(struct megasas_instance *instance,
933 			struct megasas_register_set __iomem *reg_set)
934 {
935 	u32 retry = 0 ;
936 	u32 HostDiag;
937 	u32 __iomem *seq_offset = &reg_set->seq_offset;
938 	u32 __iomem *hostdiag_offset = &reg_set->host_diag;
939 
940 	if (instance->instancet == &megasas_instance_template_skinny) {
941 		seq_offset = &reg_set->fusion_seq_offset;
942 		hostdiag_offset = &reg_set->fusion_host_diag;
943 	}
944 
945 	writel(0, seq_offset);
946 	writel(4, seq_offset);
947 	writel(0xb, seq_offset);
948 	writel(2, seq_offset);
949 	writel(7, seq_offset);
950 	writel(0xd, seq_offset);
951 
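	/*
	 * Editorial note: the six writes above are, presumably, the host-diag
	 * write-enable key sequence for this controller generation; the loop
	 * below confirms the unlock by polling for DIAG_WRITE_ENABLE.
	 */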
952 	msleep(1000);
953 
954 	HostDiag = (u32)readl(hostdiag_offset);
955 
956 	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
957 		msleep(100);
958 		HostDiag = (u32)readl(hostdiag_offset);
959 		dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
960 					retry, HostDiag);
961 
962 		if (retry++ >= 100)
963 			return 1;
964 
965 	}
966 
967 	dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
968 
969 	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
970 
971 	ssleep(10);
972 
973 	HostDiag = (u32)readl(hostdiag_offset);
974 	while (HostDiag & DIAG_RESET_ADAPTER) {
975 		msleep(100);
976 		HostDiag = (u32)readl(hostdiag_offset);
977 		dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
978 				retry, HostDiag);
979 
980 		if (retry++ >= 1000)
981 			return 1;
982 
983 	}
984 	return 0;
985 }
986 
987 /**
988  * megasas_check_reset_gen2 -	For controller reset check
989  * @regs:				MFI register set
990  */
991 static int
992 megasas_check_reset_gen2(struct megasas_instance *instance,
993 		struct megasas_register_set __iomem *regs)
994 {
995 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
996 		return 1;
997 
998 	return 0;
999 }
1000 
1001 static struct megasas_instance_template megasas_instance_template_gen2 = {
1002 
1003 	.fire_cmd = megasas_fire_cmd_gen2,
1004 	.enable_intr = megasas_enable_intr_gen2,
1005 	.disable_intr = megasas_disable_intr_gen2,
1006 	.clear_intr = megasas_clear_intr_gen2,
1007 	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
1008 	.adp_reset = megasas_adp_reset_gen2,
1009 	.check_reset = megasas_check_reset_gen2,
1010 	.service_isr = megasas_isr,
1011 	.tasklet = megasas_complete_cmd_dpc,
1012 	.init_adapter = megasas_init_adapter_mfi,
1013 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
1014 	.issue_dcmd = megasas_issue_dcmd,
1015 };
1016 
1017 /**
1018 *	This is the end of set of functions & definitions
1019 *       specific to gen2 (deviceid : 0x78, 0x79) controllers
1020 */
1021 
1022 /*
1023  * Template added for TB (Fusion)
1024  */
1025 extern struct megasas_instance_template megasas_instance_template_fusion;
1026 
1027 /**
1028  * megasas_issue_polled -	Issues a polling command
1029  * @instance:			Adapter soft state
1030  * @cmd:			Command packet to be issued
1031  *
1032  * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
1033  */
1034 int
1035 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
1036 {
1037 	struct megasas_header *frame_hdr = &cmd->frame->hdr;
1038 
1039 	frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1040 	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1041 
1042 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1043 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1044 			__func__, __LINE__);
1045 		return DCMD_NOT_FIRED;
1046 	}
1047 
1048 	instance->instancet->issue_dcmd(instance, cmd);
1049 
1050 	return wait_and_poll(instance, cmd, instance->requestorId ?
1051 			MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
1052 }
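/*
 * Note (editorial): because MFI_FRAME_DONT_POST_IN_REPLY_QUEUE is set above,
 * firmware is expected to update cmd_status in the frame itself rather than
 * post to the reply queue, which is what wait_and_poll() spins on; this path
 * is typically used for DCMDs issued before interrupts are available.
 */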
1053 
1054 /**
1055  * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
1056  * @instance:			Adapter soft state
1057  * @cmd:			Command to be issued
1058  * @timeout:			Timeout in seconds
1059  *
1060  * This function waits on an event for the command to be returned from ISR.
1061  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1062  * Used to issue ioctl commands.
1063  */
1064 int
1065 megasas_issue_blocked_cmd(struct megasas_instance *instance,
1066 			  struct megasas_cmd *cmd, int timeout)
1067 {
1068 	int ret = 0;
1069 	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1070 
1071 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1072 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1073 			__func__, __LINE__);
1074 		return DCMD_NOT_FIRED;
1075 	}
1076 
1077 	instance->instancet->issue_dcmd(instance, cmd);
1078 
1079 	if (timeout) {
1080 		ret = wait_event_timeout(instance->int_cmd_wait_q,
1081 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1082 		if (!ret) {
1083 			dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
1084 				__func__, __LINE__);
1085 			return DCMD_TIMEOUT;
1086 		}
1087 	} else
1088 		wait_event(instance->int_cmd_wait_q,
1089 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1090 
1091 	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1092 		DCMD_SUCCESS : DCMD_FAILED;
1093 }
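/*
 * Note (editorial): cmd_status_drv is presumed to be updated by the command
 * completion path, which also wakes int_cmd_wait_q; the wait above therefore
 * returns DCMD_SUCCESS only when firmware reported MFI_STAT_OK, and
 * DCMD_TIMEOUT if no completion arrived within the requested timeout.
 */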
1094 
1095 /**
1096  * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
1097  * @instance:				Adapter soft state
1098  * @cmd_to_abort:			Previously issued cmd to be aborted
1099  * @timeout:				Timeout in seconds
1100  *
1101  * MFI firmware can abort a previously issued AEN command (automatic event
1102  * notification). The megasas_issue_blocked_abort_cmd() issues such abort
1103  * cmd and waits for return status.
1104  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1105  */
1106 static int
1107 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1108 				struct megasas_cmd *cmd_to_abort, int timeout)
1109 {
1110 	struct megasas_cmd *cmd;
1111 	struct megasas_abort_frame *abort_fr;
1112 	int ret = 0;
1113 
1114 	cmd = megasas_get_cmd(instance);
1115 
1116 	if (!cmd)
1117 		return -1;
1118 
1119 	abort_fr = &cmd->frame->abort;
1120 
1121 	/*
1122 	 * Prepare and issue the abort frame
1123 	 */
1124 	abort_fr->cmd = MFI_CMD_ABORT;
1125 	abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
1126 	abort_fr->flags = cpu_to_le16(0);
1127 	abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
1128 	abort_fr->abort_mfi_phys_addr_lo =
1129 		cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
1130 	abort_fr->abort_mfi_phys_addr_hi =
1131 		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1132 
1133 	cmd->sync_cmd = 1;
1134 	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1135 
1136 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1137 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1138 			__func__, __LINE__);
1139 		return DCMD_NOT_FIRED;
1140 	}
1141 
1142 	instance->instancet->issue_dcmd(instance, cmd);
1143 
1144 	if (timeout) {
1145 		ret = wait_event_timeout(instance->abort_cmd_wait_q,
1146 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1147 		if (!ret) {
1148 			dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n",
1149 				__func__, __LINE__);
1150 			return DCMD_TIMEOUT;
1151 		}
1152 	} else
1153 		wait_event(instance->abort_cmd_wait_q,
1154 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1155 
1156 	cmd->sync_cmd = 0;
1157 
1158 	megasas_return_cmd(instance, cmd);
1159 	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1160 		DCMD_SUCCESS : DCMD_FAILED;
1161 }
1162 
1163 /**
1164  * megasas_make_sgl32 -	Prepares 32-bit SGL
1165  * @instance:		Adapter soft state
1166  * @scp:		SCSI command from the mid-layer
1167  * @mfi_sgl:		SGL to be filled in
1168  *
1169  * If successful, this function returns the number of SG elements. Otherwise,
1170  * it returns -1.
1171  */
1172 static int
1173 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
1174 		   union megasas_sgl *mfi_sgl)
1175 {
1176 	int i;
1177 	int sge_count;
1178 	struct scatterlist *os_sgl;
1179 
1180 	sge_count = scsi_dma_map(scp);
1181 	BUG_ON(sge_count < 0);
1182 
1183 	if (sge_count) {
1184 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1185 			mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1186 			mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
1187 		}
1188 	}
1189 	return sge_count;
1190 }
1191 
1192 /**
1193  * megasas_make_sgl64 -	Prepares 64-bit SGL
1194  * @instance:		Adapter soft state
1195  * @scp:		SCSI command from the mid-layer
1196  * @mfi_sgl:		SGL to be filled in
1197  *
1198  * If successful, this function returns the number of SG elements. Otherwise,
1199  * it returns -1.
1200  */
1201 static int
1202 megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
1203 		   union megasas_sgl *mfi_sgl)
1204 {
1205 	int i;
1206 	int sge_count;
1207 	struct scatterlist *os_sgl;
1208 
1209 	sge_count = scsi_dma_map(scp);
1210 	BUG_ON(sge_count < 0);
1211 
1212 	if (sge_count) {
1213 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1214 			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1215 			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
1216 		}
1217 	}
1218 	return sge_count;
1219 }
1220 
1221 /**
1222  * megasas_make_sgl_skinny - Prepares IEEE SGL
1223  * @instance:           Adapter soft state
1224  * @scp:                SCSI command from the mid-layer
1225  * @mfi_sgl:            SGL to be filled in
1226  *
1227  * If successful, this function returns the number of SG elements. Otherwise,
1228  * it returns -1.
1229  */
1230 static int
1231 megasas_make_sgl_skinny(struct megasas_instance *instance,
1232 		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
1233 {
1234 	int i;
1235 	int sge_count;
1236 	struct scatterlist *os_sgl;
1237 
1238 	sge_count = scsi_dma_map(scp);
1239 
1240 	if (sge_count) {
1241 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1242 			mfi_sgl->sge_skinny[i].length =
1243 				cpu_to_le32(sg_dma_len(os_sgl));
1244 			mfi_sgl->sge_skinny[i].phys_addr =
1245 				cpu_to_le64(sg_dma_address(os_sgl));
1246 			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
1247 		}
1248 	}
1249 	return sge_count;
1250 }
1251 
1252  /**
1253  * megasas_get_frame_count - Computes the number of frames
1254  * @frame_type		: type of frame- io or pthru frame
1255  * @sge_count		: number of sg elements
1256  *
1257  * Returns the number of frames required for the given number of SG elements (sge_count)
1258  */
1259 
1260 static u32 megasas_get_frame_count(struct megasas_instance *instance,
1261 			u8 sge_count, u8 frame_type)
1262 {
1263 	int num_cnt;
1264 	int sge_bytes;
1265 	u32 sge_sz;
1266 	u32 frame_count = 0;
1267 
1268 	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1269 	    sizeof(struct megasas_sge32);
1270 
1271 	if (instance->flag_ieee) {
1272 		sge_sz = sizeof(struct megasas_sge_skinny);
1273 	}
1274 
1275 	/*
1276 	 * Main frame can contain 2 SGEs for 64-bit SGLs and
1277 	 * 3 SGEs for 32-bit SGLs for ldio &
1278 	 * 1 SGEs for 64-bit SGLs and
1279 	 * 2 SGEs for 32-bit SGLs for pthru frame
1280 	 */
1281 	if (unlikely(frame_type == PTHRU_FRAME)) {
1282 		if (instance->flag_ieee == 1) {
1283 			num_cnt = sge_count - 1;
1284 		} else if (IS_DMA64)
1285 			num_cnt = sge_count - 1;
1286 		else
1287 			num_cnt = sge_count - 2;
1288 	} else {
1289 		if (instance->flag_ieee == 1) {
1290 			num_cnt = sge_count - 1;
1291 		} else if (IS_DMA64)
1292 			num_cnt = sge_count - 2;
1293 		else
1294 			num_cnt = sge_count - 3;
1295 	}
1296 
1297 	if (num_cnt > 0) {
1298 		sge_bytes = sge_sz * num_cnt;
1299 
1300 		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
1301 		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
1302 	}
1303 	/* Main frame */
1304 	frame_count += 1;
1305 
1306 	if (frame_count > 7)
1307 		frame_count = 8;
1308 	return frame_count;
1309 }
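/*
 * Worked example (illustrative, assuming MEGAMFI_FRAME_SIZE is 64 bytes and a
 * 16-byte skinny SGE): an IEEE LDIO with sge_count = 10 leaves num_cnt = 9
 * SGEs beyond the main frame, i.e. 144 bytes, which rounds up to 3 extra
 * frames, so frame_count = 3 + 1 (main frame) = 4.
 */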
1310 
1311 /**
1312  * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
1313  * @instance:		Adapter soft state
1314  * @scp:		SCSI command
1315  * @cmd:		Command to be prepared in
1316  *
1317  * This function prepares CDB commands. These are typically pass-through
1318  * commands to the devices.
1319  */
1320 static int
1321 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1322 		   struct megasas_cmd *cmd)
1323 {
1324 	u32 is_logical;
1325 	u32 device_id;
1326 	u16 flags = 0;
1327 	struct megasas_pthru_frame *pthru;
1328 
1329 	is_logical = MEGASAS_IS_LOGICAL(scp->device);
1330 	device_id = MEGASAS_DEV_INDEX(scp);
1331 	pthru = (struct megasas_pthru_frame *)cmd->frame;
1332 
1333 	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1334 		flags = MFI_FRAME_DIR_WRITE;
1335 	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1336 		flags = MFI_FRAME_DIR_READ;
1337 	else if (scp->sc_data_direction == PCI_DMA_NONE)
1338 		flags = MFI_FRAME_DIR_NONE;
1339 
1340 	if (instance->flag_ieee == 1) {
1341 		flags |= MFI_FRAME_IEEE;
1342 	}
1343 
1344 	/*
1345 	 * Prepare the DCDB frame
1346 	 */
1347 	pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
1348 	pthru->cmd_status = 0x0;
1349 	pthru->scsi_status = 0x0;
1350 	pthru->target_id = device_id;
1351 	pthru->lun = scp->device->lun;
1352 	pthru->cdb_len = scp->cmd_len;
1353 	pthru->timeout = 0;
1354 	pthru->pad_0 = 0;
1355 	pthru->flags = cpu_to_le16(flags);
1356 	pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
1357 
1358 	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1359 
1360 	/*
1361 	 * If the command is for the tape device, set the
1362 	 * pthru timeout to the os layer timeout value.
1363 	 */
1364 	if (scp->device->type == TYPE_TAPE) {
1365 		if ((scp->request->timeout / HZ) > 0xFFFF)
1366 			pthru->timeout = cpu_to_le16(0xFFFF);
1367 		else
1368 			pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
1369 	}
1370 
1371 	/*
1372 	 * Construct SGL
1373 	 */
1374 	if (instance->flag_ieee == 1) {
1375 		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1376 		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
1377 						      &pthru->sgl);
1378 	} else if (IS_DMA64) {
1379 		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1380 		pthru->sge_count = megasas_make_sgl64(instance, scp,
1381 						      &pthru->sgl);
1382 	} else
1383 		pthru->sge_count = megasas_make_sgl32(instance, scp,
1384 						      &pthru->sgl);
1385 
1386 	if (pthru->sge_count > instance->max_num_sge) {
1387 		dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
1388 			pthru->sge_count);
1389 		return 0;
1390 	}
1391 
1392 	/*
1393 	 * Sense info specific
1394 	 */
1395 	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
1396 	pthru->sense_buf_phys_addr_hi =
1397 		cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
1398 	pthru->sense_buf_phys_addr_lo =
1399 		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
1400 
1401 	/*
1402 	 * Compute the total number of frames this command consumes. FW uses
1403 	 * this number to pull sufficient number of frames from host memory.
1404 	 */
1405 	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
1406 							PTHRU_FRAME);
1407 
1408 	return cmd->frame_count;
1409 }
1410 
1411 /**
1412  * megasas_build_ldio -	Prepares IOs to logical devices
1413  * @instance:		Adapter soft state
1414  * @scp:		SCSI command
1415  * @cmd:		Command to be prepared
1416  *
1417  * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
1418  */
1419 static int
1420 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1421 		   struct megasas_cmd *cmd)
1422 {
1423 	u32 device_id;
1424 	u8 sc = scp->cmnd[0];
1425 	u16 flags = 0;
1426 	struct megasas_io_frame *ldio;
1427 
1428 	device_id = MEGASAS_DEV_INDEX(scp);
1429 	ldio = (struct megasas_io_frame *)cmd->frame;
1430 
1431 	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1432 		flags = MFI_FRAME_DIR_WRITE;
1433 	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1434 		flags = MFI_FRAME_DIR_READ;
1435 
1436 	if (instance->flag_ieee == 1) {
1437 		flags |= MFI_FRAME_IEEE;
1438 	}
1439 
1440 	/*
1441 	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
1442 	 */
1443 	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
1444 	ldio->cmd_status = 0x0;
1445 	ldio->scsi_status = 0x0;
1446 	ldio->target_id = device_id;
1447 	ldio->timeout = 0;
1448 	ldio->reserved_0 = 0;
1449 	ldio->pad_0 = 0;
1450 	ldio->flags = cpu_to_le16(flags);
1451 	ldio->start_lba_hi = 0;
1452 	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
1453 
1454 	/*
1455 	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1456 	 */
1457 	if (scp->cmd_len == 6) {
1458 		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
1459 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
1460 						 ((u32) scp->cmnd[2] << 8) |
1461 						 (u32) scp->cmnd[3]);
1462 
1463 		ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
1464 	}
1465 
1466 	/*
1467 	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1468 	 */
1469 	else if (scp->cmd_len == 10) {
1470 		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
1471 					      ((u32) scp->cmnd[7] << 8));
1472 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1473 						 ((u32) scp->cmnd[3] << 16) |
1474 						 ((u32) scp->cmnd[4] << 8) |
1475 						 (u32) scp->cmnd[5]);
1476 	}
1477 
1478 	/*
1479 	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1480 	 */
1481 	else if (scp->cmd_len == 12) {
1482 		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1483 					      ((u32) scp->cmnd[7] << 16) |
1484 					      ((u32) scp->cmnd[8] << 8) |
1485 					      (u32) scp->cmnd[9]);
1486 
1487 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1488 						 ((u32) scp->cmnd[3] << 16) |
1489 						 ((u32) scp->cmnd[4] << 8) |
1490 						 (u32) scp->cmnd[5]);
1491 	}
1492 
1493 	/*
1494 	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1495 	 */
1496 	else if (scp->cmd_len == 16) {
1497 		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
1498 					      ((u32) scp->cmnd[11] << 16) |
1499 					      ((u32) scp->cmnd[12] << 8) |
1500 					      (u32) scp->cmnd[13]);
1501 
1502 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1503 						 ((u32) scp->cmnd[7] << 16) |
1504 						 ((u32) scp->cmnd[8] << 8) |
1505 						 (u32) scp->cmnd[9]);
1506 
1507 		ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1508 						 ((u32) scp->cmnd[3] << 16) |
1509 						 ((u32) scp->cmnd[4] << 8) |
1510 						 (u32) scp->cmnd[5]);
1511 
1512 	}
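	/*
	 * Example (illustrative): a READ(10) CDB of
	 * 28 00 00 00 10 00 00 00 08 00 is decoded above as
	 * start_lba_lo = 0x1000 and lba_count = 8; start_lba_hi is only
	 * filled in for 16-byte CDBs, which carry a 64-bit LBA.
	 */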
1513 
1514 	/*
1515 	 * Construct SGL
1516 	 */
1517 	if (instance->flag_ieee) {
1518 		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1519 		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1520 					      &ldio->sgl);
1521 	} else if (IS_DMA64) {
1522 		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1523 		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
1524 	} else
1525 		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1526 
1527 	if (ldio->sge_count > instance->max_num_sge) {
1528 		dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
1529 			ldio->sge_count);
1530 		return 0;
1531 	}
1532 
1533 	/*
1534 	 * Sense info specific
1535 	 */
1536 	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
1537 	ldio->sense_buf_phys_addr_hi = 0;
1538 	ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
1539 
1540 	/*
1541 	 * Compute the total number of frames this command consumes. FW uses
1542 	 * this number to pull sufficient number of frames from host memory.
1543 	 */
1544 	cmd->frame_count = megasas_get_frame_count(instance,
1545 			ldio->sge_count, IO_FRAME);
1546 
1547 	return cmd->frame_count;
1548 }
1549 
1550 /**
1551  * megasas_cmd_type -		Checks if the cmd is for logical drive/sysPD
1552  *				and whether it's RW or non RW
1553  * @scmd:			SCSI command
1554  *
1555  */
1556 inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1557 {
1558 	int ret;
1559 
1560 	switch (cmd->cmnd[0]) {
1561 	case READ_10:
1562 	case WRITE_10:
1563 	case READ_12:
1564 	case WRITE_12:
1565 	case READ_6:
1566 	case WRITE_6:
1567 	case READ_16:
1568 	case WRITE_16:
1569 		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1570 			READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
1571 		break;
1572 	default:
1573 		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1574 			NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
1575 	}
1576 	return ret;
1577 }
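/*
 * Example (illustrative): a READ(10) addressed to a logical drive classifies
 * as READ_WRITE_LDIO and is built via megasas_build_ldio(), while an INQUIRY
 * to a system PD classifies as NON_READ_WRITE_SYSPDIO and goes through the
 * pass-through (DCDB) path.
 */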
1578 
1579  /**
1580  * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
1581  *					in FW
1582  * @instance:				Adapter soft state
1583  */
1584 static inline void
1585 megasas_dump_pending_frames(struct megasas_instance *instance)
1586 {
1587 	struct megasas_cmd *cmd;
1588 	int i,n;
1589 	union megasas_sgl *mfi_sgl;
1590 	struct megasas_io_frame *ldio;
1591 	struct megasas_pthru_frame *pthru;
1592 	u32 sgcount;
1593 	u16 max_cmd = instance->max_fw_cmds;
1594 
1595 	dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
1596 	dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
1597 	if (IS_DMA64)
1598 		dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
1599 	else
1600 		dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
1601 
1602 	dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
1603 	for (i = 0; i < max_cmd; i++) {
1604 		cmd = instance->cmd_list[i];
1605 		if (!cmd->scmd)
1606 			continue;
1607 		dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
1608 		if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
1609 			ldio = (struct megasas_io_frame *)cmd->frame;
1610 			mfi_sgl = &ldio->sgl;
1611 			sgcount = ldio->sge_count;
1612 			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
1613 			" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1614 			instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
1615 			le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
1616 			le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
1617 		} else {
1618 			pthru = (struct megasas_pthru_frame *) cmd->frame;
1619 			mfi_sgl = &pthru->sgl;
1620 			sgcount = pthru->sge_count;
1621 			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
1622 			"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1623 			instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
1624 			pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
1625 			le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
1626 		}
1627 		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
1628 			for (n = 0; n < sgcount; n++) {
1629 				if (IS_DMA64)
1630 					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
1631 						le32_to_cpu(mfi_sgl->sge64[n].length),
1632 						le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
1633 				else
1634 					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
1635 						le32_to_cpu(mfi_sgl->sge32[n].length),
1636 						le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
1637 			}
1638 		}
1639 	} /*for max_cmd*/
1640 	dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
1641 	for (i = 0; i < max_cmd; i++) {
1642 
1643 		cmd = instance->cmd_list[i];
1644 
1645 		if (cmd->sync_cmd == 1)
1646 			dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
1647 	}
1648 	dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
1649 }
1650 
1651 u32
1652 megasas_build_and_issue_cmd(struct megasas_instance *instance,
1653 			    struct scsi_cmnd *scmd)
1654 {
1655 	struct megasas_cmd *cmd;
1656 	u32 frame_count;
1657 
1658 	cmd = megasas_get_cmd(instance);
1659 	if (!cmd)
1660 		return SCSI_MLQUEUE_HOST_BUSY;
1661 
1662 	/*
1663 	 * Logical drive command
1664 	 */
1665 	if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
1666 		frame_count = megasas_build_ldio(instance, scmd, cmd);
1667 	else
1668 		frame_count = megasas_build_dcdb(instance, scmd, cmd);
1669 
1670 	if (!frame_count)
1671 		goto out_return_cmd;
1672 
1673 	cmd->scmd = scmd;
1674 	scmd->SCp.ptr = (char *)cmd;
1675 
1676 	/*
1677 	 * Issue the command to the FW
1678 	 */
1679 	atomic_inc(&instance->fw_outstanding);
1680 
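	/*
	 * fire_cmd expects the number of frames beyond the first one;
	 * cmd->frame_count includes the first frame, hence the "- 1" below.
	 */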
1681 	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1682 				cmd->frame_count-1, instance->reg_set);
1683 
1684 	return 0;
1685 out_return_cmd:
1686 	megasas_return_cmd(instance, cmd);
1687 	return SCSI_MLQUEUE_HOST_BUSY;
1688 }
1689 
1690 
1691 /**
1692  * megasas_queue_command -	Queue entry point
1693  * @scmd:			SCSI command to be queued
1694  * @done:			Callback entry point
1695  */
1696 static int
1697 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1698 {
1699 	struct megasas_instance *instance;
1700 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1701 
1702 	instance = (struct megasas_instance *)
1703 	    scmd->device->host->hostdata;
1704 
1705 	if (instance->unload == 1) {
1706 		scmd->result = DID_NO_CONNECT << 16;
1707 		scmd->scsi_done(scmd);
1708 		return 0;
1709 	}
1710 
1711 	if (instance->issuepend_done == 0)
1712 		return SCSI_MLQUEUE_HOST_BUSY;
1713 
1714 
1715 	/* Check for an mpio path and adjust behavior */
1716 	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
1717 		if (megasas_check_mpio_paths(instance, scmd) ==
1718 		    (DID_REQUEUE << 16)) {
1719 			return SCSI_MLQUEUE_HOST_BUSY;
1720 		} else {
1721 			scmd->result = DID_NO_CONNECT << 16;
1722 			scmd->scsi_done(scmd);
1723 			return 0;
1724 		}
1725 	}
1726 
1727 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1728 		scmd->result = DID_NO_CONNECT << 16;
1729 		scmd->scsi_done(scmd);
1730 		return 0;
1731 	}
1732 
1733 	mr_device_priv_data = scmd->device->hostdata;
1734 	if (!mr_device_priv_data) {
1735 		scmd->result = DID_NO_CONNECT << 16;
1736 		scmd->scsi_done(scmd);
1737 		return 0;
1738 	}
1739 
1740 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1741 		return SCSI_MLQUEUE_HOST_BUSY;
1742 
1743 	if (mr_device_priv_data->tm_busy)
1744 		return SCSI_MLQUEUE_DEVICE_BUSY;
1745 
1746 
1747 	scmd->result = 0;
1748 
1749 	if (MEGASAS_IS_LOGICAL(scmd->device) &&
1750 	    (scmd->device->id >= instance->fw_supported_vd_count ||
1751 		scmd->device->lun)) {
1752 		scmd->result = DID_BAD_TARGET << 16;
1753 		goto out_done;
1754 	}
1755 
1756 	if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
1757 	    MEGASAS_IS_LOGICAL(scmd->device) &&
1758 	    (!instance->fw_sync_cache_support)) {
1759 		scmd->result = DID_OK << 16;
1760 		goto out_done;
1761 	}
1762 
1763 	return instance->instancet->build_and_issue_cmd(instance, scmd);
1764 
1765  out_done:
1766 	scmd->scsi_done(scmd);
1767 	return 0;
1768 }
1769 
1770 static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1771 {
1772 	int i;
1773 
1774 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
1775 
1776 		if ((megasas_mgmt_info.instance[i]) &&
1777 		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
1778 			return megasas_mgmt_info.instance[i];
1779 	}
1780 
1781 	return NULL;
1782 }
1783 
1784 /*
1785 * megasas_set_dynamic_target_properties -
1786 * Device properties set by the driver may not be static and need to be
1787 * updated after an OCR.
1788 *
1789 * set tm_capable.
1790 * set dma alignment (only for EEDP protection enabled VDs).
1791 *
1792 * @sdev: OS provided scsi device
1793 *
1794 * Returns void
1795 */
1796 void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
1797 					   bool is_target_prop)
1798 {
1799 	u16 pd_index = 0, ld;
1800 	u32 device_id;
1801 	struct megasas_instance *instance;
1802 	struct fusion_context *fusion;
1803 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1804 	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1805 	struct MR_LD_RAID *raid;
1806 	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1807 
1808 	instance = megasas_lookup_instance(sdev->host->host_no);
1809 	fusion = instance->ctrl_context;
1810 	mr_device_priv_data = sdev->hostdata;
1811 
1812 	if (!fusion || !mr_device_priv_data)
1813 		return;
1814 
1815 	if (MEGASAS_IS_LOGICAL(sdev)) {
1816 		device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
1817 					+ sdev->id;
1818 		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1819 		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1820 		if (ld >= instance->fw_supported_vd_count)
1821 			return;
1822 		raid = MR_LdRaidGet(ld, local_map_ptr);
1823 
1824 		if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
1825 			blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1826 
1827 		mr_device_priv_data->is_tm_capable =
1828 			raid->capability.tmCapable;
1829 	} else if (instance->use_seqnum_jbod_fp) {
1830 		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1831 			sdev->id;
1832 		pd_sync = (void *)fusion->pd_seq_sync
1833 				[(instance->pd_seq_map_id - 1) & 1];
1834 		mr_device_priv_data->is_tm_capable =
1835 			pd_sync->seq[pd_index].capability.tmCapable;
1836 	}
1837 
1838 	if (is_target_prop && instance->tgt_prop->reset_tmo) {
1839 		/*
1840 		 * If FW provides a target reset timeout value, driver will use
1841 		 * it. If not set, fallback to default values.
1842 		 */
1843 		mr_device_priv_data->target_reset_tmo =
1844 			min_t(u8, instance->max_reset_tmo,
1845 			      instance->tgt_prop->reset_tmo);
1846 		mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo;
1847 	} else {
1848 		mr_device_priv_data->target_reset_tmo =
1849 						MEGASAS_DEFAULT_TM_TIMEOUT;
1850 		mr_device_priv_data->task_abort_tmo =
1851 						MEGASAS_DEFAULT_TM_TIMEOUT;
1852 	}
1853 }
1854 
1855 /*
1856  * megasas_set_nvme_device_properties -
1857  * set nomerges=2
1858  * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
1859  * set maximum io transfer = MDTS of NVME device provided by MR firmware.
1860  *
1861  * MR firmware provides the value in KB. The caller of this function
1862  * converts KB into bytes.
1863  *
1864  * e.g. MDTS=5 means 2^5 * nvme page size. (In case of 4K page size,
1865  * MR firmware provides the value 128 as (32 * 4K) = 128K.)
1866  *
1867  * @sdev:				scsi device
1868  * @max_io_size:				maximum io transfer size
1869  *
1870  */
1871 static inline void
1872 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
1873 {
1874 	struct megasas_instance *instance;
1875 	u32 mr_nvme_pg_size;
1876 
1877 	instance = (struct megasas_instance *)sdev->host->hostdata;
1878 	mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1879 				MR_DEFAULT_NVME_PAGE_SIZE);
1880 
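	/*
	 * max_io_size arrives here in bytes (the caller converted the KB
	 * value reported by firmware), so dividing by 512 gives the sector
	 * limit, e.g. 128K / 512 = 256 sectors.
	 */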
1881 	blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
1882 
1883 	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue);
1884 	blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
1885 }
1886 
1887 
1888 /*
1889  * megasas_set_static_target_properties -
1890  * Device properties set by the driver are static and do not need to be
1891  * updated after an OCR.
1892  *
1893  * set io timeout
1894  * set device queue depth
1895  * set nvme device properties. see - megasas_set_nvme_device_properties
1896  *
1897  * @sdev:				scsi device
1898  * @is_target_prop			true, if fw provided target properties.
1899  */
1900 static void megasas_set_static_target_properties(struct scsi_device *sdev,
1901 						 bool is_target_prop)
1902 {
1903 	u16	target_index = 0;
1904 	u8 interface_type;
1905 	u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
1906 	u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
1907 	u32 tgt_device_qd;
1908 	struct megasas_instance *instance;
1909 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1910 
1911 	instance = megasas_lookup_instance(sdev->host->host_no);
1912 	mr_device_priv_data = sdev->hostdata;
1913 	interface_type  = mr_device_priv_data->interface_type;
1914 
1915 	/*
1916 	 * The RAID firmware may require extended timeouts.
1917 	 */
1918 	blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
1919 
1920 	target_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
1921 
1922 	switch (interface_type) {
1923 	case SAS_PD:
1924 		device_qd = MEGASAS_SAS_QD;
1925 		break;
1926 	case SATA_PD:
1927 		device_qd = MEGASAS_SATA_QD;
1928 		break;
1929 	case NVME_PD:
1930 		device_qd = MEGASAS_NVME_QD;
1931 		break;
1932 	}
1933 
1934 	if (is_target_prop) {
1935 		tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
1936 		if (tgt_device_qd &&
1937 		    (tgt_device_qd <= instance->host->can_queue))
1938 			device_qd = tgt_device_qd;
1939 
1940 		/* max_io_size_kb will be set to non zero for
1941 		 * nvme based vd and syspd.
1942 		 */
1943 		max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
1944 	}
1945 
1946 	if (instance->nvme_page_size && max_io_size_kb)
1947 		megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
1948 
1949 	scsi_change_queue_depth(sdev, device_qd);
1950 
1951 }
1952 
1953 
1954 static int megasas_slave_configure(struct scsi_device *sdev)
1955 {
1956 	u16 pd_index = 0;
1957 	struct megasas_instance *instance;
1958 	int ret_target_prop = DCMD_FAILED;
1959 	bool is_target_prop = false;
1960 
1961 	instance = megasas_lookup_instance(sdev->host->host_no);
1962 	if (instance->pd_list_not_supported) {
1963 		if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
1964 			pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1965 				sdev->id;
1966 			if (instance->pd_list[pd_index].driveState !=
1967 				MR_PD_STATE_SYSTEM)
1968 				return -ENXIO;
1969 		}
1970 	}
1971 
1972 	mutex_lock(&instance->reset_mutex);
1973 	/* Send DCMD to Firmware and cache the information */
1974 	if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
1975 		megasas_get_pd_info(instance, sdev);
1976 
1977 	/* Some ventura firmware may not have instance->nvme_page_size set.
1978 	 * Do not send MR_DCMD_DRV_GET_TARGET_PROP
1979 	 */
1980 	if ((instance->tgt_prop) && (instance->nvme_page_size))
1981 		ret_target_prop = megasas_get_target_prop(instance, sdev);
1982 
1983 	is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
1984 	megasas_set_static_target_properties(sdev, is_target_prop);
1985 
1986 	/* This sdev property may change post OCR */
1987 	megasas_set_dynamic_target_properties(sdev, is_target_prop);
1988 
1989 	mutex_unlock(&instance->reset_mutex);
1990 
1991 	return 0;
1992 }
1993 
1994 static int megasas_slave_alloc(struct scsi_device *sdev)
1995 {
1996 	u16 pd_index = 0;
1997 	struct megasas_instance *instance;
1998 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1999 
2000 	instance = megasas_lookup_instance(sdev->host->host_no);
2001 	if (!MEGASAS_IS_LOGICAL(sdev)) {
2002 		/*
2003 		 * Open the OS scan to the SYSTEM PD
2004 		 */
2005 		pd_index =
2006 			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2007 			sdev->id;
2008 		if ((instance->pd_list_not_supported ||
2009 			instance->pd_list[pd_index].driveState ==
2010 			MR_PD_STATE_SYSTEM)) {
2011 			goto scan_target;
2012 		}
2013 		return -ENXIO;
2014 	}
2015 
2016 scan_target:
2017 	mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
2018 					GFP_KERNEL);
2019 	if (!mr_device_priv_data)
2020 		return -ENOMEM;
2021 	sdev->hostdata = mr_device_priv_data;
2022 
2023 	atomic_set(&mr_device_priv_data->r1_ldio_hint,
2024 		   instance->r1_ldio_hint_default);
2025 	return 0;
2026 }
2027 
2028 static void megasas_slave_destroy(struct scsi_device *sdev)
2029 {
2030 	kfree(sdev->hostdata);
2031 	sdev->hostdata = NULL;
2032 }
2033 
2034 /*
2035 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after
2036 *                                       the adapter has been killed
2037 * @instance:				Adapter soft state
2038 *
2039 */
2040 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
2041 {
2042 	int i;
2043 	struct megasas_cmd *cmd_mfi;
2044 	struct megasas_cmd_fusion *cmd_fusion;
2045 	struct fusion_context *fusion = instance->ctrl_context;
2046 
2047 	/* Find all outstanding ioctls */
2048 	if (fusion) {
2049 		for (i = 0; i < instance->max_fw_cmds; i++) {
2050 			cmd_fusion = fusion->cmd_list[i];
2051 			if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
2052 				cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2053 				if (cmd_mfi->sync_cmd &&
2054 				    (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
2055 					cmd_mfi->frame->hdr.cmd_status =
2056 							MFI_STAT_WRONG_STATE;
2057 					megasas_complete_cmd(instance,
2058 							     cmd_mfi, DID_OK);
2059 				}
2060 			}
2061 		}
2062 	} else {
2063 		for (i = 0; i < instance->max_fw_cmds; i++) {
2064 			cmd_mfi = instance->cmd_list[i];
2065 			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
2066 				MFI_CMD_ABORT)
2067 				megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2068 		}
2069 	}
2070 }
2071 
2072 
2073 void megaraid_sas_kill_hba(struct megasas_instance *instance)
2074 {
2075 	/* Set critical error to block I/O & ioctls in case caller didn't */
2076 	atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2077 	/* Wait 1 second to ensure IO or ioctls in build have posted */
2078 	msleep(1000);
2079 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2080 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2081 		(instance->adapter_type != MFI_SERIES)) {
2082 		writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
2083 		/* Flush */
2084 		readl(&instance->reg_set->doorbell);
2085 		if (instance->requestorId && instance->peerIsPresent)
2086 			memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2087 	} else {
2088 		writel(MFI_STOP_ADP,
2089 			&instance->reg_set->inbound_doorbell);
2090 	}
2091 	/* Complete outstanding ioctls when adapter is killed */
2092 	megasas_complete_outstanding_ioctls(instance);
2093 }
2094 
2095  /**
2096   * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
2097   *					restored to max value
2098   * @instance:			Adapter soft state
2099   *
2100   */
2101 void
2102 megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
2103 {
2104 	unsigned long flags;
2105 
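	/*
	 * Restore the full queue depth only if the adapter has been marked
	 * busy for at least 5 seconds and outstanding commands have drained
	 * below the throttled queue depth.
	 */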
2106 	if (instance->flag & MEGASAS_FW_BUSY
2107 	    && time_after(jiffies, instance->last_time + 5 * HZ)
2108 	    && atomic_read(&instance->fw_outstanding) <
2109 	    instance->throttlequeuedepth + 1) {
2110 
2111 		spin_lock_irqsave(instance->host->host_lock, flags);
2112 		instance->flag &= ~MEGASAS_FW_BUSY;
2113 
2114 		instance->host->can_queue = instance->cur_can_queue;
2115 		spin_unlock_irqrestore(instance->host->host_lock, flags);
2116 	}
2117 }
2118 
2119 /**
2120  * megasas_complete_cmd_dpc	 -	Completes commands posted in the reply queue
2121  * @instance_addr:			Address of adapter soft state
2122  *
2123  * Tasklet to complete cmds
2124  */
2125 static void megasas_complete_cmd_dpc(unsigned long instance_addr)
2126 {
2127 	u32 producer;
2128 	u32 consumer;
2129 	u32 context;
2130 	struct megasas_cmd *cmd;
2131 	struct megasas_instance *instance =
2132 				(struct megasas_instance *)instance_addr;
2133 	unsigned long flags;
2134 
2135 	/* If we have already declared the adapter dead, do not complete cmds */
2136 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
2137 		return;
2138 
2139 	spin_lock_irqsave(&instance->completion_lock, flags);
2140 
2141 	producer = le32_to_cpu(*instance->producer);
2142 	consumer = le32_to_cpu(*instance->consumer);
2143 
2144 	while (consumer != producer) {
2145 		context = le32_to_cpu(instance->reply_queue[consumer]);
2146 		if (context >= instance->max_fw_cmds) {
2147 			dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
2148 				context);
2149 			BUG();
2150 		}
2151 
2152 		cmd = instance->cmd_list[context];
2153 
2154 		megasas_complete_cmd(instance, cmd, DID_OK);
2155 
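		/*
		 * The reply queue has max_fw_cmds + 1 slots, so wrap the
		 * consumer index at that boundary.
		 */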
2156 		consumer++;
2157 		if (consumer == (instance->max_fw_cmds + 1)) {
2158 			consumer = 0;
2159 		}
2160 	}
2161 
2162 	*instance->consumer = cpu_to_le32(producer);
2163 
2164 	spin_unlock_irqrestore(&instance->completion_lock, flags);
2165 
2166 	/*
2167 	 * Check if we can restore can_queue
2168 	 */
2169 	megasas_check_and_restore_queue_depth(instance);
2170 }
2171 
2172 static void megasas_sriov_heartbeat_handler(struct timer_list *t);
2173 
2174 /**
2175  * megasas_start_timer - Initializes sriov heartbeat timer object
2176  * @instance:		Adapter soft state
2177  *
2178  */
2179 void megasas_start_timer(struct megasas_instance *instance)
2180 {
2181 	struct timer_list *timer = &instance->sriov_heartbeat_timer;
2182 
2183 	timer_setup(timer, megasas_sriov_heartbeat_handler, 0);
2184 	timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF;
2185 	add_timer(timer);
2186 }
2187 
2188 static void
2189 megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
2190 
2191 static void
2192 process_fw_state_change_wq(struct work_struct *work);
2193 
2194 void megasas_do_ocr(struct megasas_instance *instance)
2195 {
2196 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
2197 	(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
2198 	(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2199 		*instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
2200 	}
2201 	instance->instancet->disable_intr(instance);
2202 	atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
2203 	instance->issuepend_done = 0;
2204 
2205 	atomic_set(&instance->fw_outstanding, 0);
2206 	megasas_internal_reset_defer_cmds(instance);
2207 	process_fw_state_change_wq(&instance->work_init);
2208 }
2209 
2210 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2211 					    int initial)
2212 {
2213 	struct megasas_cmd *cmd;
2214 	struct megasas_dcmd_frame *dcmd;
2215 	struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
2216 	dma_addr_t new_affiliation_111_h;
2217 	int ld, retval = 0;
2218 	u8 thisVf;
2219 
2220 	cmd = megasas_get_cmd(instance);
2221 
2222 	if (!cmd) {
2223 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:"
2224 		       "Failed to get cmd for scsi%d\n",
2225 			instance->host->host_no);
2226 		return -ENOMEM;
2227 	}
2228 
2229 	dcmd = &cmd->frame->dcmd;
2230 
2231 	if (!instance->vf_affiliation_111) {
2232 		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2233 		       "affiliation for scsi%d\n", instance->host->host_no);
2234 		megasas_return_cmd(instance, cmd);
2235 		return -ENOMEM;
2236 	}
2237 
2238 	if (initial)
2239 		memset(instance->vf_affiliation_111, 0,
2240 		       sizeof(struct MR_LD_VF_AFFILIATION_111));
2241 	else {
2242 		new_affiliation_111 =
2243 			pci_zalloc_consistent(instance->pdev,
2244 					      sizeof(struct MR_LD_VF_AFFILIATION_111),
2245 					      &new_affiliation_111_h);
2246 		if (!new_affiliation_111) {
2247 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2248 			       "memory for new affiliation for scsi%d\n",
2249 			       instance->host->host_no);
2250 			megasas_return_cmd(instance, cmd);
2251 			return -ENOMEM;
2252 		}
2253 	}
2254 
2255 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2256 
2257 	dcmd->cmd = MFI_CMD_DCMD;
2258 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2259 	dcmd->sge_count = 1;
2260 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2261 	dcmd->timeout = 0;
2262 	dcmd->pad_0 = 0;
2263 	dcmd->data_xfer_len =
2264 		cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
2265 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
2266 
2267 	if (initial)
2268 		dcmd->sgl.sge32[0].phys_addr =
2269 			cpu_to_le32(instance->vf_affiliation_111_h);
2270 	else
2271 		dcmd->sgl.sge32[0].phys_addr =
2272 			cpu_to_le32(new_affiliation_111_h);
2273 
2274 	dcmd->sgl.sge32[0].length = cpu_to_le32(
2275 		sizeof(struct MR_LD_VF_AFFILIATION_111));
2276 
2277 	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2278 	       "scsi%d\n", instance->host->host_no);
2279 
2280 	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2281 		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2282 		       " failed with status 0x%x for scsi%d\n",
2283 		       dcmd->cmd_status, instance->host->host_no);
2284 		retval = 1; /* Do a scan if we couldn't get affiliation */
2285 		goto out;
2286 	}
2287 
2288 	if (!initial) {
2289 		thisVf = new_affiliation_111->thisVf;
2290 		for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
2291 			if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
2292 			    new_affiliation_111->map[ld].policy[thisVf]) {
2293 				dev_warn(&instance->pdev->dev, "SR-IOV: "
2294 				       "Got new LD/VF affiliation for scsi%d\n",
2295 				       instance->host->host_no);
2296 				memcpy(instance->vf_affiliation_111,
2297 				       new_affiliation_111,
2298 				       sizeof(struct MR_LD_VF_AFFILIATION_111));
2299 				retval = 1;
2300 				goto out;
2301 			}
2302 	}
2303 out:
2304 	if (new_affiliation_111) {
2305 		pci_free_consistent(instance->pdev,
2306 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
2307 				    new_affiliation_111,
2308 				    new_affiliation_111_h);
2309 	}
2310 
2311 	megasas_return_cmd(instance, cmd);
2312 
2313 	return retval;
2314 }
2315 
2316 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2317 					    int initial)
2318 {
2319 	struct megasas_cmd *cmd;
2320 	struct megasas_dcmd_frame *dcmd;
2321 	struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
2322 	struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
2323 	dma_addr_t new_affiliation_h;
2324 	int i, j, retval = 0, found = 0, doscan = 0;
2325 	u8 thisVf;
2326 
2327 	cmd = megasas_get_cmd(instance);
2328 
2329 	if (!cmd) {
2330 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: "
2331 		       "Failed to get cmd for scsi%d\n",
2332 		       instance->host->host_no);
2333 		return -ENOMEM;
2334 	}
2335 
2336 	dcmd = &cmd->frame->dcmd;
2337 
2338 	if (!instance->vf_affiliation) {
2339 		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2340 		       "affiliation for scsi%d\n", instance->host->host_no);
2341 		megasas_return_cmd(instance, cmd);
2342 		return -ENOMEM;
2343 	}
2344 
2345 	if (initial)
2346 		memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2347 		       sizeof(struct MR_LD_VF_AFFILIATION));
2348 	else {
2349 		new_affiliation =
2350 			pci_zalloc_consistent(instance->pdev,
2351 					      (MAX_LOGICAL_DRIVES + 1) *
2352 					      sizeof(struct MR_LD_VF_AFFILIATION),
2353 					      &new_affiliation_h);
2354 		if (!new_affiliation) {
2355 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2356 			       "memory for new affiliation for scsi%d\n",
2357 			       instance->host->host_no);
2358 			megasas_return_cmd(instance, cmd);
2359 			return -ENOMEM;
2360 		}
2361 	}
2362 
2363 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2364 
2365 	dcmd->cmd = MFI_CMD_DCMD;
2366 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2367 	dcmd->sge_count = 1;
2368 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2369 	dcmd->timeout = 0;
2370 	dcmd->pad_0 = 0;
2371 	dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2372 		sizeof(struct MR_LD_VF_AFFILIATION));
2373 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
2374 
2375 	if (initial)
2376 		dcmd->sgl.sge32[0].phys_addr =
2377 			cpu_to_le32(instance->vf_affiliation_h);
2378 	else
2379 		dcmd->sgl.sge32[0].phys_addr =
2380 			cpu_to_le32(new_affiliation_h);
2381 
2382 	dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2383 		sizeof(struct MR_LD_VF_AFFILIATION));
2384 
2385 	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2386 	       "scsi%d\n", instance->host->host_no);
2387 
2388 
2389 	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2390 		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2391 		       " failed with status 0x%x for scsi%d\n",
2392 		       dcmd->cmd_status, instance->host->host_no);
2393 		retval = 1; /* Do a scan if we couldn't get affiliation */
2394 		goto out;
2395 	}
2396 
2397 	if (!initial) {
2398 		if (!new_affiliation->ldCount) {
2399 			dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2400 			       "affiliation for passive path for scsi%d\n",
2401 			       instance->host->host_no);
2402 			retval = 1;
2403 			goto out;
2404 		}
2405 		newmap = new_affiliation->map;
2406 		savedmap = instance->vf_affiliation->map;
2407 		thisVf = new_affiliation->thisVf;
2408 		for (i = 0 ; i < new_affiliation->ldCount; i++) {
2409 			found = 0;
2410 			for (j = 0; j < instance->vf_affiliation->ldCount;
2411 			     j++) {
2412 				if (newmap->ref.targetId ==
2413 				    savedmap->ref.targetId) {
2414 					found = 1;
2415 					if (newmap->policy[thisVf] !=
2416 					    savedmap->policy[thisVf]) {
2417 						doscan = 1;
2418 						goto out;
2419 					}
2420 				}
2421 				savedmap = (struct MR_LD_VF_MAP *)
2422 					((unsigned char *)savedmap +
2423 					 savedmap->size);
2424 			}
2425 			if (!found && newmap->policy[thisVf] !=
2426 			    MR_LD_ACCESS_HIDDEN) {
2427 				doscan = 1;
2428 				goto out;
2429 			}
2430 			newmap = (struct MR_LD_VF_MAP *)
2431 				((unsigned char *)newmap + newmap->size);
2432 		}
2433 
2434 		newmap = new_affiliation->map;
2435 		savedmap = instance->vf_affiliation->map;
2436 
2437 		for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
2438 			found = 0;
2439 			for (j = 0 ; j < new_affiliation->ldCount; j++) {
2440 				if (savedmap->ref.targetId ==
2441 				    newmap->ref.targetId) {
2442 					found = 1;
2443 					if (savedmap->policy[thisVf] !=
2444 					    newmap->policy[thisVf]) {
2445 						doscan = 1;
2446 						goto out;
2447 					}
2448 				}
2449 				newmap = (struct MR_LD_VF_MAP *)
2450 					((unsigned char *)newmap +
2451 					 newmap->size);
2452 			}
2453 			if (!found && savedmap->policy[thisVf] !=
2454 			    MR_LD_ACCESS_HIDDEN) {
2455 				doscan = 1;
2456 				goto out;
2457 			}
2458 			savedmap = (struct MR_LD_VF_MAP *)
2459 				((unsigned char *)savedmap +
2460 				 savedmap->size);
2461 		}
2462 	}
2463 out:
2464 	if (doscan) {
2465 		dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2466 		       "affiliation for scsi%d\n", instance->host->host_no);
2467 		memcpy(instance->vf_affiliation, new_affiliation,
2468 		       new_affiliation->size);
2469 		retval = 1;
2470 	}
2471 
2472 	if (new_affiliation)
2473 		pci_free_consistent(instance->pdev,
2474 				    (MAX_LOGICAL_DRIVES + 1) *
2475 				    sizeof(struct MR_LD_VF_AFFILIATION),
2476 				    new_affiliation, new_affiliation_h);
2477 	megasas_return_cmd(instance, cmd);
2478 
2479 	return retval;
2480 }
2481 
2482 /* This function will get the current SR-IOV LD/VF affiliation */
2483 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
2484 	int initial)
2485 {
2486 	int retval;
2487 
2488 	if (instance->PlasmaFW111)
2489 		retval = megasas_get_ld_vf_affiliation_111(instance, initial);
2490 	else
2491 		retval = megasas_get_ld_vf_affiliation_12(instance, initial);
2492 	return retval;
2493 }
2494 
2495 /* This function will tell FW to start the SR-IOV heartbeat */
2496 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2497 					 int initial)
2498 {
2499 	struct megasas_cmd *cmd;
2500 	struct megasas_dcmd_frame *dcmd;
2501 	int retval = 0;
2502 
2503 	cmd = megasas_get_cmd(instance);
2504 
2505 	if (!cmd) {
2506 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
2507 		       "Failed to get cmd for scsi%d\n",
2508 		       instance->host->host_no);
2509 		return -ENOMEM;
2510 	}
2511 
2512 	dcmd = &cmd->frame->dcmd;
2513 
2514 	if (initial) {
2515 		instance->hb_host_mem =
2516 			pci_zalloc_consistent(instance->pdev,
2517 					      sizeof(struct MR_CTRL_HB_HOST_MEM),
2518 					      &instance->hb_host_mem_h);
2519 		if (!instance->hb_host_mem) {
2520 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2521 			       " memory for heartbeat host memory for scsi%d\n",
2522 			       instance->host->host_no);
2523 			retval = -ENOMEM;
2524 			goto out;
2525 		}
2526 	}
2527 
2528 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2529 
2530 	dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
2531 	dcmd->cmd = MFI_CMD_DCMD;
2532 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2533 	dcmd->sge_count = 1;
2534 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2535 	dcmd->timeout = 0;
2536 	dcmd->pad_0 = 0;
2537 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2538 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2539 
2540 	megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h,
2541 				 sizeof(struct MR_CTRL_HB_HOST_MEM));
2542 
2543 	dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
2544 	       instance->host->host_no);
2545 
2546 	if ((instance->adapter_type != MFI_SERIES) &&
2547 	    !instance->mask_interrupts)
2548 		retval = megasas_issue_blocked_cmd(instance, cmd,
2549 			MEGASAS_ROUTINE_WAIT_TIME_VF);
2550 	else
2551 		retval = megasas_issue_polled(instance, cmd);
2552 
2553 	if (retval) {
2554 		dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2555 			"_MEM_ALLOC DCMD %s for scsi%d\n",
2556 			(dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
2557 			"timed out" : "failed", instance->host->host_no);
2558 		retval = 1;
2559 	}
2560 
2561 out:
2562 	megasas_return_cmd(instance, cmd);
2563 
2564 	return retval;
2565 }
2566 
2567 /* Handler for SR-IOV heartbeat */
2568 static void megasas_sriov_heartbeat_handler(struct timer_list *t)
2569 {
2570 	struct megasas_instance *instance =
2571 		from_timer(instance, t, sriov_heartbeat_timer);
2572 
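	/*
	 * If firmware advanced its counter since the last run it is alive:
	 * acknowledge it by syncing the driver counter and re-arm the timer.
	 * Otherwise assume the heartbeat was missed and schedule recovery.
	 */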
2573 	if (instance->hb_host_mem->HB.fwCounter !=
2574 	    instance->hb_host_mem->HB.driverCounter) {
2575 		instance->hb_host_mem->HB.driverCounter =
2576 			instance->hb_host_mem->HB.fwCounter;
2577 		mod_timer(&instance->sriov_heartbeat_timer,
2578 			  jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2579 	} else {
2580 		dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
2581 		       "completed for scsi%d\n", instance->host->host_no);
2582 		schedule_work(&instance->work_init);
2583 	}
2584 }
2585 
2586 /**
2587  * megasas_wait_for_outstanding -	Wait for all outstanding cmds
2588  * @instance:				Adapter soft state
2589  *
2590  * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
2591  * complete all its outstanding commands. Returns error if one or more IOs
2592  * are pending after this time period. It also marks the controller dead.
2593  */
2594 static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2595 {
2596 	int i, sl, outstanding;
2597 	u32 reset_index;
2598 	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
2599 	unsigned long flags;
2600 	struct list_head clist_local;
2601 	struct megasas_cmd *reset_cmd;
2602 	u32 fw_state;
2603 
2604 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2605 		dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
2606 		__func__, __LINE__);
2607 		return FAILED;
2608 	}
2609 
2610 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2611 
2612 		INIT_LIST_HEAD(&clist_local);
2613 		spin_lock_irqsave(&instance->hba_lock, flags);
2614 		list_splice_init(&instance->internal_reset_pending_q,
2615 				&clist_local);
2616 		spin_unlock_irqrestore(&instance->hba_lock, flags);
2617 
2618 		dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
2619 		for (i = 0; i < wait_time; i++) {
2620 			msleep(1000);
2621 			if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
2622 				break;
2623 		}
2624 
2625 		if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2626 			dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
2627 			atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2628 			return FAILED;
2629 		}
2630 
2631 		reset_index = 0;
2632 		while (!list_empty(&clist_local)) {
2633 			reset_cmd = list_entry((&clist_local)->next,
2634 						struct megasas_cmd, list);
2635 			list_del_init(&reset_cmd->list);
2636 			if (reset_cmd->scmd) {
2637 				reset_cmd->scmd->result = DID_REQUEUE << 16;
2638 				dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
2639 					reset_index, reset_cmd,
2640 					reset_cmd->scmd->cmnd[0]);
2641 
2642 				reset_cmd->scmd->scsi_done(reset_cmd->scmd);
2643 				megasas_return_cmd(instance, reset_cmd);
2644 			} else if (reset_cmd->sync_cmd) {
2645 				dev_notice(&instance->pdev->dev, "%p synch cmds"
2646 						"reset queue\n",
2647 						reset_cmd);
2648 
2649 				reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
2650 				instance->instancet->fire_cmd(instance,
2651 						reset_cmd->frame_phys_addr,
2652 						0, instance->reg_set);
2653 			} else {
2654 				dev_notice(&instance->pdev->dev, "%p unexpected"
2655 					"cmds lst\n",
2656 					reset_cmd);
2657 			}
2658 			reset_index++;
2659 		}
2660 
2661 		return SUCCESS;
2662 	}
2663 
2664 	for (i = 0; i < resetwaittime; i++) {
2665 		outstanding = atomic_read(&instance->fw_outstanding);
2666 
2667 		if (!outstanding)
2668 			break;
2669 
2670 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2671 			dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
2672 			       "commands to complete\n",i,outstanding);
2673 			/*
2674 			 * Call cmd completion routine. Cmd to be
2675 			 * be completed directly without depending on isr.
2676 			 */
2677 			megasas_complete_cmd_dpc((unsigned long)instance);
2678 		}
2679 
2680 		msleep(1000);
2681 	}
2682 
2683 	i = 0;
2684 	outstanding = atomic_read(&instance->fw_outstanding);
2685 	fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2686 
2687 	if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2688 		goto no_outstanding;
2689 
2690 	if (instance->disableOnlineCtrlReset)
2691 		goto kill_hba_and_failed;
2692 	do {
2693 		if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
2694 			dev_info(&instance->pdev->dev,
2695 				"%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, oustanding 0x%x\n",
2696 				__func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
2697 			if (i == 3)
2698 				goto kill_hba_and_failed;
2699 			megasas_do_ocr(instance);
2700 
2701 			if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2702 				dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
2703 				__func__, __LINE__);
2704 				return FAILED;
2705 			}
2706 			dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
2707 				__func__, __LINE__);
2708 
2709 			for (sl = 0; sl < 10; sl++)
2710 				msleep(500);
2711 
2712 			outstanding = atomic_read(&instance->fw_outstanding);
2713 
2714 			fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2715 			if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2716 				goto no_outstanding;
2717 		}
2718 		i++;
2719 	} while (i <= 3);
2720 
2721 no_outstanding:
2722 
2723 	dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
2724 		__func__, __LINE__);
2725 	return SUCCESS;
2726 
2727 kill_hba_and_failed:
2728 
2729 	/* Reset not supported, kill adapter */
2730 	dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
2731 		" disableOnlineCtrlReset %d fw_outstanding %d \n",
2732 		__func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
2733 		atomic_read(&instance->fw_outstanding));
2734 	megasas_dump_pending_frames(instance);
2735 	megaraid_sas_kill_hba(instance);
2736 
2737 	return FAILED;
2738 }
2739 
2740 /**
2741  * megasas_generic_reset -	Generic reset routine
2742  * @scmd:			Mid-layer SCSI command
2743  *
2744  * This routine implements a generic reset handler for device, bus and host
2745  * reset requests. Device, bus and host specific reset handlers can use this
2746  * function after they do their specific tasks.
2747  */
2748 static int megasas_generic_reset(struct scsi_cmnd *scmd)
2749 {
2750 	int ret_val;
2751 	struct megasas_instance *instance;
2752 
2753 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2754 
2755 	scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
2756 		 scmd->cmnd[0], scmd->retries);
2757 
2758 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2759 		dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
2760 		return FAILED;
2761 	}
2762 
2763 	ret_val = megasas_wait_for_outstanding(instance);
2764 	if (ret_val == SUCCESS)
2765 		dev_notice(&instance->pdev->dev, "reset successful\n");
2766 	else
2767 		dev_err(&instance->pdev->dev, "failed to do reset\n");
2768 
2769 	return ret_val;
2770 }
2771 
2772 /**
2773  * megasas_reset_timer - quiesce the adapter if required
2774  * @scmd:		scsi cmnd
2775  *
2776  * Sets the FW busy flag and reduces the host->can_queue if the
2777  * cmd has not been completed within the timeout period.
2778  */
2779 static enum
2780 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
2781 {
2782 	struct megasas_instance *instance;
2783 	unsigned long flags;
2784 
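	/*
	 * If the command has been pending for more than twice the configured
	 * SCSI command timeout, stop extending the block layer timer and let
	 * the normal timeout/error handling proceed.
	 */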
2785 	if (time_after(jiffies, scmd->jiffies_at_alloc +
2786 				(scmd_timeout * 2) * HZ)) {
2787 		return BLK_EH_DONE;
2788 	}
2789 
2790 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2791 	if (!(instance->flag & MEGASAS_FW_BUSY)) {
2792 		/* FW is busy, throttle IO */
2793 		spin_lock_irqsave(instance->host->host_lock, flags);
2794 
2795 		instance->host->can_queue = instance->throttlequeuedepth;
2796 		instance->last_time = jiffies;
2797 		instance->flag |= MEGASAS_FW_BUSY;
2798 
2799 		spin_unlock_irqrestore(instance->host->host_lock, flags);
2800 	}
2801 	return BLK_EH_RESET_TIMER;
2802 }
2803 
2804 /**
2805  * megasas_dump_frame -	Dump an MPT/MFI IO request frame as hex words
2806  */
2807 static inline void
2808 megasas_dump_frame(void *mpi_request, int sz)
2809 {
2810 	int i;
2811 	__le32 *mfp = (__le32 *)mpi_request;
2812 
2813 	printk(KERN_INFO "IO request frame:\n\t");
2814 	for (i = 0; i < sz / sizeof(__le32); i++) {
2815 		if (i && ((i % 8) == 0))
2816 			printk("\n\t");
2817 		printk("%08x ", le32_to_cpu(mfp[i]));
2818 	}
2819 	printk("\n");
2820 }
2821 
2822 /**
2823  * megasas_reset_bus_host -	Bus & host reset handler entry point
2824  */
2825 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
2826 {
2827 	int ret;
2828 	struct megasas_instance *instance;
2829 
2830 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2831 
2832 	scmd_printk(KERN_INFO, scmd,
2833 		"Controller reset is requested due to IO timeout\n"
2834 		"SCSI command pointer: (%p)\t SCSI host state: %d\t"
2835 		" SCSI host busy: %d\t FW outstanding: %d\n",
2836 		scmd, scmd->device->host->shost_state,
2837 		scsi_host_busy(scmd->device->host),
2838 		atomic_read(&instance->fw_outstanding));
2839 
2840 	/*
2841 	 * First wait for all commands to complete
2842 	 */
2843 	if (instance->adapter_type == MFI_SERIES) {
2844 		ret = megasas_generic_reset(scmd);
2845 	} else {
2846 		struct megasas_cmd_fusion *cmd;
2847 		cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
2848 		if (cmd)
2849 			megasas_dump_frame(cmd->io_request,
2850 				MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
2851 		ret = megasas_reset_fusion(scmd->device->host,
2852 				SCSIIO_TIMEOUT_OCR);
2853 	}
2854 
2855 	return ret;
2856 }
2857 
2858 /**
2859  * megasas_task_abort - Issues task abort request to firmware
2860  *			(supported only for fusion adapters)
2861  * @scmd:		SCSI command pointer
2862  */
2863 static int megasas_task_abort(struct scsi_cmnd *scmd)
2864 {
2865 	int ret;
2866 	struct megasas_instance *instance;
2867 
2868 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2869 
2870 	if (instance->adapter_type != MFI_SERIES)
2871 		ret = megasas_task_abort_fusion(scmd);
2872 	else {
2873 		sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
2874 		ret = FAILED;
2875 	}
2876 
2877 	return ret;
2878 }
2879 
2880 /**
2881  * megasas_reset_target:  Issues target reset request to firmware
2882  *                        (supported only for fusion adapters)
2883  * @scmd:                 SCSI command pointer
2884  */
2885 static int megasas_reset_target(struct scsi_cmnd *scmd)
2886 {
2887 	int ret;
2888 	struct megasas_instance *instance;
2889 
2890 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2891 
2892 	if (instance->adapter_type != MFI_SERIES)
2893 		ret = megasas_reset_target_fusion(scmd);
2894 	else {
2895 		sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
2896 		ret = FAILED;
2897 	}
2898 
2899 	return ret;
2900 }
2901 
2902 /**
2903  * megasas_bios_param - Returns disk geometry for a disk
2904  * @sdev:		device handle
2905  * @bdev:		block device
2906  * @capacity:		drive capacity
2907  * @geom:		geometry parameters
2908  */
2909 static int
2910 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2911 		 sector_t capacity, int geom[])
2912 {
2913 	int heads;
2914 	int sectors;
2915 	sector_t cylinders;
2916 	unsigned long tmp;
2917 
2918 	/* Default heads (64) & sectors (32) */
2919 	heads = 64;
2920 	sectors = 32;
2921 
2922 	tmp = heads * sectors;
2923 	cylinders = capacity;
2924 
2925 	sector_div(cylinders, tmp);
2926 
2927 	/*
2928 	 * Handle extended translation size for logical drives > 1Gb
2929 	 */
2930 
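	/* 0x200000 sectors * 512 bytes/sector = 1 GiB */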
2931 	if (capacity >= 0x200000) {
2932 		heads = 255;
2933 		sectors = 63;
2934 		tmp = heads*sectors;
2935 		cylinders = capacity;
2936 		sector_div(cylinders, tmp);
2937 	}
2938 
2939 	geom[0] = heads;
2940 	geom[1] = sectors;
2941 	geom[2] = cylinders;
2942 
2943 	return 0;
2944 }
2945 
2946 static void megasas_aen_polling(struct work_struct *work);
2947 
2948 /**
2949  * megasas_service_aen -	Processes an event notification
2950  * @instance:			Adapter soft state
2951  * @cmd:			AEN command completed by the ISR
2952  *
2953  * For AEN, driver sends a command down to FW that is held by the FW till an
2954  * event occurs. When an event of interest occurs, FW completes the command
2955  * that it was previously holding.
2956  *
2957  * This routine sends a SIGIO signal to processes that have registered with the
2958  * driver for AEN.
2959  */
2960 static void
2961 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
2962 {
2963 	unsigned long flags;
2964 
2965 	/*
2966 	 * Don't signal app if it is just an aborted previously registered aen
2967 	 */
2968 	if ((!cmd->abort_aen) && (instance->unload == 0)) {
2969 		spin_lock_irqsave(&poll_aen_lock, flags);
2970 		megasas_poll_wait_aen = 1;
2971 		spin_unlock_irqrestore(&poll_aen_lock, flags);
2972 		wake_up(&megasas_poll_wait);
2973 		kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
2974 	}
2975 	else
2976 		cmd->abort_aen = 0;
2977 
2978 	instance->aen_cmd = NULL;
2979 
2980 	megasas_return_cmd(instance, cmd);
2981 
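	/*
	 * Schedule the deferred AEN polling work so device-change events are
	 * processed outside of command-completion context.
	 */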
2982 	if ((instance->unload == 0) &&
2983 		((instance->issuepend_done == 1))) {
2984 		struct megasas_aen_event *ev;
2985 
2986 		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
2987 		if (!ev) {
2988 			dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
2989 		} else {
2990 			ev->instance = instance;
2991 			instance->ev = ev;
2992 			INIT_DELAYED_WORK(&ev->hotplug_work,
2993 					  megasas_aen_polling);
2994 			schedule_delayed_work(&ev->hotplug_work, 0);
2995 		}
2996 	}
2997 }
2998 
2999 static ssize_t
3000 megasas_fw_crash_buffer_store(struct device *cdev,
3001 	struct device_attribute *attr, const char *buf, size_t count)
3002 {
3003 	struct Scsi_Host *shost = class_to_shost(cdev);
3004 	struct megasas_instance *instance =
3005 		(struct megasas_instance *) shost->hostdata;
3006 	int val = 0;
3007 	unsigned long flags;
3008 
3009 	if (kstrtoint(buf, 0, &val) != 0)
3010 		return -EINVAL;
3011 
3012 	spin_lock_irqsave(&instance->crashdump_lock, flags);
3013 	instance->fw_crash_buffer_offset = val;
3014 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3015 	return strlen(buf);
3016 }
3017 
3018 static ssize_t
3019 megasas_fw_crash_buffer_show(struct device *cdev,
3020 	struct device_attribute *attr, char *buf)
3021 {
3022 	struct Scsi_Host *shost = class_to_shost(cdev);
3023 	struct megasas_instance *instance =
3024 		(struct megasas_instance *) shost->hostdata;
3025 	u32 size;
3026 	unsigned long buff_addr;
3027 	unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
3028 	unsigned long src_addr;
3029 	unsigned long flags;
3030 	u32 buff_offset;
3031 
3032 	spin_lock_irqsave(&instance->crashdump_lock, flags);
3033 	buff_offset = instance->fw_crash_buffer_offset;
3034 	if (!instance->crash_dump_buf &&
3035 		!((instance->fw_crash_state == AVAILABLE) ||
3036 		(instance->fw_crash_state == COPYING))) {
3037 		dev_err(&instance->pdev->dev,
3038 			"Firmware crash dump is not available\n");
3039 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3040 		return -EINVAL;
3041 	}
3042 
3043 	buff_addr = (unsigned long) buf;
3044 
3045 	if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
3046 		dev_err(&instance->pdev->dev,
3047 			"Firmware crash dump offset is out of range\n");
3048 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3049 		return 0;
3050 	}
3051 
3052 	size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
3053 	size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3054 
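	/*
	 * The crash dump is stored as an array of CRASH_DMA_BUF_SIZE chunks;
	 * locate the chunk containing buff_offset and copy from the offset
	 * within that chunk.
	 */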
3055 	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
3056 		(buff_offset % dmachunk);
3057 	memcpy(buf, (void *)src_addr, size);
3058 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3059 
3060 	return size;
3061 }
3062 
3063 static ssize_t
3064 megasas_fw_crash_buffer_size_show(struct device *cdev,
3065 	struct device_attribute *attr, char *buf)
3066 {
3067 	struct Scsi_Host *shost = class_to_shost(cdev);
3068 	struct megasas_instance *instance =
3069 		(struct megasas_instance *) shost->hostdata;
3070 
3071 	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
3072 		((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
3073 }
3074 
3075 static ssize_t
3076 megasas_fw_crash_state_store(struct device *cdev,
3077 	struct device_attribute *attr, const char *buf, size_t count)
3078 {
3079 	struct Scsi_Host *shost = class_to_shost(cdev);
3080 	struct megasas_instance *instance =
3081 		(struct megasas_instance *) shost->hostdata;
3082 	int val = 0;
3083 	unsigned long flags;
3084 
3085 	if (kstrtoint(buf, 0, &val) != 0)
3086 		return -EINVAL;
3087 
3088 	if (val <= AVAILABLE || val > COPY_ERROR) {
3089 		dev_err(&instance->pdev->dev, "application updates invalid "
3090 			"firmware crash state\n");
3091 		return -EINVAL;
3092 	}
3093 
3094 	instance->fw_crash_state = val;
3095 
3096 	if ((val == COPIED) || (val == COPY_ERROR)) {
3097 		spin_lock_irqsave(&instance->crashdump_lock, flags);
3098 		megasas_free_host_crash_buffer(instance);
3099 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3100 		if (val == COPY_ERROR)
3101 			dev_info(&instance->pdev->dev, "application failed to "
3102 				"copy Firmware crash dump\n");
3103 		else
3104 			dev_info(&instance->pdev->dev, "Firmware crash dump "
3105 				"copied successfully\n");
3106 	}
3107 	return strlen(buf);
3108 }
3109 
3110 static ssize_t
3111 megasas_fw_crash_state_show(struct device *cdev,
3112 	struct device_attribute *attr, char *buf)
3113 {
3114 	struct Scsi_Host *shost = class_to_shost(cdev);
3115 	struct megasas_instance *instance =
3116 		(struct megasas_instance *) shost->hostdata;
3117 
3118 	return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
3119 }
3120 
3121 static ssize_t
3122 megasas_page_size_show(struct device *cdev,
3123 	struct device_attribute *attr, char *buf)
3124 {
3125 	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
3126 }
3127 
3128 static ssize_t
3129 megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
3130 	char *buf)
3131 {
3132 	struct Scsi_Host *shost = class_to_shost(cdev);
3133 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3134 
3135 	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
3136 }
3137 
3138 static ssize_t
3139 megasas_fw_cmds_outstanding_show(struct device *cdev,
3140 				 struct device_attribute *attr, char *buf)
3141 {
3142 	struct Scsi_Host *shost = class_to_shost(cdev);
3143 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3144 
3145 	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
3146 }
3147 
3148 static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
3149 	megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
3150 static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
3151 	megasas_fw_crash_buffer_size_show, NULL);
3152 static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
3153 	megasas_fw_crash_state_show, megasas_fw_crash_state_store);
3154 static DEVICE_ATTR(page_size, S_IRUGO,
3155 	megasas_page_size_show, NULL);
3156 static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
3157 	megasas_ldio_outstanding_show, NULL);
3158 static DEVICE_ATTR(fw_cmds_outstanding, S_IRUGO,
3159 	megasas_fw_cmds_outstanding_show, NULL);
3160 
3161 struct device_attribute *megaraid_host_attrs[] = {
3162 	&dev_attr_fw_crash_buffer_size,
3163 	&dev_attr_fw_crash_buffer,
3164 	&dev_attr_fw_crash_state,
3165 	&dev_attr_page_size,
3166 	&dev_attr_ldio_outstanding,
3167 	&dev_attr_fw_cmds_outstanding,
3168 	NULL,
3169 };
3170 
3171 /*
3172  * Scsi host template for megaraid_sas driver
3173  */
3174 static struct scsi_host_template megasas_template = {
3175 
3176 	.module = THIS_MODULE,
3177 	.name = "Avago SAS based MegaRAID driver",
3178 	.proc_name = "megaraid_sas",
3179 	.slave_configure = megasas_slave_configure,
3180 	.slave_alloc = megasas_slave_alloc,
3181 	.slave_destroy = megasas_slave_destroy,
3182 	.queuecommand = megasas_queue_command,
3183 	.eh_target_reset_handler = megasas_reset_target,
3184 	.eh_abort_handler = megasas_task_abort,
3185 	.eh_host_reset_handler = megasas_reset_bus_host,
3186 	.eh_timed_out = megasas_reset_timer,
3187 	.shost_attrs = megaraid_host_attrs,
3188 	.bios_param = megasas_bios_param,
3189 	.use_clustering = ENABLE_CLUSTERING,
3190 	.change_queue_depth = scsi_change_queue_depth,
3191 	.no_write_same = 1,
3192 };
3193 
3194 /**
3195  * megasas_complete_int_cmd -	Completes an internal command
3196  * @instance:			Adapter soft state
3197  * @cmd:			Command to be completed
3198  *
3199  * The megasas_issue_blocked_cmd() function waits for a command to complete
3200  * after it issues a command. This function wakes up that waiting routine by
3201  * calling wake_up() on the wait queue.
3202  */
3203 static void
3204 megasas_complete_int_cmd(struct megasas_instance *instance,
3205 			 struct megasas_cmd *cmd)
3206 {
3207 	cmd->cmd_status_drv = cmd->frame->io.cmd_status;
3208 	wake_up(&instance->int_cmd_wait_q);
3209 }
3210 
3211 /**
3212  * megasas_complete_abort -	Completes aborting a command
3213  * @instance:			Adapter soft state
3214  * @cmd:			Cmd that was issued to abort another cmd
3215  *
3216  * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
3217  * after it issues an abort on a previously issued command. This function
3218  * wakes up all functions waiting on the same wait queue.
3219  */
3220 static void
3221 megasas_complete_abort(struct megasas_instance *instance,
3222 		       struct megasas_cmd *cmd)
3223 {
3224 	if (cmd->sync_cmd) {
3225 		cmd->sync_cmd = 0;
3226 		cmd->cmd_status_drv = 0;
3227 		wake_up(&instance->abort_cmd_wait_q);
3228 	}
3229 }
3230 
3231 /**
3232  * megasas_complete_cmd -	Completes a command
3233  * @instance:			Adapter soft state
3234  * @cmd:			Command to be completed
3235  * @alt_status:			If non-zero, use this value as status to
3236  *				SCSI mid-layer instead of the value returned
3237  *				by the FW. This should be used if caller wants
3238  *				an alternate status (as in the case of aborted
3239  *				commands)
3240  */
3241 void
3242 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3243 		     u8 alt_status)
3244 {
3245 	int exception = 0;
3246 	struct megasas_header *hdr = &cmd->frame->hdr;
3247 	unsigned long flags;
3248 	struct fusion_context *fusion = instance->ctrl_context;
3249 	u32 opcode, status;
3250 
3251 	/* flag for the retry reset */
3252 	cmd->retry_for_fw_reset = 0;
3253 
3254 	if (cmd->scmd)
3255 		cmd->scmd->SCp.ptr = NULL;
3256 
3257 	switch (hdr->cmd) {
3258 	case MFI_CMD_INVALID:
3259 		/* Some older 1068 controller FW may keep a pending
3260 		   MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
3261 		   when booting the kdump kernel.  Ignore this command to
3262 		   prevent a kernel panic on shutdown of the kdump kernel. */
3263 		dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
3264 		       "completed\n");
3265 		dev_warn(&instance->pdev->dev, "If you have a controller "
3266 		       "other than PERC5, please upgrade your firmware\n");
3267 		break;
3268 	case MFI_CMD_PD_SCSI_IO:
3269 	case MFI_CMD_LD_SCSI_IO:
3270 
3271 		/*
3272 		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3273 		 * issued either through an IO path or an IOCTL path. If it
3274 		 * was via IOCTL, we will send it to internal completion.
3275 		 */
3276 		if (cmd->sync_cmd) {
3277 			cmd->sync_cmd = 0;
3278 			megasas_complete_int_cmd(instance, cmd);
3279 			break;
3280 		}
3281 
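		/* fall through - regular-path SCSI IO completes like LD read/write */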
3282 	case MFI_CMD_LD_READ:
3283 	case MFI_CMD_LD_WRITE:
3284 
3285 		if (alt_status) {
3286 			cmd->scmd->result = alt_status << 16;
3287 			exception = 1;
3288 		}
3289 
3290 		if (exception) {
3291 
3292 			atomic_dec(&instance->fw_outstanding);
3293 
3294 			scsi_dma_unmap(cmd->scmd);
3295 			cmd->scmd->scsi_done(cmd->scmd);
3296 			megasas_return_cmd(instance, cmd);
3297 
3298 			break;
3299 		}
3300 
3301 		switch (hdr->cmd_status) {
3302 
3303 		case MFI_STAT_OK:
3304 			cmd->scmd->result = DID_OK << 16;
3305 			break;
3306 
3307 		case MFI_STAT_SCSI_IO_FAILED:
3308 		case MFI_STAT_LD_INIT_IN_PROGRESS:
3309 			cmd->scmd->result =
3310 			    (DID_ERROR << 16) | hdr->scsi_status;
3311 			break;
3312 
3313 		case MFI_STAT_SCSI_DONE_WITH_ERROR:
3314 
3315 			cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
3316 
3317 			if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
3318 				memset(cmd->scmd->sense_buffer, 0,
3319 				       SCSI_SENSE_BUFFERSIZE);
3320 				memcpy(cmd->scmd->sense_buffer, cmd->sense,
3321 				       hdr->sense_len);
3322 
3323 				cmd->scmd->result |= DRIVER_SENSE << 24;
3324 			}
3325 
3326 			break;
3327 
3328 		case MFI_STAT_LD_OFFLINE:
3329 		case MFI_STAT_DEVICE_NOT_FOUND:
3330 			cmd->scmd->result = DID_BAD_TARGET << 16;
3331 			break;
3332 
3333 		default:
3334 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
3335 			       hdr->cmd_status);
3336 			cmd->scmd->result = DID_ERROR << 16;
3337 			break;
3338 		}
3339 
3340 		atomic_dec(&instance->fw_outstanding);
3341 
3342 		scsi_dma_unmap(cmd->scmd);
3343 		cmd->scmd->scsi_done(cmd->scmd);
3344 		megasas_return_cmd(instance, cmd);
3345 
3346 		break;
3347 
3348 	case MFI_CMD_SMP:
3349 	case MFI_CMD_STP:
3350 	case MFI_CMD_NVME:
3351 		megasas_complete_int_cmd(instance, cmd);
3352 		break;
3353 
3354 	case MFI_CMD_DCMD:
3355 		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
3356 		/* Check for LD map update */
3357 		if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
3358 			&& (cmd->frame->dcmd.mbox.b[1] == 1)) {
3359 			fusion->fast_path_io = 0;
3360 			spin_lock_irqsave(instance->host->host_lock, flags);
3361 			status = cmd->frame->hdr.cmd_status;
3362 			instance->map_update_cmd = NULL;
3363 			if (status != MFI_STAT_OK) {
3364 				if (status != MFI_STAT_NOT_FOUND)
3365 					dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
3366 					       cmd->frame->hdr.cmd_status);
3367 				else {
3368 					megasas_return_cmd(instance, cmd);
3369 					spin_unlock_irqrestore(
3370 						instance->host->host_lock,
3371 						flags);
3372 					break;
3373 				}
3374 			}
3375 
3376 			megasas_return_cmd(instance, cmd);
3377 
3378 			/*
3379 			 * Set fast path IO to zero;
3380 			 * MR_ValidateMapInfo() will set the proper value.
3381 			 * Meanwhile all IOs will go through the LD IO path.
3382 			 */
3383 			if (status == MFI_STAT_OK &&
3384 			    (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
3385 				instance->map_id++;
3386 				fusion->fast_path_io = 1;
3387 			} else {
3388 				fusion->fast_path_io = 0;
3389 			}
3390 
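			/* Re-register the map sync DCMD so the next LD map
			 * change generates another completion.
			 */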
3391 			megasas_sync_map_info(instance);
3392 			spin_unlock_irqrestore(instance->host->host_lock,
3393 					       flags);
3394 			break;
3395 		}
3396 		if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3397 		    opcode == MR_DCMD_CTRL_EVENT_GET) {
3398 			spin_lock_irqsave(&poll_aen_lock, flags);
3399 			megasas_poll_wait_aen = 0;
3400 			spin_unlock_irqrestore(&poll_aen_lock, flags);
3401 		}
3402 
3403 		/* FW has an updated PD sequence */
3404 		if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3405 			(cmd->frame->dcmd.mbox.b[0] == 1)) {
3406 
3407 			spin_lock_irqsave(instance->host->host_lock, flags);
3408 			status = cmd->frame->hdr.cmd_status;
3409 			instance->jbod_seq_cmd = NULL;
3410 			megasas_return_cmd(instance, cmd);
3411 
3412 			if (status == MFI_STAT_OK) {
3413 				instance->pd_seq_map_id++;
3414 				/* Re-register a pd sync seq num cmd */
3415 				if (megasas_sync_pd_seq_num(instance, true))
3416 					instance->use_seqnum_jbod_fp = false;
3417 			} else
3418 				instance->use_seqnum_jbod_fp = false;
3419 
3420 			spin_unlock_irqrestore(instance->host->host_lock, flags);
3421 			break;
3422 		}
3423 
3424 		/*
3425 		 * See if we got an event notification
3426 		 */
3427 		if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
3428 			megasas_service_aen(instance, cmd);
3429 		else
3430 			megasas_complete_int_cmd(instance, cmd);
3431 
3432 		break;
3433 
3434 	case MFI_CMD_ABORT:
3435 		/*
3436 		 * Cmd issued to abort another cmd returned
3437 		 */
3438 		megasas_complete_abort(instance, cmd);
3439 		break;
3440 
3441 	default:
3442 		dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
3443 		       hdr->cmd);
3444 		megasas_complete_int_cmd(instance, cmd);
3445 		break;
3446 	}
3447 }
3448 
3449 /**
3450  * megasas_issue_pending_cmds_again -	issue all pending cmds
3451  *					in FW again because of the fw reset
3452  * @instance:				Adapter soft state
3453  */
3454 static inline void
3455 megasas_issue_pending_cmds_again(struct megasas_instance *instance)
3456 {
3457 	struct megasas_cmd *cmd;
3458 	struct list_head clist_local;
3459 	union megasas_evt_class_locale class_locale;
3460 	unsigned long flags;
3461 	u32 seq_num;
3462 
3463 	INIT_LIST_HEAD(&clist_local);
3464 	spin_lock_irqsave(&instance->hba_lock, flags);
3465 	list_splice_init(&instance->internal_reset_pending_q, &clist_local);
3466 	spin_unlock_irqrestore(&instance->hba_lock, flags);
3467 
3468 	while (!list_empty(&clist_local)) {
3469 		cmd = list_entry((&clist_local)->next,
3470 					struct megasas_cmd, list);
3471 		list_del_init(&cmd->list);
3472 
3473 		if (cmd->sync_cmd || cmd->scmd) {
3474 			dev_notice(&instance->pdev->dev, "command %p, %p:%d "
3475 				"detected to be pending during HBA reset\n",
3476 					cmd, cmd->scmd, cmd->sync_cmd);
3477 
3478 			cmd->retry_for_fw_reset++;
3479 
3480 			if (cmd->retry_for_fw_reset == 3) {
3481 				dev_notice(&instance->pdev->dev, "cmd %p, %p:%d "
3482 					"was tried multiple times during reset. "
3483 					"Shutting down the HBA\n",
3484 					cmd, cmd->scmd, cmd->sync_cmd);
3485 				instance->instancet->disable_intr(instance);
3486 				atomic_set(&instance->fw_reset_no_pci_access, 1);
3487 				megaraid_sas_kill_hba(instance);
3488 				return;
3489 			}
3490 		}
3491 
3492 		if (cmd->sync_cmd == 1) {
3493 			if (cmd->scmd) {
3494 				dev_notice(&instance->pdev->dev, "unexpected "
3495 					"cmd attached to internal command!\n");
3496 			}
3497 			dev_notice(&instance->pdev->dev, "%p synchronous cmd "
3498 						"on the internal reset queue, "
3499 						"issue it again.\n", cmd);
3500 			cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
3501 			instance->instancet->fire_cmd(instance,
3502 							cmd->frame_phys_addr,
3503 							0, instance->reg_set);
3504 		} else if (cmd->scmd) {
3505 			dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x] "
3506 			"detected on the internal queue, issue again.\n",
3507 			cmd, cmd->scmd->cmnd[0]);
3508 
3509 			atomic_inc(&instance->fw_outstanding);
3510 			instance->instancet->fire_cmd(instance,
3511 					cmd->frame_phys_addr,
3512 					cmd->frame_count-1, instance->reg_set);
3513 		} else {
3514 			dev_notice(&instance->pdev->dev, "%p unexpected cmd on the "
3515 				"internal reset defer list during re-issue\n",
3516 				cmd);
3517 		}
3518 	}
3519 
3520 	if (instance->aen_cmd) {
3521 		dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
3522 		megasas_return_cmd(instance, instance->aen_cmd);
3523 
3524 		instance->aen_cmd = NULL;
3525 	}
3526 
3527 	/*
3528 	 * Initiate AEN (Asynchronous Event Notification)
3529 	 */
3530 	seq_num = instance->last_seq_num;
3531 	class_locale.members.reserved = 0;
3532 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
3533 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
3534 
3535 	megasas_register_aen(instance, seq_num, class_locale.word);
3536 }
3537 
3538 /**
3539  * megasas_internal_reset_defer_cmds -	Move internal-reset pending cmds to a
3540  *					deferred queue
3541  * @instance:				Adapter soft state
3542  *
3543  * Commands pending at internal reset time are moved to a deferred queue
3544  * that is flushed once the internal reset completes successfully. If it
3545  * does not complete in time, the kernel reset handler flushes these cmds.
3546  **/
3547 static void
3548 megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
3549 {
3550 	struct megasas_cmd *cmd;
3551 	int i;
3552 	u16 max_cmd = instance->max_fw_cmds;
3553 	u32 defer_index;
3554 	unsigned long flags;
3555 
3556 	defer_index = 0;
3557 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
3558 	for (i = 0; i < max_cmd; i++) {
3559 		cmd = instance->cmd_list[i];
3560 		if (cmd->sync_cmd == 1 || cmd->scmd) {
3561 			dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p "
3562 					"on the defer queue as internal\n",
3563 				defer_index, cmd, cmd->sync_cmd, cmd->scmd);
3564 
3565 			if (!list_empty(&cmd->list)) {
3566 				dev_notice(&instance->pdev->dev, "ERROR while "
3567 					"moving this cmd:%p, %d %p, it was "
3568 					"discovered on some list?\n",
3569 					cmd, cmd->sync_cmd, cmd->scmd);
3570 
3571 				list_del_init(&cmd->list);
3572 			}
3573 			defer_index++;
3574 			list_add_tail(&cmd->list,
3575 				&instance->internal_reset_pending_q);
3576 		}
3577 	}
3578 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
3579 }
3580 
3581 
3582 static void
3583 process_fw_state_change_wq(struct work_struct *work)
3584 {
3585 	struct megasas_instance *instance =
3586 		container_of(work, struct megasas_instance, work_init);
3587 	u32 wait;
3588 	unsigned long flags;
3589 
3590 	if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
3591 		dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
3592 				atomic_read(&instance->adprecovery));
3593 		return;
3594 	}
3595 
3596 	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
3597 		dev_notice(&instance->pdev->dev, "FW detected to be in fault "
3598 					"state, restarting it...\n");
3599 
3600 		instance->instancet->disable_intr(instance);
3601 		atomic_set(&instance->fw_outstanding, 0);
3602 
3603 		atomic_set(&instance->fw_reset_no_pci_access, 1);
3604 		instance->instancet->adp_reset(instance, instance->reg_set);
3605 		atomic_set(&instance->fw_reset_no_pci_access, 0);
3606 
3607 		dev_notice(&instance->pdev->dev, "FW restarted successfully, "
3608 					"initiating next stage...\n");
3609 
3610 		dev_notice(&instance->pdev->dev, "HBA recovery state machine, "
3611 					"state 2 starting...\n");
3612 
3613 		/* wait for about 30 seconds before starting the second init */
3614 		for (wait = 0; wait < 30; wait++) {
3615 			msleep(1000);
3616 		}
3617 
3618 		if (megasas_transition_to_ready(instance, 1)) {
3619 			dev_notice(&instance->pdev->dev, "adapter not ready\n");
3620 
3621 			atomic_set(&instance->fw_reset_no_pci_access, 1);
3622 			megaraid_sas_kill_hba(instance);
3623 			return;
3624 		}
3625 
3626 		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
3627 			(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
3628 			(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
3629 			) {
3630 			*instance->consumer = *instance->producer;
3631 		} else {
3632 			*instance->consumer = 0;
3633 			*instance->producer = 0;
3634 		}
3635 
3636 		megasas_issue_init_mfi(instance);
3637 
3638 		spin_lock_irqsave(&instance->hba_lock, flags);
3639 		atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3640 		spin_unlock_irqrestore(&instance->hba_lock, flags);
3641 		instance->instancet->enable_intr(instance);
3642 
3643 		megasas_issue_pending_cmds_again(instance);
3644 		instance->issuepend_done = 1;
3645 	}
3646 }
3647 
3648 /**
3649  * megasas_deplete_reply_queue -	Processes all completed commands
3650  * @instance:				Adapter soft state
3651  * @alt_status:				Alternate status to be returned to
3652  *					SCSI mid-layer instead of the status
3653  *					returned by the FW
3654  * Note: this must be called with hba lock held
3655  */
3656 static int
3657 megasas_deplete_reply_queue(struct megasas_instance *instance,
3658 					u8 alt_status)
3659 {
3660 	u32 mfiStatus;
3661 	u32 fw_state;
3662 
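	/*
	 * If check_reset() reports an adapter reset in progress, claim the
	 * interrupt without touching the reply queue.
	 */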
3663 	if ((mfiStatus = instance->instancet->check_reset(instance,
3664 					instance->reg_set)) == 1) {
3665 		return IRQ_HANDLED;
3666 	}
3667 
3668 	if ((mfiStatus = instance->instancet->clear_intr(
3669 						instance->reg_set)
3670 						) == 0) {
3671 		/* Hardware may not set outbound_intr_status in MSI-X mode */
3672 		if (!instance->msix_vectors)
3673 			return IRQ_NONE;
3674 	}
3675 
3676 	instance->mfiStatus = mfiStatus;
3677 
3678 	if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
3679 		fw_state = instance->instancet->read_fw_status_reg(
3680 				instance->reg_set) & MFI_STATE_MASK;
3681 
3682 		if (fw_state != MFI_STATE_FAULT) {
3683 			dev_notice(&instance->pdev->dev, "fw state:%x\n",
3684 						fw_state);
3685 		}
3686 
3687 		if ((fw_state == MFI_STATE_FAULT) &&
3688 				(instance->disableOnlineCtrlReset == 0)) {
3689 			dev_notice(&instance->pdev->dev, "wait adp restart\n");
3690 
3691 			if ((instance->pdev->device ==
3692 					PCI_DEVICE_ID_LSI_SAS1064R) ||
3693 				(instance->pdev->device ==
3694 					PCI_DEVICE_ID_DELL_PERC5) ||
3695 				(instance->pdev->device ==
3696 					PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
3697 
3698 				*instance->consumer =
3699 					cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
3700 			}
3701 
3702 
3703 			instance->instancet->disable_intr(instance);
3704 			atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
3705 			instance->issuepend_done = 0;
3706 
3707 			atomic_set(&instance->fw_outstanding, 0);
3708 			megasas_internal_reset_defer_cmds(instance);
3709 
3710 			dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
3711 					fw_state, atomic_read(&instance->adprecovery));
3712 
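			/*
			 * The work_init handler (process_fw_state_change_wq()
			 * for MFI adapters) resets the adapter and re-issues
			 * the deferred commands.
			 */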
3713 			schedule_work(&instance->work_init);
3714 			return IRQ_HANDLED;
3715 
3716 		} else {
3717 			dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
3718 				fw_state, instance->disableOnlineCtrlReset);
3719 		}
3720 	}
3721 
3722 	tasklet_schedule(&instance->isr_tasklet);
3723 	return IRQ_HANDLED;
3724 }
3725 /**
3726  * megasas_isr - isr entry point
3727  */
3728 static irqreturn_t megasas_isr(int irq, void *devp)
3729 {
3730 	struct megasas_irq_context *irq_context = devp;
3731 	struct megasas_instance *instance = irq_context->instance;
3732 	unsigned long flags;
3733 	irqreturn_t rc;
3734 
3735 	if (atomic_read(&instance->fw_reset_no_pci_access))
3736 		return IRQ_HANDLED;
3737 
3738 	spin_lock_irqsave(&instance->hba_lock, flags);
3739 	rc = megasas_deplete_reply_queue(instance, DID_OK);
3740 	spin_unlock_irqrestore(&instance->hba_lock, flags);
3741 
3742 	return rc;
3743 }
3744 
3745 /**
3746  * megasas_transition_to_ready -	Move the FW to READY state
3747  * @instance:				Adapter soft state
3748  *
3749  * During initialization, the FW can potentially be in any one of several
3750  * possible states. If the FW is in an operational or waiting-for-handshake
3751  * state, the driver must take steps to bring it to the ready state.
3752  * Otherwise, it has to wait for the FW to reach the ready state.
3753  */
3754 int
3755 megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3756 {
3757 	int i;
3758 	u8 max_wait;
3759 	u32 fw_state;
3760 	u32 cur_state;
3761 	u32 abs_state, curr_abs_state;
3762 
3763 	abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
3764 	fw_state = abs_state & MFI_STATE_MASK;
3765 
3766 	if (fw_state != MFI_STATE_READY)
3767 		dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
3768 		       " state\n");
3769 
3770 	while (fw_state != MFI_STATE_READY) {
3771 
3772 		switch (fw_state) {
3773 
3774 		case MFI_STATE_FAULT:
3775 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n");
3776 			if (ocr) {
3777 				max_wait = MEGASAS_RESET_WAIT_TIME;
3778 				cur_state = MFI_STATE_FAULT;
3779 				break;
3780 			} else
3781 				return -ENODEV;
3782 
3783 		case MFI_STATE_WAIT_HANDSHAKE:
3784 			/*
3785 			 * Set the CLR bit in inbound doorbell
3786 			 */
3787 			if ((instance->pdev->device ==
3788 				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3789 				(instance->pdev->device ==
3790 				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3791 				(instance->adapter_type != MFI_SERIES))
3792 				writel(
3793 				  MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3794 				  &instance->reg_set->doorbell);
3795 			else
3796 				writel(
3797 				    MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3798 					&instance->reg_set->inbound_doorbell);
3799 
3800 			max_wait = MEGASAS_RESET_WAIT_TIME;
3801 			cur_state = MFI_STATE_WAIT_HANDSHAKE;
3802 			break;
3803 
3804 		case MFI_STATE_BOOT_MESSAGE_PENDING:
3805 			if ((instance->pdev->device ==
3806 			     PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3807 				(instance->pdev->device ==
3808 				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3809 				(instance->adapter_type != MFI_SERIES))
3810 				writel(MFI_INIT_HOTPLUG,
3811 				       &instance->reg_set->doorbell);
3812 			else
3813 				writel(MFI_INIT_HOTPLUG,
3814 					&instance->reg_set->inbound_doorbell);
3815 
3816 			max_wait = MEGASAS_RESET_WAIT_TIME;
3817 			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3818 			break;
3819 
3820 		case MFI_STATE_OPERATIONAL:
3821 			/*
3822 			 * Bring it to READY state; assuming max wait 10 secs
3823 			 */
3824 			instance->instancet->disable_intr(instance);
3825 			if ((instance->pdev->device ==
3826 				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3827 				(instance->pdev->device ==
3828 				PCI_DEVICE_ID_LSI_SAS0071SKINNY)  ||
3829 				(instance->adapter_type != MFI_SERIES)) {
3830 				writel(MFI_RESET_FLAGS,
3831 					&instance->reg_set->doorbell);
3832 
3833 				if (instance->adapter_type != MFI_SERIES) {
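					/* Poll the doorbell reset bit for up
					 * to ~10 seconds in 20 ms steps.
					 */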
3834 					for (i = 0; i < (10 * 1000); i += 20) {
3835 						if (readl(
3836 							    &instance->
3837 							    reg_set->
3838 							    doorbell) & 1)
3839 							msleep(20);
3840 						else
3841 							break;
3842 					}
3843 				}
3844 			} else
3845 				writel(MFI_RESET_FLAGS,
3846 					&instance->reg_set->inbound_doorbell);
3847 
3848 			max_wait = MEGASAS_RESET_WAIT_TIME;
3849 			cur_state = MFI_STATE_OPERATIONAL;
3850 			break;
3851 
3852 		case MFI_STATE_UNDEFINED:
3853 			/*
3854 			 * This state should not last for more than 2 seconds
3855 			 */
3856 			max_wait = MEGASAS_RESET_WAIT_TIME;
3857 			cur_state = MFI_STATE_UNDEFINED;
3858 			break;
3859 
3860 		case MFI_STATE_BB_INIT:
3861 			max_wait = MEGASAS_RESET_WAIT_TIME;
3862 			cur_state = MFI_STATE_BB_INIT;
3863 			break;
3864 
3865 		case MFI_STATE_FW_INIT:
3866 			max_wait = MEGASAS_RESET_WAIT_TIME;
3867 			cur_state = MFI_STATE_FW_INIT;
3868 			break;
3869 
3870 		case MFI_STATE_FW_INIT_2:
3871 			max_wait = MEGASAS_RESET_WAIT_TIME;
3872 			cur_state = MFI_STATE_FW_INIT_2;
3873 			break;
3874 
3875 		case MFI_STATE_DEVICE_SCAN:
3876 			max_wait = MEGASAS_RESET_WAIT_TIME;
3877 			cur_state = MFI_STATE_DEVICE_SCAN;
3878 			break;
3879 
3880 		case MFI_STATE_FLUSH_CACHE:
3881 			max_wait = MEGASAS_RESET_WAIT_TIME;
3882 			cur_state = MFI_STATE_FLUSH_CACHE;
3883 			break;
3884 
3885 		default:
3886 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
3887 			       fw_state);
3888 			return -ENODEV;
3889 		}
3890 
3891 		/*
3892 		 * The cur_state should not last for more than max_wait secs
3893 		 */
3894 		for (i = 0; i < (max_wait * 1000); i++) {
3895 			curr_abs_state = instance->instancet->
3896 				read_fw_status_reg(instance->reg_set);
3897 
3898 			if (abs_state == curr_abs_state) {
3899 				msleep(1);
3900 			} else
3901 				break;
3902 		}
3903 
3904 		/*
3905 		 * Return error if fw_state hasn't changed after max_wait
3906 		 */
3907 		if (curr_abs_state == abs_state) {
3908 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
3909 			       "in %d secs\n", fw_state, max_wait);
3910 			return -ENODEV;
3911 		}
3912 
3913 		abs_state = curr_abs_state;
3914 		fw_state = curr_abs_state & MFI_STATE_MASK;
3915 	}
3916 	dev_info(&instance->pdev->dev, "FW now in Ready state\n");
3917 
3918 	return 0;
3919 }
3920 
3921 /**
3922  * megasas_teardown_frame_pool -	Destroy the cmd frame DMA pool
3923  * @instance:				Adapter soft state
3924  */
3925 static void megasas_teardown_frame_pool(struct megasas_instance *instance)
3926 {
3927 	int i;
3928 	u16 max_cmd = instance->max_mfi_cmds;
3929 	struct megasas_cmd *cmd;
3930 
3931 	if (!instance->frame_dma_pool)
3932 		return;
3933 
3934 	/*
3935 	 * Return all frames to pool
3936 	 */
3937 	for (i = 0; i < max_cmd; i++) {
3938 
3939 		cmd = instance->cmd_list[i];
3940 
3941 		if (cmd->frame)
3942 			dma_pool_free(instance->frame_dma_pool, cmd->frame,
3943 				      cmd->frame_phys_addr);
3944 
3945 		if (cmd->sense)
3946 			dma_pool_free(instance->sense_dma_pool, cmd->sense,
3947 				      cmd->sense_phys_addr);
3948 	}
3949 
3950 	/*
3951 	 * Now destroy the pool itself
3952 	 */
3953 	dma_pool_destroy(instance->frame_dma_pool);
3954 	dma_pool_destroy(instance->sense_dma_pool);
3955 
3956 	instance->frame_dma_pool = NULL;
3957 	instance->sense_dma_pool = NULL;
3958 }
3959 
3960 /**
3961  * megasas_create_frame_pool -	Creates DMA pool for cmd frames
3962  * @instance:			Adapter soft state
3963  *
3964  * Each command packet has an embedded DMA memory buffer that is used for
3965  * filling MFI frame and the SG list that immediately follows the frame. This
3966  * function creates those DMA memory buffers for each command packet by using
3967  * PCI pool facility.
3968  */
3969 static int megasas_create_frame_pool(struct megasas_instance *instance)
3970 {
3971 	int i;
3972 	u16 max_cmd;
3973 	u32 sge_sz;
3974 	u32 frame_count;
3975 	struct megasas_cmd *cmd;
3976 
3977 	max_cmd = instance->max_mfi_cmds;
3978 
3979 	/*
3980 	 * Size of our frame is 64 bytes for MFI frame, followed by max SG
3981 	 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
3982 	 */
3983 	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
3984 	    sizeof(struct megasas_sge32);
3985 
3986 	if (instance->flag_ieee)
3987 		sge_sz = sizeof(struct megasas_sge_skinny);
3988 
3989 	/*
3990 	 * For MFI controllers:
3991 	 * max_num_sge = 60
3992 	 * max_sge_sz  = 16 bytes (sizeof megasas_sge_skinny)
3993 	 * Total 960 bytes (15 MFI frames of 64 bytes)
3994 	 *
3995 	 * Fusion adapters require only 3 extra frames:
3996 	 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
3997 	 * max_sge_sz  = 12 bytes (sizeof megasas_sge64)
3998 	 * Total 192 bytes (3 MFI frames of 64 bytes)
3999 	 */
4000 	frame_count = (instance->adapter_type == MFI_SERIES) ?
4001 			(15 + 1) : (3 + 1);
4002 	instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
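	/* i.e. 16 frames * 64 bytes = 1024 bytes per cmd on MFI series,
	 * 4 frames * 64 bytes = 256 bytes per cmd on Fusion
	 * (MEGAMFI_FRAME_SIZE being the 64-byte frame size noted above).
	 */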
4003 	/*
4004 	 * Use the DMA pool facility to allocate the frames
4005 	 */
4006 	instance->frame_dma_pool = dma_pool_create("megasas frame pool",
4007 					&instance->pdev->dev,
4008 					instance->mfi_frame_size, 256, 0);
4009 
4010 	if (!instance->frame_dma_pool) {
4011 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
4012 		return -ENOMEM;
4013 	}
4014 
4015 	instance->sense_dma_pool = dma_pool_create("megasas sense pool",
4016 						   &instance->pdev->dev, 128,
4017 						   4, 0);
4018 
4019 	if (!instance->sense_dma_pool) {
4020 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
4021 
4022 		dma_pool_destroy(instance->frame_dma_pool);
4023 		instance->frame_dma_pool = NULL;
4024 
4025 		return -ENOMEM;
4026 	}
4027 
4028 	/*
4029 	 * Allocate and attach a frame to each of the commands in cmd_list.
4030 	 * By using cmd->index as the context instead of the &cmd address,
4031 	 * we can always use a 32-bit context regardless of the architecture.
4032 	 */
4033 	for (i = 0; i < max_cmd; i++) {
4034 
4035 		cmd = instance->cmd_list[i];
4036 
4037 		cmd->frame = dma_pool_zalloc(instance->frame_dma_pool,
4038 					    GFP_KERNEL, &cmd->frame_phys_addr);
4039 
4040 		cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
4041 					    GFP_KERNEL, &cmd->sense_phys_addr);
4042 
4043 		/*
4044 		 * megasas_teardown_frame_pool() takes care of freeing
4045 		 * whatever has been allocated
4046 		 */
4047 		if (!cmd->frame || !cmd->sense) {
4048 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n");
4049 			megasas_teardown_frame_pool(instance);
4050 			return -ENOMEM;
4051 		}
4052 
4053 		cmd->frame->io.context = cpu_to_le32(cmd->index);
4054 		cmd->frame->io.pad_0 = 0;
4055 		if ((instance->adapter_type == MFI_SERIES) && reset_devices)
4056 			cmd->frame->hdr.cmd = MFI_CMD_INVALID;
4057 	}
4058 
4059 	return 0;
4060 }
4061 
4062 /**
4063  * megasas_free_cmds -	Free all the cmds in the free cmd pool
4064  * @instance:		Adapter soft state
4065  */
4066 void megasas_free_cmds(struct megasas_instance *instance)
4067 {
4068 	int i;
4069 
4070 	/* First free the MFI frame pool */
4071 	megasas_teardown_frame_pool(instance);
4072 
4073 	/* Free all the commands in the cmd_list */
4074 	for (i = 0; i < instance->max_mfi_cmds; i++)
4075 
4076 		kfree(instance->cmd_list[i]);
4077 
4078 	/* Free the cmd_list buffer itself */
4079 	kfree(instance->cmd_list);
4080 	instance->cmd_list = NULL;
4081 
4082 	INIT_LIST_HEAD(&instance->cmd_pool);
4083 }
4084 
4085 /**
4086  * megasas_alloc_cmds -	Allocates the command packets
4087  * @instance:		Adapter soft state
4088  *
4089  * Each command that is issued to the FW, whether IO commands from the OS or
4090  * internal commands like IOCTLs, is wrapped in a local data structure called
4091  * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
4092  * the FW.
4093  *
4094  * Each frame has a 32-bit field called context (tag). This context is used
4095  * to get back the megasas_cmd from the frame when a frame gets completed in
4096  * the ISR. Typically the address of the megasas_cmd itself would be used as
4097  * the context. But we wanted to keep the differences between 32 and 64 bit
4098  * systems to the minimum. We always use 32 bit integers for the context. In
4099  * this driver, the 32 bit values are the indices into an array cmd_list.
4100  * This array is used only to look up the megasas_cmd given the context. The
4101  * free commands themselves are maintained in a linked list called cmd_pool.
4102  */
4103 int megasas_alloc_cmds(struct megasas_instance *instance)
4104 {
4105 	int i;
4106 	int j;
4107 	u16 max_cmd;
4108 	struct megasas_cmd *cmd;
4109 
4110 	max_cmd = instance->max_mfi_cmds;
4111 
4112 	/*
4113 	 * instance->cmd_list is an array of struct megasas_cmd pointers.
4114 	 * Allocate the dynamic array first and then allocate individual
4115 	 * commands.
4116 	 */
4117 	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL);
4118 
4119 	if (!instance->cmd_list) {
4120 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
4121 		return -ENOMEM;
4122 	}
4123 
4124 	memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd);
4125 
4126 	for (i = 0; i < max_cmd; i++) {
4127 		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
4128 						GFP_KERNEL);
4129 
4130 		if (!instance->cmd_list[i]) {
4131 
4132 			for (j = 0; j < i; j++)
4133 				kfree(instance->cmd_list[j]);
4134 
4135 			kfree(instance->cmd_list);
4136 			instance->cmd_list = NULL;
4137 
4138 			return -ENOMEM;
4139 		}
4140 	}
4141 
4142 	for (i = 0; i < max_cmd; i++) {
4143 		cmd = instance->cmd_list[i];
4144 		memset(cmd, 0, sizeof(struct megasas_cmd));
4145 		cmd->index = i;
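		/* index doubles as the 32-bit completion context that
		 * megasas_create_frame_pool() writes into the frame.
		 */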
4146 		cmd->scmd = NULL;
4147 		cmd->instance = instance;
4148 
4149 		list_add_tail(&cmd->list, &instance->cmd_pool);
4150 	}
4151 
4152 	/*
4153 	 * Create a frame pool and assign one frame to each cmd
4154 	 */
4155 	if (megasas_create_frame_pool(instance)) {
4156 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
4157 		megasas_free_cmds(instance);
		return -ENOMEM;
4158 	}
4159 
4160 	return 0;
4161 }
4162 
4163 /*
4164  * dcmd_timeout_ocr_possible -	Check if OCR is possible based on Driver/FW state.
4165  * @instance:				Adapter soft state
4166  *
4167  * Returns KILL_ADAPTER for MFI adapters, IGNORE_TIMEOUT if a driver unload
4168  * or an OCR is already in progress, and INITIATE_OCR otherwise.
4169  */
4170 inline int
4171 dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
4172 
4173 	if (instance->adapter_type == MFI_SERIES)
4174 		return KILL_ADAPTER;
4175 	else if (instance->unload ||
4176 			test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
4177 		return IGNORE_TIMEOUT;
4178 	else
4179 		return INITIATE_OCR;
4180 }
4181 
4182 static void
4183 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
4184 {
4185 	int ret;
4186 	struct megasas_cmd *cmd;
4187 	struct megasas_dcmd_frame *dcmd;
4188 
4189 	struct MR_PRIV_DEVICE *mr_device_priv_data;
4190 	u16 device_id = 0;
4191 
4192 	device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
4193 	cmd = megasas_get_cmd(instance);
4194 
4195 	if (!cmd) {
4196 		dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
4197 		return;
4198 	}
4199 
4200 	dcmd = &cmd->frame->dcmd;
4201 
4202 	memset(instance->pd_info, 0, sizeof(*instance->pd_info));
4203 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4204 
4205 	dcmd->mbox.s[0] = cpu_to_le16(device_id);
4206 	dcmd->cmd = MFI_CMD_DCMD;
4207 	dcmd->cmd_status = 0xFF;
4208 	dcmd->sge_count = 1;
4209 	dcmd->flags = MFI_FRAME_DIR_READ;
4210 	dcmd->timeout = 0;
4211 	dcmd->pad_0 = 0;
4212 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
4213 	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
4214 
4215 	megasas_set_dma_settings(instance, dcmd, instance->pd_info_h,
4216 				 sizeof(struct MR_PD_INFO));
4217 
4218 	if ((instance->adapter_type != MFI_SERIES) &&
4219 	    !instance->mask_interrupts)
4220 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4221 	else
4222 		ret = megasas_issue_polled(instance, cmd);
4223 
4224 	switch (ret) {
4225 	case DCMD_SUCCESS:
4226 		mr_device_priv_data = sdev->hostdata;
4227 		le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
4228 		mr_device_priv_data->interface_type =
4229 				instance->pd_info->state.ddf.pdType.intf;
4230 		break;
4231 
4232 	case DCMD_TIMEOUT:
4233 
4234 		switch (dcmd_timeout_ocr_possible(instance)) {
4235 		case INITIATE_OCR:
4236 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4237 			megasas_reset_fusion(instance->host,
4238 				MFI_IO_TIMEOUT_OCR);
4239 			break;
4240 		case KILL_ADAPTER:
4241 			megaraid_sas_kill_hba(instance);
4242 			break;
4243 		case IGNORE_TIMEOUT:
4244 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4245 				__func__, __LINE__);
4246 			break;
4247 		}
4248 
4249 		break;
4250 	}
4251 
4252 	if (ret != DCMD_TIMEOUT)
4253 		megasas_return_cmd(instance, cmd);
4254 
4255 	return;
4256 }
4257 /*
4258  * megasas_get_pd_list -		Returns FW's pd_list structure
4259  * @instance:				Adapter soft state
4260  *
4261  * Issues an internal command (DCMD) to get the FW's controller PD
4262  * list structure.  This information is mainly used to find out the
4263  * SYSTEM (JBOD) physical drives that are exposed to the host by
4264  * the FW.
4265  */
4266 static int
4267 megasas_get_pd_list(struct megasas_instance *instance)
4268 {
4269 	int ret = 0, pd_index = 0;
4270 	struct megasas_cmd *cmd;
4271 	struct megasas_dcmd_frame *dcmd;
4272 	struct MR_PD_LIST *ci;
4273 	struct MR_PD_ADDRESS *pd_addr;
4274 	dma_addr_t ci_h = 0;
4275 
4276 	if (instance->pd_list_not_supported) {
4277 		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4278 		"not supported by firmware\n");
4279 		return ret;
4280 	}
4281 
4282 	ci = instance->pd_list_buf;
4283 	ci_h = instance->pd_list_buf_h;
4284 
4285 	cmd = megasas_get_cmd(instance);
4286 
4287 	if (!cmd) {
4288 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
4289 		return -ENOMEM;
4290 	}
4291 
4292 	dcmd = &cmd->frame->dcmd;
4293 
4294 	memset(ci, 0, sizeof(*ci));
4295 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4296 
4297 	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4298 	dcmd->mbox.b[1] = 0;
4299 	dcmd->cmd = MFI_CMD_DCMD;
4300 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4301 	dcmd->sge_count = 1;
4302 	dcmd->flags = MFI_FRAME_DIR_READ;
4303 	dcmd->timeout = 0;
4304 	dcmd->pad_0 = 0;
4305 	dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4306 	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
4307 
4308 	megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h,
4309 				 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)));
4310 
4311 	if ((instance->adapter_type != MFI_SERIES) &&
4312 	    !instance->mask_interrupts)
4313 		ret = megasas_issue_blocked_cmd(instance, cmd,
4314 			MFI_IO_TIMEOUT_SECS);
4315 	else
4316 		ret = megasas_issue_polled(instance, cmd);
4317 
4318 	switch (ret) {
4319 	case DCMD_FAILED:
4320 		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4321 			"failed/not supported by firmware\n");
4322 
4323 		if (instance->adapter_type != MFI_SERIES)
4324 			megaraid_sas_kill_hba(instance);
4325 		else
4326 			instance->pd_list_not_supported = 1;
4327 		break;
4328 	case DCMD_TIMEOUT:
4329 
4330 		switch (dcmd_timeout_ocr_possible(instance)) {
4331 		case INITIATE_OCR:
4332 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4333 			/*
4334 			 * DCMD failed from AEN path.
4335 			 * AEN path already hold reset_mutex to avoid PCI access
4336 			 * while OCR is in progress.
4337 			 */
4338 			mutex_unlock(&instance->reset_mutex);
4339 			megasas_reset_fusion(instance->host,
4340 						MFI_IO_TIMEOUT_OCR);
4341 			mutex_lock(&instance->reset_mutex);
4342 			break;
4343 		case KILL_ADAPTER:
4344 			megaraid_sas_kill_hba(instance);
4345 			break;
4346 		case IGNORE_TIMEOUT:
4347 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n",
4348 				__func__, __LINE__);
4349 			break;
4350 		}
4351 
4352 		break;
4353 
4354 	case DCMD_SUCCESS:
4355 		pd_addr = ci->addr;
4356 
4357 		if ((le32_to_cpu(ci->count) >
4358 			(MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
4359 			break;
4360 
4361 		memset(instance->local_pd_list, 0,
4362 				MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4363 
4364 		for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
4365 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid	=
4366 					le16_to_cpu(pd_addr->deviceId);
4367 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType	=
4368 					pd_addr->scsiDevType;
4369 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState	=
4370 					MR_PD_STATE_SYSTEM;
4371 			pd_addr++;
4372 		}
4373 
4374 		memcpy(instance->pd_list, instance->local_pd_list,
4375 			sizeof(instance->pd_list));
4376 		break;
4377 
4378 	}
4379 
4380 	if (ret != DCMD_TIMEOUT)
4381 		megasas_return_cmd(instance, cmd);
4382 
4383 	return ret;
4384 }
4385 
4386 /*
4387  * megasas_get_ld_list -		Returns FW's ld_list structure
4388  * @instance:				Adapter soft state
4389  *
4390  * Issues an internal command (DCMD) to get the FW's controller LD
4391  * list structure.  This information is mainly used to find out the
4392  * logical drives (and their target IDs) that are currently
4393  * configured on the controller.
4394  */
4395 static int
4396 megasas_get_ld_list(struct megasas_instance *instance)
4397 {
4398 	int ret = 0, ld_index = 0, ids = 0;
4399 	struct megasas_cmd *cmd;
4400 	struct megasas_dcmd_frame *dcmd;
4401 	struct MR_LD_LIST *ci;
4402 	dma_addr_t ci_h = 0;
4403 	u32 ld_count;
4404 
4405 	ci = instance->ld_list_buf;
4406 	ci_h = instance->ld_list_buf_h;
4407 
4408 	cmd = megasas_get_cmd(instance);
4409 
4410 	if (!cmd) {
4411 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
4412 		return -ENOMEM;
4413 	}
4414 
4415 	dcmd = &cmd->frame->dcmd;
4416 
4417 	memset(ci, 0, sizeof(*ci));
4418 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4419 
4420 	if (instance->supportmax256vd)
4421 		dcmd->mbox.b[0] = 1;
4422 	dcmd->cmd = MFI_CMD_DCMD;
4423 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4424 	dcmd->sge_count = 1;
4425 	dcmd->flags = MFI_FRAME_DIR_READ;
4426 	dcmd->timeout = 0;
4427 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
4428 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
4429 	dcmd->pad_0  = 0;
4430 
4431 	megasas_set_dma_settings(instance, dcmd, ci_h,
4432 				 sizeof(struct MR_LD_LIST));
4433 
4434 	if ((instance->adapter_type != MFI_SERIES) &&
4435 	    !instance->mask_interrupts)
4436 		ret = megasas_issue_blocked_cmd(instance, cmd,
4437 			MFI_IO_TIMEOUT_SECS);
4438 	else
4439 		ret = megasas_issue_polled(instance, cmd);
4440 
4441 	ld_count = le32_to_cpu(ci->ldCount);
4442 
4443 	switch (ret) {
4444 	case DCMD_FAILED:
4445 		megaraid_sas_kill_hba(instance);
4446 		break;
4447 	case DCMD_TIMEOUT:
4448 
4449 		switch (dcmd_timeout_ocr_possible(instance)) {
4450 		case INITIATE_OCR:
4451 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4452 			/*
4453 			 * DCMD failed from AEN path.
4454 			 * AEN path already hold reset_mutex to avoid PCI access
4455 			 * while OCR is in progress.
4456 			 */
4457 			mutex_unlock(&instance->reset_mutex);
4458 			megasas_reset_fusion(instance->host,
4459 						MFI_IO_TIMEOUT_OCR);
4460 			mutex_lock(&instance->reset_mutex);
4461 			break;
4462 		case KILL_ADAPTER:
4463 			megaraid_sas_kill_hba(instance);
4464 			break;
4465 		case IGNORE_TIMEOUT:
4466 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4467 				__func__, __LINE__);
4468 			break;
4469 		}
4470 
4471 		break;
4472 
4473 	case DCMD_SUCCESS:
4474 		if (ld_count > instance->fw_supported_vd_count)
4475 			break;
4476 
4477 		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4478 
4479 		for (ld_index = 0; ld_index < ld_count; ld_index++) {
4480 			if (ci->ldList[ld_index].state != 0) {
4481 				ids = ci->ldList[ld_index].ref.targetId;
4482 				instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
4483 			}
4484 		}
4485 
4486 		break;
4487 	}
4488 
4489 	if (ret != DCMD_TIMEOUT)
4490 		megasas_return_cmd(instance, cmd);
4491 
4492 	return ret;
4493 }
4494 
4495 /**
4496  * megasas_ld_list_query -	Returns FW's ld_list structure
4497  * @instance:				Adapter soft state
4498  * @query_type:				LD query type (e.g. exposed to host)
4499  *
4500  * Issues an internal command (DCMD) to query the FW for the list of
4501  * logical drive target IDs matching @query_type.  Falls back to
4502  * megasas_get_ld_list() if the FW does not support this DCMD.
4503  */
4504 static int
4505 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4506 {
4507 	int ret = 0, ld_index = 0, ids = 0;
4508 	struct megasas_cmd *cmd;
4509 	struct megasas_dcmd_frame *dcmd;
4510 	struct MR_LD_TARGETID_LIST *ci;
4511 	dma_addr_t ci_h = 0;
4512 	u32 tgtid_count;
4513 
4514 	ci = instance->ld_targetid_list_buf;
4515 	ci_h = instance->ld_targetid_list_buf_h;
4516 
4517 	cmd = megasas_get_cmd(instance);
4518 
4519 	if (!cmd) {
4520 		dev_warn(&instance->pdev->dev,
4521 		         "megasas_ld_list_query: Failed to get cmd\n");
4522 		return -ENOMEM;
4523 	}
4524 
4525 	dcmd = &cmd->frame->dcmd;
4526 
4527 	memset(ci, 0, sizeof(*ci));
4528 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4529 
4530 	dcmd->mbox.b[0] = query_type;
4531 	if (instance->supportmax256vd)
4532 		dcmd->mbox.b[2] = 1;
4533 
4534 	dcmd->cmd = MFI_CMD_DCMD;
4535 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4536 	dcmd->sge_count = 1;
4537 	dcmd->flags = MFI_FRAME_DIR_READ;
4538 	dcmd->timeout = 0;
4539 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4540 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
4541 	dcmd->pad_0  = 0;
4542 
4543 	megasas_set_dma_settings(instance, dcmd, ci_h,
4544 				 sizeof(struct MR_LD_TARGETID_LIST));
4545 
4546 	if ((instance->adapter_type != MFI_SERIES) &&
4547 	    !instance->mask_interrupts)
4548 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4549 	else
4550 		ret = megasas_issue_polled(instance, cmd);
4551 
4552 	switch (ret) {
4553 	case DCMD_FAILED:
4554 		dev_info(&instance->pdev->dev,
4555 			"DCMD not supported by firmware - %s %d\n",
4556 				__func__, __LINE__);
4557 		ret = megasas_get_ld_list(instance);
4558 		break;
4559 	case DCMD_TIMEOUT:
4560 		switch (dcmd_timeout_ocr_possible(instance)) {
4561 		case INITIATE_OCR:
4562 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4563 			/*
4564 			 * DCMD failed from AEN path.
4565 			 * AEN path already hold reset_mutex to avoid PCI access
4566 			 * while OCR is in progress.
4567 			 */
4568 			mutex_unlock(&instance->reset_mutex);
4569 			megasas_reset_fusion(instance->host,
4570 						MFI_IO_TIMEOUT_OCR);
4571 			mutex_lock(&instance->reset_mutex);
4572 			break;
4573 		case KILL_ADAPTER:
4574 			megaraid_sas_kill_hba(instance);
4575 			break;
4576 		case IGNORE_TIMEOUT:
4577 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4578 				__func__, __LINE__);
4579 			break;
4580 		}
4581 
4582 		break;
4583 	case DCMD_SUCCESS:
4584 		tgtid_count = le32_to_cpu(ci->count);
4585 
4586 		if ((tgtid_count > (instance->fw_supported_vd_count)))
4587 			break;
4588 
4589 		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
4590 		for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
4591 			ids = ci->targetId[ld_index];
4592 			instance->ld_ids[ids] = ci->targetId[ld_index];
4593 		}
4594 
4595 		break;
4596 	}
4597 
4598 	if (ret != DCMD_TIMEOUT)
4599 		megasas_return_cmd(instance, cmd);
4600 
4601 	return ret;
4602 }
4603 
4604 /*
4605  * megasas_update_ext_vd_details :	Update details w.r.t. Extended VD support
4606  * @instance:				Controller's soft state
4607  */
4608 static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4609 {
4610 	struct fusion_context *fusion;
4611 	u32 ventura_map_sz = 0;
4612 
4613 	fusion = instance->ctrl_context;
4614 	/* MFI based controllers have no fusion context; nothing to update */
4615 	if (!fusion)
4616 		return;
4617 
4618 	instance->supportmax256vd =
4619 		instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs;
4620 	/* Below is additional check to address future FW enhancement */
4621 	if (instance->ctrl_info_buf->max_lds > 64)
4622 		instance->supportmax256vd = 1;
4623 
4624 	instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
4625 					* MEGASAS_MAX_DEV_PER_CHANNEL;
4626 	instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
4627 					* MEGASAS_MAX_DEV_PER_CHANNEL;
4628 	if (instance->supportmax256vd) {
4629 		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
4630 		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4631 	} else {
4632 		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
4633 		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4634 	}
4635 
4636 	dev_info(&instance->pdev->dev,
4637 		"firmware type\t: %s\n",
4638 		instance->supportmax256vd ? "Extended VD(240 VD) firmware" :
4639 		"Legacy(64 VD) firmware");
4640 
4641 	if (instance->max_raid_mapsize) {
4642 		ventura_map_sz = instance->max_raid_mapsize *
4643 						MR_MIN_MAP_SIZE; /* 64k */
4644 		fusion->current_map_sz = ventura_map_sz;
4645 		fusion->max_map_sz = ventura_map_sz;
4646 	} else {
4647 		fusion->old_map_sz =  sizeof(struct MR_FW_RAID_MAP) +
4648 					(sizeof(struct MR_LD_SPAN_MAP) *
4649 					(instance->fw_supported_vd_count - 1));
4650 		fusion->new_map_sz =  sizeof(struct MR_FW_RAID_MAP_EXT);
4651 
4652 		fusion->max_map_sz =
4653 			max(fusion->old_map_sz, fusion->new_map_sz);
4654 
4655 		if (instance->supportmax256vd)
4656 			fusion->current_map_sz = fusion->new_map_sz;
4657 		else
4658 			fusion->current_map_sz = fusion->old_map_sz;
4659 	}
4660 	/* irrespective of FW raid maps, driver raid map is constant */
4661 	fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
4662 }
4663 
4664 /**
4665  * megasas_get_ctrl_info -		Returns FW's controller structure
4666  * @instance:				Adapter soft state
4667  *
4668  * Issues an internal command (DCMD) to get the FW's controller structure.
4669  * This information is mainly used to find out the maximum IO transfer per
4670  * command supported by the FW.
4671  */
4672 int
4673 megasas_get_ctrl_info(struct megasas_instance *instance)
4674 {
4675 	int ret = 0;
4676 	struct megasas_cmd *cmd;
4677 	struct megasas_dcmd_frame *dcmd;
4678 	struct megasas_ctrl_info *ci;
4679 	dma_addr_t ci_h = 0;
4680 
4681 	ci = instance->ctrl_info_buf;
4682 	ci_h = instance->ctrl_info_buf_h;
4683 
4684 	cmd = megasas_get_cmd(instance);
4685 
4686 	if (!cmd) {
4687 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
4688 		return -ENOMEM;
4689 	}
4690 
4691 	dcmd = &cmd->frame->dcmd;
4692 
4693 	memset(ci, 0, sizeof(*ci));
4694 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4695 
4696 	dcmd->cmd = MFI_CMD_DCMD;
4697 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4698 	dcmd->sge_count = 1;
4699 	dcmd->flags = MFI_FRAME_DIR_READ;
4700 	dcmd->timeout = 0;
4701 	dcmd->pad_0 = 0;
4702 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
4703 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
4704 	dcmd->mbox.b[0] = 1;
4705 
4706 	megasas_set_dma_settings(instance, dcmd, ci_h,
4707 				 sizeof(struct megasas_ctrl_info));
4708 
4709 	if ((instance->adapter_type != MFI_SERIES) &&
4710 	    !instance->mask_interrupts) {
4711 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4712 	} else {
4713 		ret = megasas_issue_polled(instance, cmd);
4714 		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4715 	}
4716 
4717 	switch (ret) {
4718 	case DCMD_SUCCESS:
4719 		/* Save required controller information in
4720 		 * CPU endianness format.
4721 		 */
4722 		le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
4723 		le32_to_cpus((u32 *)&ci->adapterOperations2);
4724 		le32_to_cpus((u32 *)&ci->adapterOperations3);
4725 		le16_to_cpus((u16 *)&ci->adapter_operations4);
4726 
4727 		/* Update the latest Ext VD info.
4728 		 * From the init path, store the current firmware details.
4729 		 * From the OCR path, detect any firmware property changes,
4730 		 * e.g. after a firmware upgrade without a system reboot.
4731 		 */
4732 		megasas_update_ext_vd_details(instance);
4733 		instance->use_seqnum_jbod_fp =
4734 			ci->adapterOperations3.useSeqNumJbodFP;
4735 		instance->support_morethan256jbod =
4736 			ci->adapter_operations4.support_pd_map_target_id;
4737 		instance->support_nvme_passthru =
4738 			ci->adapter_operations4.support_nvme_passthru;
4739 		instance->task_abort_tmo = ci->TaskAbortTO;
4740 		instance->max_reset_tmo = ci->MaxResetTO;
4741 
4742 		/* Check whether controller is iMR or MR */
4743 		instance->is_imr = (ci->memory_size ? 0 : 1);
4744 		dev_info(&instance->pdev->dev,
4745 			"controller type\t: %s(%dMB)\n",
4746 			instance->is_imr ? "iMR" : "MR",
4747 			le16_to_cpu(ci->memory_size));
4748 
4749 		instance->disableOnlineCtrlReset =
4750 			ci->properties.OnOffProperties.disableOnlineCtrlReset;
4751 		instance->secure_jbod_support =
4752 			ci->adapterOperations3.supportSecurityonJBOD;
4753 		dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
4754 			instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
4755 		dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
4756 			instance->secure_jbod_support ? "Yes" : "No");
4757 		dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
4758 			 instance->support_nvme_passthru ? "Yes" : "No");
4759 		dev_info(&instance->pdev->dev,
4760 			 "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n",
4761 			 instance->task_abort_tmo, instance->max_reset_tmo);
4762 
4763 		break;
4764 
4765 	case DCMD_TIMEOUT:
4766 		switch (dcmd_timeout_ocr_possible(instance)) {
4767 		case INITIATE_OCR:
4768 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4769 			megasas_reset_fusion(instance->host,
4770 				MFI_IO_TIMEOUT_OCR);
4771 			break;
4772 		case KILL_ADAPTER:
4773 			megaraid_sas_kill_hba(instance);
4774 			break;
4775 		case IGNORE_TIMEOUT:
4776 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4777 				__func__, __LINE__);
4778 			break;
4779 		}
4780 		break;
4781 	case DCMD_FAILED:
4782 		megaraid_sas_kill_hba(instance);
4783 		break;
4784 
4785 	}
4786 
4787 	if (ret != DCMD_TIMEOUT)
4788 		megasas_return_cmd(instance, cmd);
4789 
4790 	return ret;
4791 }
4792 
4793 /*
4794  * megasas_set_crash_dump_params -	Sends address of crash dump DMA buffer
4795  *					to firmware
4796  *
4797  * @instance:				Adapter soft state
4798  * @crash_buf_state:			tell FW to turn ON/OFF crash dump feature
4799  *					MR_CRASH_BUF_TURN_OFF = 0
4800  *					MR_CRASH_BUF_TURN_ON  = 1
4801  * Returns 0 on success, non-zero on failure.
4802  * Issues an internal command (DCMD) to set parameters for crash dump feature.
4803  * Driver will send address of crash dump DMA buffer and set mbox to tell FW
4804  * that driver supports crash dump feature. This DCMD will be sent only if
4805  * crash dump feature is supported by the FW.
4806  *
4807  */
4808 int megasas_set_crash_dump_params(struct megasas_instance *instance,
4809 	u8 crash_buf_state)
4810 {
4811 	int ret = 0;
4812 	struct megasas_cmd *cmd;
4813 	struct megasas_dcmd_frame *dcmd;
4814 
4815 	cmd = megasas_get_cmd(instance);
4816 
4817 	if (!cmd) {
4818 		dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
4819 		return -ENOMEM;
4820 	}
4821 
4822 
4823 	dcmd = &cmd->frame->dcmd;
4824 
4825 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4826 	dcmd->mbox.b[0] = crash_buf_state;
4827 	dcmd->cmd = MFI_CMD_DCMD;
4828 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4829 	dcmd->sge_count = 1;
4830 	dcmd->flags = MFI_FRAME_DIR_NONE;
4831 	dcmd->timeout = 0;
4832 	dcmd->pad_0 = 0;
4833 	dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
4834 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
4835 
4836 	megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h,
4837 				 CRASH_DMA_BUF_SIZE);
4838 
4839 	if ((instance->adapter_type != MFI_SERIES) &&
4840 	    !instance->mask_interrupts)
4841 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4842 	else
4843 		ret = megasas_issue_polled(instance, cmd);
4844 
4845 	if (ret == DCMD_TIMEOUT) {
4846 		switch (dcmd_timeout_ocr_possible(instance)) {
4847 		case INITIATE_OCR:
4848 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4849 			megasas_reset_fusion(instance->host,
4850 					MFI_IO_TIMEOUT_OCR);
4851 			break;
4852 		case KILL_ADAPTER:
4853 			megaraid_sas_kill_hba(instance);
4854 			break;
4855 		case IGNORE_TIMEOUT:
4856 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4857 				__func__, __LINE__);
4858 			break;
4859 		}
4860 	} else
4861 		megasas_return_cmd(instance, cmd);
4862 
4863 	return ret;
4864 }
4865 
4866 /**
4867  * megasas_issue_init_mfi -	Initializes the FW
4868  * @instance:		Adapter soft state
4869  *
4870  * Issues the INIT MFI cmd
4871  */
4872 static int
4873 megasas_issue_init_mfi(struct megasas_instance *instance)
4874 {
4875 	__le32 context;
4876 	struct megasas_cmd *cmd;
4877 	struct megasas_init_frame *init_frame;
4878 	struct megasas_init_queue_info *initq_info;
4879 	dma_addr_t init_frame_h;
4880 	dma_addr_t initq_info_h;
4881 
4882 	/*
4883 	 * Prepare an init frame. Note that the init frame points to the queue
4884 	 * info structure. Each frame has the SGL allocated after the first 64
4885 	 * bytes. For this frame - since we don't need any SGL - we use the
4886 	 * SGL's space for the queue info structure.
4887 	 *
4888 	 * We will not get a NULL command below. We just created the pool.
4889 	 */
4890 	cmd = megasas_get_cmd(instance);
4891 
4892 	init_frame = (struct megasas_init_frame *)cmd->frame;
4893 	initq_info = (struct megasas_init_queue_info *)
4894 		((unsigned long)init_frame + 64);
4895 
4896 	init_frame_h = cmd->frame_phys_addr;
4897 	initq_info_h = init_frame_h + 64;
4898 
4899 	context = init_frame->context;
4900 	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
4901 	memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
4902 	init_frame->context = context;
4903 
4904 	initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
4905 	initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
4906 
4907 	initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
4908 	initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
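	/*
	 * Only the low 32 bits of the queue addresses are programmed; the
	 * *_hi fields stay zero from the memset above, so these buffers are
	 * assumed to come from 32-bit addressable DMA memory.
	 */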
4909 
4910 	init_frame->cmd = MFI_CMD_INIT;
4911 	init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
4912 	init_frame->queue_info_new_phys_addr_lo =
4913 		cpu_to_le32(lower_32_bits(initq_info_h));
4914 	init_frame->queue_info_new_phys_addr_hi =
4915 		cpu_to_le32(upper_32_bits(initq_info_h));
4916 
4917 	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
4918 
4919 	/*
4920 	 * disable the intr before firing the init frame to FW
4921 	 */
4922 	instance->instancet->disable_intr(instance);
4923 
4924 	/*
4925 	 * Issue the init frame in polled mode
4926 	 */
4927 
4928 	if (megasas_issue_polled(instance, cmd)) {
4929 		dev_err(&instance->pdev->dev, "Failed to init firmware\n");
4930 		megasas_return_cmd(instance, cmd);
4931 		goto fail_fw_init;
4932 	}
4933 
4934 	megasas_return_cmd(instance, cmd);
4935 
4936 	return 0;
4937 
4938 fail_fw_init:
4939 	return -EINVAL;
4940 }
4941 
4942 static u32
4943 megasas_init_adapter_mfi(struct megasas_instance *instance)
4944 {
4945 	struct megasas_register_set __iomem *reg_set;
4946 	u32 context_sz;
4947 	u32 reply_q_sz;
4948 
4949 	reg_set = instance->reg_set;
4950 
4951 	/*
4952 	 * Get various operational parameters from status register
4953 	 */
4954 	instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
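	/* Bits 0-15 of the status register hold the max FW command count. */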
4955 	/*
4956 	 * Reduce the max supported cmds by 1. This is to ensure that the
4957 	 * reply_q_sz (1 more than the max cmd that driver may send)
4958 	 * does not exceed max cmds that the FW can support
4959 	 */
4960 	instance->max_fw_cmds = instance->max_fw_cmds-1;
4961 	instance->max_mfi_cmds = instance->max_fw_cmds;
4962 	instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
4963 					0x10;
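	/* Bits 16-23 of the status register hold the max supported SGE count. */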
4964 	/*
4965 	 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
4966 	 * are reserved for IOCTL + driver's internal DCMDs.
4967 	 */
4968 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4969 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
4970 		instance->max_scsi_cmds = (instance->max_fw_cmds -
4971 			MEGASAS_SKINNY_INT_CMDS);
4972 		sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
4973 	} else {
4974 		instance->max_scsi_cmds = (instance->max_fw_cmds -
4975 			MEGASAS_INT_CMDS);
4976 		sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
4977 	}
4978 
4979 	instance->cur_can_queue = instance->max_scsi_cmds;
4980 	/*
4981 	 * Create a pool of commands
4982 	 */
4983 	if (megasas_alloc_cmds(instance))
4984 		goto fail_alloc_cmds;
4985 
4986 	/*
4987 	 * Allocate memory for reply queue. Length of reply queue should
4988 	 * be _one_ more than the maximum commands handled by the firmware.
4989 	 *
4990 	 * Note: When FW completes commands, it places corresponding context
4991 	 * values in this circular reply queue. This circular queue is a fairly
4992 	 * typical producer-consumer queue. FW is the producer (of completed
4993 	 * commands) and the driver is the consumer.
4994 	 */
4995 	context_sz = sizeof(u32);
4996 	reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
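	/*
	 * Worked example (hypothetical numbers): if instance->max_fw_cmds is
	 * 999 at this point (FW reported 1000, reduced by one above), then
	 * reply_q_sz = 4 * (999 + 1) = 4000 bytes - one 32-bit context slot
	 * per possible outstanding command plus one extra slot so the
	 * circular queue never fills completely.
	 */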
4997 
4998 	instance->reply_queue = pci_alloc_consistent(instance->pdev,
4999 						     reply_q_sz,
5000 						     &instance->reply_queue_h);
5001 
5002 	if (!instance->reply_queue) {
5003 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
5004 		goto fail_reply_queue;
5005 	}
5006 
5007 	if (megasas_issue_init_mfi(instance))
5008 		goto fail_fw_init;
5009 
5010 	if (megasas_get_ctrl_info(instance)) {
5011 		dev_err(&instance->pdev->dev, "(%d): Could not get controller info. "
5012 			"Failed from %s %d\n", instance->unique_id,
5013 			__func__, __LINE__);
5014 		goto fail_fw_init;
5015 	}
5016 
5017 	instance->fw_support_ieee = 0;
5018 	instance->fw_support_ieee =
5019 		(instance->instancet->read_fw_status_reg(reg_set) &
5020 		0x04000000);
5021 
5022 	dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
5023 			instance->fw_support_ieee);
5024 
5025 	if (instance->fw_support_ieee)
5026 		instance->flag_ieee = 1;
5027 
5028 	return 0;
5029 
5030 fail_fw_init:
5031 
5032 	pci_free_consistent(instance->pdev, reply_q_sz,
5033 			    instance->reply_queue, instance->reply_queue_h);
5034 fail_reply_queue:
5035 	megasas_free_cmds(instance);
5036 
5037 fail_alloc_cmds:
5038 	return 1;
5039 }
5040 
5041 /*
5042  * megasas_setup_irqs_ioapic -		register legacy interrupts.
5043  * @instance:				Adapter soft state
5044  *
5045  * Do not enable interrupts, only set up the ISRs.
5046  *
5047  * Return 0 on success.
5048  */
5049 static int
5050 megasas_setup_irqs_ioapic(struct megasas_instance *instance)
5051 {
5052 	struct pci_dev *pdev;
5053 
5054 	pdev = instance->pdev;
5055 	instance->irq_context[0].instance = instance;
5056 	instance->irq_context[0].MSIxIndex = 0;
5057 	if (request_irq(pci_irq_vector(pdev, 0),
5058 			instance->instancet->service_isr, IRQF_SHARED,
5059 			"megasas", &instance->irq_context[0])) {
5060 		dev_err(&instance->pdev->dev,
5061 				"Failed to register IRQ from %s %d\n",
5062 				__func__, __LINE__);
5063 		return -1;
5064 	}
5065 	return 0;
5066 }
5067 
5068 /**
5069  * megasas_setup_irqs_msix -		register MSI-x interrupts.
5070  * @instance:				Adapter soft state
5071  * @is_probe:				Driver probe check
5072  *
5073  * Do not enable interrupts, only set up the ISRs.
5074  *
5075  * Return 0 on success.
5076  */
5077 static int
5078 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
5079 {
5080 	int i, j;
5081 	struct pci_dev *pdev;
5082 
5083 	pdev = instance->pdev;
5084 
5085 	/* Try MSI-x */
5086 	for (i = 0; i < instance->msix_vectors; i++) {
5087 		instance->irq_context[i].instance = instance;
5088 		instance->irq_context[i].MSIxIndex = i;
5089 		if (request_irq(pci_irq_vector(pdev, i),
5090 			instance->instancet->service_isr, 0, "megasas",
5091 			&instance->irq_context[i])) {
5092 			dev_err(&instance->pdev->dev,
5093 				"Failed to register IRQ for vector %d.\n", i);
5094 			for (j = 0; j < i; j++)
5095 				free_irq(pci_irq_vector(pdev, j),
5096 					 &instance->irq_context[j]);
5097 			/* Retry irq register for IO_APIC*/
5098 			instance->msix_vectors = 0;
5099 			if (is_probe) {
5100 				pci_free_irq_vectors(instance->pdev);
5101 				return megasas_setup_irqs_ioapic(instance);
5102 			} else {
5103 				return -1;
5104 			}
5105 		}
5106 	}
5107 	return 0;
5108 }
5109 
5110 /*
5111  * megasas_destroy_irqs -		unregister interrupts.
5112  * @instance:				Adapter soft state
5113  * return:				void
5114  */
5115 static void
5116 megasas_destroy_irqs(struct megasas_instance *instance) {
5117 
5118 	int i;
5119 
5120 	if (instance->msix_vectors)
5121 		for (i = 0; i < instance->msix_vectors; i++) {
5122 			free_irq(pci_irq_vector(instance->pdev, i),
5123 				 &instance->irq_context[i]);
5124 		}
5125 	else
5126 		free_irq(pci_irq_vector(instance->pdev, 0),
5127 			 &instance->irq_context[0]);
5128 }
5129 
5130 /**
5131  * megasas_setup_jbod_map -	setup jbod map for FP seq_number.
5132  * @instance:				Adapter soft state
5133  *
5134  * Allocates (if needed) the PD sequence number buffers and syncs them with
5135  * FW; disables the JBOD fast path sequence number feature if either fails.
5136  */
5137 void
5138 megasas_setup_jbod_map(struct megasas_instance *instance)
5139 {
5140 	int i;
5141 	struct fusion_context *fusion = instance->ctrl_context;
5142 	u32 pd_seq_map_sz;
5143 
5144 	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
5145 		(sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
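	/*
	 * Sizing note (illustrative): the (MAX_PHYSICAL_DEVICES - 1) accounts
	 * for the first MR_PD_CFG_SEQ entry already embedded in
	 * struct MR_PD_CFG_SEQ_NUM_SYNC, so the map covers every physical
	 * device exactly once.
	 */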
5146 
5147 	if (reset_devices || !fusion ||
5148 		!instance->ctrl_info_buf->adapterOperations3.useSeqNumJbodFP) {
5149 		dev_info(&instance->pdev->dev,
5150 			"Jbod map is not supported %s %d\n",
5151 			__func__, __LINE__);
5152 		instance->use_seqnum_jbod_fp = false;
5153 		return;
5154 	}
5155 
5156 	if (fusion->pd_seq_sync[0])
5157 		goto skip_alloc;
5158 
5159 	for (i = 0; i < JBOD_MAPS_COUNT; i++) {
5160 		fusion->pd_seq_sync[i] = dma_alloc_coherent
5161 			(&instance->pdev->dev, pd_seq_map_sz,
5162 			&fusion->pd_seq_phys[i], GFP_KERNEL);
5163 		if (!fusion->pd_seq_sync[i]) {
5164 			dev_err(&instance->pdev->dev,
5165 				"Failed to allocate memory from %s %d\n",
5166 				__func__, __LINE__);
5167 			if (i == 1) {
5168 				dma_free_coherent(&instance->pdev->dev,
5169 					pd_seq_map_sz, fusion->pd_seq_sync[0],
5170 					fusion->pd_seq_phys[0]);
5171 				fusion->pd_seq_sync[0] = NULL;
5172 			}
5173 			instance->use_seqnum_jbod_fp = false;
5174 			return;
5175 		}
5176 	}
5177 
5178 skip_alloc:
5179 	if (!megasas_sync_pd_seq_num(instance, false) &&
5180 		!megasas_sync_pd_seq_num(instance, true))
5181 		instance->use_seqnum_jbod_fp = true;
5182 	else
5183 		instance->use_seqnum_jbod_fp = false;
5184 }
5185 
5186 static void megasas_setup_reply_map(struct megasas_instance *instance)
5187 {
5188 	const struct cpumask *mask;
5189 	unsigned int queue, cpu;
5190 
5191 	for (queue = 0; queue < instance->msix_vectors; queue++) {
5192 		mask = pci_irq_get_affinity(instance->pdev, queue);
5193 		if (!mask)
5194 			goto fallback;
5195 
5196 		for_each_cpu(cpu, mask)
5197 			instance->reply_map[cpu] = queue;
5198 	}
5199 	return;
5200 
5201 fallback:
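	/*
	 * Fallback when affinity info is unavailable: distribute CPUs
	 * round-robin across the reply queues. For example, with 8 possible
	 * CPUs and 4 MSI-X vectors, CPUs 0-7 map to queues 0,1,2,3,0,1,2,3.
	 */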
5202 	for_each_possible_cpu(cpu)
5203 		instance->reply_map[cpu] = cpu % instance->msix_vectors;
5204 }
5205 
5206 /**
5207  * megasas_init_fw -	Initializes the FW
5208  * @instance:		Adapter soft state
5209  *
5210  * This is the main function for initializing firmware
5211  */
5212 
5213 static int megasas_init_fw(struct megasas_instance *instance)
5214 {
5215 	u32 max_sectors_1;
5216 	u32 max_sectors_2, tmp_sectors, msix_enable;
5217 	u32 scratch_pad_2, scratch_pad_3, scratch_pad_4;
5218 	resource_size_t base_addr;
5219 	struct megasas_register_set __iomem *reg_set;
5220 	struct megasas_ctrl_info *ctrl_info = NULL;
5221 	unsigned long bar_list;
5222 	int i, j, loop, fw_msix_count = 0;
5223 	struct IOV_111 *iovPtr;
5224 	struct fusion_context *fusion;
5225 
5226 	fusion = instance->ctrl_context;
5227 
5228 	/* Find first memory bar */
5229 	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
5230 	instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
5231 	if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
5232 					 "megasas: LSI")) {
5233 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
5234 		return -EBUSY;
5235 	}
5236 
5237 	base_addr = pci_resource_start(instance->pdev, instance->bar);
5238 	instance->reg_set = ioremap_nocache(base_addr, 8192);
5239 
5240 	if (!instance->reg_set) {
5241 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
5242 		goto fail_ioremap;
5243 	}
5244 
5245 	reg_set = instance->reg_set;
5246 
5247 	if (instance->adapter_type != MFI_SERIES)
5248 		instance->instancet = &megasas_instance_template_fusion;
5249 	else {
5250 		switch (instance->pdev->device) {
5251 		case PCI_DEVICE_ID_LSI_SAS1078R:
5252 		case PCI_DEVICE_ID_LSI_SAS1078DE:
5253 			instance->instancet = &megasas_instance_template_ppc;
5254 			break;
5255 		case PCI_DEVICE_ID_LSI_SAS1078GEN2:
5256 		case PCI_DEVICE_ID_LSI_SAS0079GEN2:
5257 			instance->instancet = &megasas_instance_template_gen2;
5258 			break;
5259 		case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
5260 		case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
5261 			instance->instancet = &megasas_instance_template_skinny;
5262 			break;
5263 		case PCI_DEVICE_ID_LSI_SAS1064R:
5264 		case PCI_DEVICE_ID_DELL_PERC5:
5265 		default:
5266 			instance->instancet = &megasas_instance_template_xscale;
5267 			instance->pd_list_not_supported = 1;
5268 			break;
5269 		}
5270 	}
5271 
5272 	if (megasas_transition_to_ready(instance, 0)) {
5273 		atomic_set(&instance->fw_reset_no_pci_access, 1);
5274 		instance->instancet->adp_reset
5275 			(instance, instance->reg_set);
5276 		atomic_set(&instance->fw_reset_no_pci_access, 0);
5277 		dev_info(&instance->pdev->dev,
5278 			"FW restarted successfully from %s!\n",
5279 			__func__);
5280 
5281 		/* waiting for about 30 seconds before retry */
5282 		ssleep(30);
5283 
5284 		if (megasas_transition_to_ready(instance, 0))
5285 			goto fail_ready_state;
5286 	}
5287 
5288 	megasas_init_ctrl_params(instance);
5289 
5290 	if (megasas_set_dma_mask(instance))
5291 		goto fail_ready_state;
5292 
5293 	if (megasas_alloc_ctrl_mem(instance))
5294 		goto fail_alloc_dma_buf;
5295 
5296 	if (megasas_alloc_ctrl_dma_buffers(instance))
5297 		goto fail_alloc_dma_buf;
5298 
5299 	fusion = instance->ctrl_context;
5300 
5301 	if (instance->adapter_type == VENTURA_SERIES) {
5302 		scratch_pad_3 =
5303 			readl(&instance->reg_set->outbound_scratch_pad_3);
5304 		instance->max_raid_mapsize = ((scratch_pad_3 >>
5305 			MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
5306 			MR_MAX_RAID_MAP_SIZE_MASK);
5307 	}
5308 
5309 	/* Check if MSI-X is supported while in ready state */
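	/* MSI-X support is advertised in bit 26 of the FW status register;
	 * the mask (0x4000000) and shift (0x1a) below extract just that bit.
	 */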
5310 	msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
5311 		       0x4000000) >> 0x1a;
5312 	if (msix_enable && !msix_disable) {
5313 		int irq_flags = PCI_IRQ_MSIX;
5314 
5315 		scratch_pad_2 = readl
5316 			(&instance->reg_set->outbound_scratch_pad_2);
5317 		/* Check max MSI-X vectors */
5318 		if (fusion) {
5319 			if (instance->adapter_type == THUNDERBOLT_SERIES) {
5320 				/* Thunderbolt Series*/
5321 				instance->msix_vectors = (scratch_pad_2
5322 					& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
5323 				fw_msix_count = instance->msix_vectors;
5324 			} else { /* Invader series supports more than 8 MSI-x vectors*/
5325 				instance->msix_vectors = ((scratch_pad_2
5326 					& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
5327 					>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
5328 				if (instance->msix_vectors > 16)
5329 					instance->msix_combined = true;
5330 
5331 				if (rdpq_enable)
5332 					instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
5333 								1 : 0;
5334 				fw_msix_count = instance->msix_vectors;
5335 				/* Save reply post host index addresses 1-15 in
5336 				 * local memory. Index 0 is already saved from
5337 				 * reg offset MPI2_REPLY_POST_HOST_INDEX_OFFSET.
5338 				 */
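				/*
				 * Illustration: entries are 0x10 bytes apart,
				 * so reply_post_host_index_addr[3] ends up at
				 * reg_set + MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
				 * + 0x30.
				 */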
5339 				for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
5340 					instance->reply_post_host_index_addr[loop] =
5341 						(u32 __iomem *)
5342 						((u8 __iomem *)instance->reg_set +
5343 						MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
5344 						+ (loop * 0x10));
5345 				}
5346 			}
5347 			if (msix_vectors)
5348 				instance->msix_vectors = min(msix_vectors,
5349 					instance->msix_vectors);
5350 		} else /* MFI adapters */
5351 			instance->msix_vectors = 1;
5352 		/* Don't bother allocating more MSI-X vectors than cpus */
5353 		instance->msix_vectors = min(instance->msix_vectors,
5354 					     (unsigned int)num_online_cpus());
5355 		if (smp_affinity_enable)
5356 			irq_flags |= PCI_IRQ_AFFINITY;
5357 		i = pci_alloc_irq_vectors(instance->pdev, 1,
5358 					  instance->msix_vectors, irq_flags);
5359 		if (i > 0)
5360 			instance->msix_vectors = i;
5361 		else
5362 			instance->msix_vectors = 0;
5363 	}
5364 	/*
5365 	 * MSI-X host index 0 is common for all adapters.
5366 	 * It is used for all MPT based adapters.
5367 	 */
5368 	if (instance->msix_combined) {
5369 		instance->reply_post_host_index_addr[0] =
5370 				(u32 *)((u8 *)instance->reg_set +
5371 				MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
5372 	} else {
5373 		instance->reply_post_host_index_addr[0] =
5374 			(u32 *)((u8 *)instance->reg_set +
5375 			MPI2_REPLY_POST_HOST_INDEX_OFFSET);
5376 	}
5377 
5378 	if (!instance->msix_vectors) {
5379 		i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
5380 		if (i < 0)
5381 			goto fail_setup_irqs;
5382 	}
5383 
5384 	megasas_setup_reply_map(instance);
5385 
5386 	dev_info(&instance->pdev->dev,
5387 		"firmware supports msix\t: (%d)", fw_msix_count);
5388 	dev_info(&instance->pdev->dev,
5389 		"current msix/online cpus\t: (%d/%d)\n",
5390 		instance->msix_vectors, (unsigned int)num_online_cpus());
5391 	dev_info(&instance->pdev->dev,
5392 		"RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
5393 
5394 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
5395 		(unsigned long)instance);
5396 
5397 	/*
5398 	 * Below are the default values for legacy firmware
5399 	 * (non-fusion based controllers).
5400 	 */
5401 	instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
5402 	instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5403 	/* Get operational params, sge flags, send init cmd to controller */
5404 	if (instance->instancet->init_adapter(instance))
5405 		goto fail_init_adapter;
5406 
5407 	if (instance->adapter_type == VENTURA_SERIES) {
5408 		scratch_pad_4 =
5409 			readl(&instance->reg_set->outbound_scratch_pad_4);
5410 		if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >=
5411 			MR_DEFAULT_NVME_PAGE_SHIFT)
5412 			instance->nvme_page_size =
5413 				(1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK));
5414 
5415 		dev_info(&instance->pdev->dev,
5416 			 "NVME page size\t: (%d)\n", instance->nvme_page_size);
5417 	}
5418 
5419 	if (instance->msix_vectors ?
5420 		megasas_setup_irqs_msix(instance, 1) :
5421 		megasas_setup_irqs_ioapic(instance))
5422 		goto fail_init_adapter;
5423 
5424 	instance->instancet->enable_intr(instance);
5425 
5426 	dev_info(&instance->pdev->dev, "INIT adapter done\n");
5427 
5428 	megasas_setup_jbod_map(instance);
5429 
5430 	/*
5431 	 * For passthrough, the following call gets the PD list.
5432 	 */
5433 	memset(instance->pd_list, 0,
5434 		(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
5435 	if (megasas_get_pd_list(instance) < 0) {
5436 		dev_err(&instance->pdev->dev, "failed to get PD list\n");
5437 		goto fail_get_ld_pd_list;
5438 	}
5439 
5440 	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5441 
5442 	/* stream detection initialization */
5443 	if (instance->adapter_type == VENTURA_SERIES) {
5444 		fusion->stream_detect_by_ld =
5445 			kcalloc(MAX_LOGICAL_DRIVES_EXT,
5446 				sizeof(struct LD_STREAM_DETECT *),
5447 				GFP_KERNEL);
5448 		if (!fusion->stream_detect_by_ld) {
5449 			dev_err(&instance->pdev->dev,
5450 				"unable to allocate stream detection for pool of LDs\n");
5451 			goto fail_get_ld_pd_list;
5452 		}
5453 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
5454 			fusion->stream_detect_by_ld[i] =
5455 				kzalloc(sizeof(struct LD_STREAM_DETECT),
5456 				GFP_KERNEL);
5457 			if (!fusion->stream_detect_by_ld[i]) {
5458 				dev_err(&instance->pdev->dev,
5459 					"unable to allocate stream detect by LD\n");
5460 				for (j = 0; j < i; ++j)
5461 					kfree(fusion->stream_detect_by_ld[j]);
5462 				kfree(fusion->stream_detect_by_ld);
5463 				fusion->stream_detect_by_ld = NULL;
5464 				goto fail_get_ld_pd_list;
5465 			}
5466 			fusion->stream_detect_by_ld[i]->mru_bit_map
5467 				= MR_STREAM_BITMAP;
5468 		}
5469 	}
5470 
5471 	if (megasas_ld_list_query(instance,
5472 				  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
5473 		goto fail_get_ld_pd_list;
5474 
5475 	/*
5476 	 * Compute the max allowed sectors per IO: the controller info has two
5477 	 * limits on max sectors. The driver should use the minimum of the two.
5478 	 *
5479 	 * 1 << stripe_sz_ops.min = max sectors per strip
5480 	 *
5481 	 * Note that older firmware (< FW ver 30) didn't report the information
5482 	 * needed to calculate max_sectors_1, so that number always ended up zero.
5483 	 */
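	/*
	 * Worked example (hypothetical values): stripe_sz_ops.min = 7 means a
	 * 64 KB strip (1 << 7 = 128 sectors); with max_strips_per_io = 42 that
	 * gives max_sectors_1 = 128 * 42 = 5376, which is then capped by
	 * max_request_size (max_sectors_2).
	 */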
5484 	tmp_sectors = 0;
5485 	ctrl_info = instance->ctrl_info_buf;
5486 
5487 	max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
5488 		le16_to_cpu(ctrl_info->max_strips_per_io);
5489 	max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
5490 
5491 	tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
5492 
5493 	instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
5494 	instance->passive = ctrl_info->cluster.passive;
5495 	memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
5496 	instance->UnevenSpanSupport =
5497 		ctrl_info->adapterOperations2.supportUnevenSpans;
5498 	if (instance->UnevenSpanSupport) {
5499 		struct fusion_context *fusion = instance->ctrl_context;
5500 		if (MR_ValidateMapInfo(instance, instance->map_id))
5501 			fusion->fast_path_io = 1;
5502 		else
5503 			fusion->fast_path_io = 0;
5504 
5505 	}
5506 	if (ctrl_info->host_interface.SRIOV) {
5507 		instance->requestorId = ctrl_info->iov.requestorId;
5508 		if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
5509 			if (!ctrl_info->adapterOperations2.activePassive)
5510 			    instance->PlasmaFW111 = 1;
5511 
5512 			dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
5513 			    instance->PlasmaFW111 ? "1.11" : "new");
5514 
5515 			if (instance->PlasmaFW111) {
5516 			    iovPtr = (struct IOV_111 *)
5517 				((unsigned char *)ctrl_info + IOV_111_OFFSET);
5518 			    instance->requestorId = iovPtr->requestorId;
5519 			}
5520 		}
5521 		dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
5522 			instance->requestorId);
5523 	}
5524 
5525 	instance->crash_dump_fw_support =
5526 		ctrl_info->adapterOperations3.supportCrashDump;
5527 	instance->crash_dump_drv_support =
5528 		(instance->crash_dump_fw_support &&
5529 		instance->crash_dump_buf);
5530 	if (instance->crash_dump_drv_support)
5531 		megasas_set_crash_dump_params(instance,
5532 			MR_CRASH_BUF_TURN_OFF);
5533 
5534 	else {
5535 		if (instance->crash_dump_buf)
5536 			pci_free_consistent(instance->pdev,
5537 				CRASH_DMA_BUF_SIZE,
5538 				instance->crash_dump_buf,
5539 				instance->crash_dump_h);
5540 		instance->crash_dump_buf = NULL;
5541 	}
5542 
5543 
5544 	dev_info(&instance->pdev->dev,
5545 		"pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
5546 		le16_to_cpu(ctrl_info->pci.vendor_id),
5547 		le16_to_cpu(ctrl_info->pci.device_id),
5548 		le16_to_cpu(ctrl_info->pci.sub_vendor_id),
5549 		le16_to_cpu(ctrl_info->pci.sub_device_id));
5550 	dev_info(&instance->pdev->dev, "unevenspan support	: %s\n",
5551 		instance->UnevenSpanSupport ? "yes" : "no");
5552 	dev_info(&instance->pdev->dev, "firmware crash dump	: %s\n",
5553 		instance->crash_dump_drv_support ? "yes" : "no");
5554 	dev_info(&instance->pdev->dev, "jbod sync map		: %s\n",
5555 		instance->use_seqnum_jbod_fp ? "yes" : "no");
5556 
5557 
5558 	instance->max_sectors_per_req = instance->max_num_sge *
5559 						SGE_BUFFER_SIZE / 512;
5560 	if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
5561 		instance->max_sectors_per_req = tmp_sectors;
5562 
5563 	/* Check for valid throttlequeuedepth module parameter */
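	/*
	 * e.g. loading with "modprobe megaraid_sas throttlequeuedepth=32" is
	 * honored only if 32 does not exceed instance->max_scsi_cmds;
	 * otherwise the MEGASAS_THROTTLE_QUEUE_DEPTH default is used.
	 */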
5564 	if (throttlequeuedepth &&
5565 			throttlequeuedepth <= instance->max_scsi_cmds)
5566 		instance->throttlequeuedepth = throttlequeuedepth;
5567 	else
5568 		instance->throttlequeuedepth =
5569 				MEGASAS_THROTTLE_QUEUE_DEPTH;
5570 
5571 	if ((resetwaittime < 1) ||
5572 	    (resetwaittime > MEGASAS_RESET_WAIT_TIME))
5573 		resetwaittime = MEGASAS_RESET_WAIT_TIME;
5574 
5575 	if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
5576 		scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
5577 
5578 	/* Launch SR-IOV heartbeat timer */
5579 	if (instance->requestorId) {
5580 		if (!megasas_sriov_start_heartbeat(instance, 1))
5581 			megasas_start_timer(instance);
5582 		else
5583 			instance->skip_heartbeat_timer_del = 1;
5584 	}
5585 
5586 	return 0;
5587 
5588 fail_get_ld_pd_list:
5589 	instance->instancet->disable_intr(instance);
5590 fail_init_adapter:
5591 	megasas_destroy_irqs(instance);
5592 fail_setup_irqs:
5593 	if (instance->msix_vectors)
5594 		pci_free_irq_vectors(instance->pdev);
5595 	instance->msix_vectors = 0;
5596 fail_alloc_dma_buf:
5597 	megasas_free_ctrl_dma_buffers(instance);
5598 	megasas_free_ctrl_mem(instance);
5599 fail_ready_state:
5600 	iounmap(instance->reg_set);
5601 
5602 fail_ioremap:
5603 	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
5604 
5605 	dev_err(&instance->pdev->dev, "Failed from %s %d\n",
5606 		__func__, __LINE__);
5607 	return -EINVAL;
5608 }
5609 
5610 /**
5611  * megasas_release_mfi -	Reverses the FW initialization
5612  * @instance:			Adapter soft state
5613  */
5614 static void megasas_release_mfi(struct megasas_instance *instance)
5615 {
5616 	u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
5617 
5618 	if (instance->reply_queue)
5619 		pci_free_consistent(instance->pdev, reply_q_sz,
5620 			    instance->reply_queue, instance->reply_queue_h);
5621 
5622 	megasas_free_cmds(instance);
5623 
5624 	iounmap(instance->reg_set);
5625 
5626 	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
5627 }
5628 
5629 /**
5630  * megasas_get_seq_num -	Gets latest event sequence numbers
5631  * @instance:			Adapter soft state
5632  * @eli:			FW event log sequence numbers information
5633  *
5634  * FW maintains a log of all events in a non-volatile area. Upper layers
5635  * usually find out the latest sequence number of the events, the seq number
5636  * at boot, etc. They "read" all events below the latest seq number by issuing
5637  * a direct fw cmd (DCMD). For future events (beyond the latest seq number),
5638  * they subscribe to AEN (asynchronous event notification) and wait for the
5639  * events to happen.
5640  */
5641 static int
5642 megasas_get_seq_num(struct megasas_instance *instance,
5643 		    struct megasas_evt_log_info *eli)
5644 {
5645 	struct megasas_cmd *cmd;
5646 	struct megasas_dcmd_frame *dcmd;
5647 	struct megasas_evt_log_info *el_info;
5648 	dma_addr_t el_info_h = 0;
5649 	int ret;
5650 
5651 	cmd = megasas_get_cmd(instance);
5652 
5653 	if (!cmd) {
5654 		return -ENOMEM;
5655 	}
5656 
5657 	dcmd = &cmd->frame->dcmd;
5658 	el_info = pci_zalloc_consistent(instance->pdev,
5659 					sizeof(struct megasas_evt_log_info),
5660 					&el_info_h);
5661 
5662 	if (!el_info) {
5663 		megasas_return_cmd(instance, cmd);
5664 		return -ENOMEM;
5665 	}
5666 
5667 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5668 
5669 	dcmd->cmd = MFI_CMD_DCMD;
5670 	dcmd->cmd_status = 0x0;
5671 	dcmd->sge_count = 1;
5672 	dcmd->flags = MFI_FRAME_DIR_READ;
5673 	dcmd->timeout = 0;
5674 	dcmd->pad_0 = 0;
5675 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
5676 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
5677 
5678 	megasas_set_dma_settings(instance, dcmd, el_info_h,
5679 				 sizeof(struct megasas_evt_log_info));
5680 
5681 	ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5682 	if (ret != DCMD_SUCCESS) {
5683 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
5684 			__func__, __LINE__);
5685 		goto dcmd_failed;
5686 	}
5687 
5688 	/*
5689 	 * Copy the data back into callers buffer
5690 	 */
5691 	eli->newest_seq_num = el_info->newest_seq_num;
5692 	eli->oldest_seq_num = el_info->oldest_seq_num;
5693 	eli->clear_seq_num = el_info->clear_seq_num;
5694 	eli->shutdown_seq_num = el_info->shutdown_seq_num;
5695 	eli->boot_seq_num = el_info->boot_seq_num;
5696 
5697 dcmd_failed:
5698 	pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
5699 			    el_info, el_info_h);
5700 
5701 	megasas_return_cmd(instance, cmd);
5702 
5703 	return ret;
5704 }
5705 
5706 /**
5707  * megasas_register_aen -	Registers for asynchronous event notification
5708  * @instance:			Adapter soft state
5709  * @seq_num:			The starting sequence number
5710  * @class_locale:		Class of the event
5711  *
5712  * This function subscribes for AEN for events beyond the @seq_num. It requests
5713  * to be notified if and only if the event is of type @class_locale
5714  */
5715 static int
5716 megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
5717 		     u32 class_locale_word)
5718 {
5719 	int ret_val;
5720 	struct megasas_cmd *cmd;
5721 	struct megasas_dcmd_frame *dcmd;
5722 	union megasas_evt_class_locale curr_aen;
5723 	union megasas_evt_class_locale prev_aen;
5724 
5725 	/*
5726 	 * If there is an AEN pending already (aen_cmd), check whether the
5727 	 * class_locale of that pending AEN is inclusive of the new
5728 	 * AEN request we currently have. If it is, then we don't have
5729 	 * to do anything. In other words, whichever events the current
5730 	 * AEN request is subscribing to have already been subscribed
5731 	 * to.
5732 	 *
5733 	 * If the old cmd is _not_ inclusive, then we have to abort
5734 	 * that command, form a class_locale that is a superset of both
5735 	 * the old and the current one, and re-issue it to the FW.
5736 	 */
5737 
5738 	curr_aen.word = class_locale_word;
5739 
5740 	if (instance->aen_cmd) {
5741 
5742 		prev_aen.word =
5743 			le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
5744 
5745 		if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
5746 		    (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
5747 			dev_info(&instance->pdev->dev,
5748 				 "%s %d out of range class %d sent by application\n",
5749 				 __func__, __LINE__, curr_aen.members.class);
5750 			return 0;
5751 		}
5752 
5753 		/*
5754 		 * A class whose enum value is smaller is inclusive of all
5755 		 * higher values. If a PROGRESS (= -1) was previously
5756 		 * registered, then new registration requests for higher
5757 		 * classes need not be sent to FW. They are automatically
5758 		 * included.
5759 		 *
5760 		 * Locale numbers don't have such a hierarchy; they are
5761 		 * bitmap values.
5762 		 */
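		/*
		 * For illustration: if the pending AEN was registered with
		 * class PROGRESS (= -1) and the new request asks for a
		 * numerically higher class, say MFI_EVT_CLASS_CRITICAL, with
		 * the same locale bits, the checks below pass and no
		 * re-registration is needed.
		 */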
5763 		if ((prev_aen.members.class <= curr_aen.members.class) &&
5764 		    !((prev_aen.members.locale & curr_aen.members.locale) ^
5765 		      curr_aen.members.locale)) {
5766 			/*
5767 			 * Previously issued event registration includes
5768 			 * current request. Nothing to do.
5769 			 */
5770 			return 0;
5771 		} else {
5772 			curr_aen.members.locale |= prev_aen.members.locale;
5773 
5774 			if (prev_aen.members.class < curr_aen.members.class)
5775 				curr_aen.members.class = prev_aen.members.class;
5776 
5777 			instance->aen_cmd->abort_aen = 1;
5778 			ret_val = megasas_issue_blocked_abort_cmd(instance,
5779 								  instance->
5780 								  aen_cmd, 30);
5781 
5782 			if (ret_val) {
5783 				dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
5784 				       "previous AEN command\n");
5785 				return ret_val;
5786 			}
5787 		}
5788 	}
5789 
5790 	cmd = megasas_get_cmd(instance);
5791 
5792 	if (!cmd)
5793 		return -ENOMEM;
5794 
5795 	dcmd = &cmd->frame->dcmd;
5796 
5797 	memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
5798 
5799 	/*
5800 	 * Prepare DCMD for aen registration
5801 	 */
5802 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5803 
5804 	dcmd->cmd = MFI_CMD_DCMD;
5805 	dcmd->cmd_status = 0x0;
5806 	dcmd->sge_count = 1;
5807 	dcmd->flags = MFI_FRAME_DIR_READ;
5808 	dcmd->timeout = 0;
5809 	dcmd->pad_0 = 0;
5810 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
5811 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
5812 	dcmd->mbox.w[0] = cpu_to_le32(seq_num);
5813 	instance->last_seq_num = seq_num;
5814 	dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
5815 
5816 	megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h,
5817 				 sizeof(struct megasas_evt_detail));
5818 
5819 	if (instance->aen_cmd != NULL) {
5820 		megasas_return_cmd(instance, cmd);
5821 		return 0;
5822 	}
5823 
5824 	/*
5825 	 * Store reference to the cmd used to register for AEN. When an
5826 	 * application wants us to register for AEN, we have to abort this
5827 	 * cmd and re-register with a new EVENT LOCALE supplied by that app
5828 	 */
5829 	instance->aen_cmd = cmd;
5830 
5831 	/*
5832 	 * Issue the aen registration frame
5833 	 */
5834 	instance->instancet->issue_dcmd(instance, cmd);
5835 
5836 	return 0;
5837 }
5838 
5839 /* megasas_get_target_prop - Send DCMD with the below details to firmware.
5840  *
5841  * This DCMD fetches a few properties of the LD/system PD defined
5842  * in MR_TARGET_PROPERTIES, e.g. queue depth, MDTS value.
5843  *
5844  * The DCMD is sent by the driver whenever a new target is added to the OS.
5845  *
5846  * dcmd.opcode         - MR_DCMD_DRV_GET_TARGET_PROP
5847  * dcmd.mbox.b[0]      - DCMD is to be fired for LD or system PD.
5848  *                       0 = system PD, 1 = LD.
5849  * dcmd.mbox.s[1]      - TargetID for LD/system PD.
5850  * dcmd.sge IN         - Pointer to return MR_TARGET_PROPERTIES.
5851  *
5852  * @instance:		Adapter soft state
5853  * @sdev:		OS provided scsi device
5854  *
5855  * Returns 0 on success, non-zero on failure.
5856  */
5857 int
5858 megasas_get_target_prop(struct megasas_instance *instance,
5859 			struct scsi_device *sdev)
5860 {
5861 	int ret;
5862 	struct megasas_cmd *cmd;
5863 	struct megasas_dcmd_frame *dcmd;
5864 	u16 targetId = (sdev->channel % 2) + sdev->id;
5865 
5866 	cmd = megasas_get_cmd(instance);
5867 
5868 	if (!cmd) {
5869 		dev_err(&instance->pdev->dev,
5870 			"Failed to get cmd %s\n", __func__);
5871 		return -ENOMEM;
5872 	}
5873 
5874 	dcmd = &cmd->frame->dcmd;
5875 
5876 	memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
5877 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5878 	dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
5879 
5880 	dcmd->mbox.s[1] = cpu_to_le16(targetId);
5881 	dcmd->cmd = MFI_CMD_DCMD;
5882 	dcmd->cmd_status = 0xFF;
5883 	dcmd->sge_count = 1;
5884 	dcmd->flags = MFI_FRAME_DIR_READ;
5885 	dcmd->timeout = 0;
5886 	dcmd->pad_0 = 0;
5887 	dcmd->data_xfer_len =
5888 		cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
5889 	dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
5890 
5891 	megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h,
5892 				 sizeof(struct MR_TARGET_PROPERTIES));
5893 
5894 	if ((instance->adapter_type != MFI_SERIES) &&
5895 	    !instance->mask_interrupts)
5896 		ret = megasas_issue_blocked_cmd(instance,
5897 						cmd, MFI_IO_TIMEOUT_SECS);
5898 	else
5899 		ret = megasas_issue_polled(instance, cmd);
5900 
5901 	switch (ret) {
5902 	case DCMD_TIMEOUT:
5903 		switch (dcmd_timeout_ocr_possible(instance)) {
5904 		case INITIATE_OCR:
5905 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5906 			megasas_reset_fusion(instance->host,
5907 					     MFI_IO_TIMEOUT_OCR);
5908 			break;
5909 		case KILL_ADAPTER:
5910 			megaraid_sas_kill_hba(instance);
5911 			break;
5912 		case IGNORE_TIMEOUT:
5913 			dev_info(&instance->pdev->dev,
5914 				 "Ignore DCMD timeout: %s %d\n",
5915 				 __func__, __LINE__);
5916 			break;
5917 		}
5918 		break;
5919 
5920 	default:
5921 		megasas_return_cmd(instance, cmd);
5922 	}
5923 	if (ret != DCMD_SUCCESS)
5924 		dev_err(&instance->pdev->dev,
5925 			"return from %s %d return value %d\n",
5926 			__func__, __LINE__, ret);
5927 
5928 	return ret;
5929 }
5930 
5931 /**
5932  * megasas_start_aen -	Subscribes to AEN during driver load time
5933  * @instance:		Adapter soft state
5934  */
5935 static int megasas_start_aen(struct megasas_instance *instance)
5936 {
5937 	struct megasas_evt_log_info eli;
5938 	union megasas_evt_class_locale class_locale;
5939 
5940 	/*
5941 	 * Get the latest sequence number from FW
5942 	 */
5943 	memset(&eli, 0, sizeof(eli));
5944 
5945 	if (megasas_get_seq_num(instance, &eli))
5946 		return -1;
5947 
5948 	/*
5949 	 * Register AEN with FW for latest sequence number plus 1
5950 	 */
5951 	class_locale.members.reserved = 0;
5952 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
5953 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
5954 
5955 	return megasas_register_aen(instance,
5956 			le32_to_cpu(eli.newest_seq_num) + 1,
5957 			class_locale.word);
5958 }
5959 
5960 /**
5961  * megasas_io_attach -	Attaches this driver to SCSI mid-layer
5962  * @instance:		Adapter soft state
5963  */
5964 static int megasas_io_attach(struct megasas_instance *instance)
5965 {
5966 	struct Scsi_Host *host = instance->host;
5967 
5968 	/*
5969 	 * Export parameters required by SCSI mid-layer
5970 	 */
5971 	host->unique_id = instance->unique_id;
5972 	host->can_queue = instance->max_scsi_cmds;
5973 	host->this_id = instance->init_id;
5974 	host->sg_tablesize = instance->max_num_sge;
5975 
5976 	if (instance->fw_support_ieee)
5977 		instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
5978 
5979 	/*
5980 	 * Check if the module parameter value for max_sectors can be used
5981 	 */
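	/*
	 * e.g. loading with "modprobe megaraid_sas max_sectors=128" lowers the
	 * per-command limit to 128 sectors (64 KB), as long as that is below
	 * what the controller itself reported.
	 */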
5982 	if (max_sectors && max_sectors < instance->max_sectors_per_req)
5983 		instance->max_sectors_per_req = max_sectors;
5984 	else {
5985 		if (max_sectors) {
5986 			if (((instance->pdev->device ==
5987 				PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
5988 				(instance->pdev->device ==
5989 				PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
5990 				(max_sectors <= MEGASAS_MAX_SECTORS)) {
5991 				instance->max_sectors_per_req = max_sectors;
5992 			} else {
5993 			dev_info(&instance->pdev->dev, "max_sectors should be > 0 "
5994 				"and <= %d (or < 1MB for GEN2 controller)\n",
5995 				instance->max_sectors_per_req);
5996 			}
5997 		}
5998 	}
5999 
6000 	host->max_sectors = instance->max_sectors_per_req;
6001 	host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
6002 	host->max_channel = MEGASAS_MAX_CHANNELS - 1;
6003 	host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
6004 	host->max_lun = MEGASAS_MAX_LUN;
6005 	host->max_cmd_len = 16;
6006 
6007 	/*
6008 	 * Notify the mid-layer about the new controller
6009 	 */
6010 	if (scsi_add_host(host, &instance->pdev->dev)) {
6011 		dev_err(&instance->pdev->dev,
6012 			"Failed to add host from %s %d\n",
6013 			__func__, __LINE__);
6014 		return -ENODEV;
6015 	}
6016 
6017 	return 0;
6018 }
6019 
6020 /**
6021  * megasas_set_dma_mask -	Set DMA mask for supported controllers
6022  *
6023  * @instance:		Adapter soft state
6024  * Description:
6025  *
6026  * For Ventura, driver/FW will operate with 64-bit DMA addresses.
6027  *
6028  * For Invader -
6029  *	By default, driver/FW will operate with 32-bit DMA addresses
6030  *	for consistent DMA mapping, but if the 32-bit consistent
6031  *	DMA mask fails, the driver will try a 64-bit consistent
6032  *	mask, provided the FW is truly 64-bit DMA capable.
6033  *
6034  * For older controllers (Thunderbolt and MFI based adapters) -
6035  *	driver/FW will operate with 32-bit consistent DMA addresses.
6036  */
6037 static int
6038 megasas_set_dma_mask(struct megasas_instance *instance)
6039 {
6040 	u64 consistent_mask;
6041 	struct pci_dev *pdev;
6042 	u32 scratch_pad_2;
6043 
6044 	pdev = instance->pdev;
6045 	consistent_mask = (instance->adapter_type == VENTURA_SERIES) ?
6046 				DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
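	/* Ventura controllers start with a 64-bit coherent mask; all other
	 * families start with 32-bit (see the function comment above).
	 */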
6047 
6048 	if (IS_DMA64) {
6049 		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
6050 		    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6051 			goto fail_set_dma_mask;
6052 
6053 		if ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) &&
6054 		    (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
6055 		     dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
6056 			/*
6057 			 * If the 32-bit DMA mask fails, then try the 64-bit
6058 			 * mask, provided the FW can handle 64-bit DMA.
6059 			 */
6060 			scratch_pad_2 = readl
6061 				(&instance->reg_set->outbound_scratch_pad_2);
6062 
6063 			if (!(scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
6064 				goto fail_set_dma_mask;
6065 			else if (dma_set_mask_and_coherent(&pdev->dev,
6066 							   DMA_BIT_MASK(64)))
6067 				goto fail_set_dma_mask;
6068 		}
6069 	} else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6070 		goto fail_set_dma_mask;
6071 
6072 	if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32))
6073 		instance->consistent_mask_64bit = false;
6074 	else
6075 		instance->consistent_mask_64bit = true;
6076 
6077 	dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
6078 		 ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "64" : "32"),
6079 		 (instance->consistent_mask_64bit ? "64" : "32"));
6080 
6081 	return 0;
6082 
6083 fail_set_dma_mask:
6084 	dev_err(&pdev->dev, "Failed to set DMA mask\n");
6085 	return -1;
6086 
6087 }
6088 
6089 /*
6090  * megasas_set_adapter_type -	Set adapter type.
6091  *				Supported controllers can be divided in
6092  *				Supported controllers can be divided into
6093  *							MFI_SERIES = 1,
6094  *							THUNDERBOLT_SERIES = 2,
6095  *							INVADER_SERIES = 3,
6096  *							VENTURA_SERIES = 4,
6097  *						};
6098  * @instance:			Adapter soft state
6099  * return:			void
6100  */
6101 static inline void megasas_set_adapter_type(struct megasas_instance *instance)
6102 {
6103 	if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) &&
6104 	    (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) {
6105 		instance->adapter_type = MFI_SERIES;
6106 	} else {
6107 		switch (instance->pdev->device) {
6108 		case PCI_DEVICE_ID_LSI_VENTURA:
6109 		case PCI_DEVICE_ID_LSI_CRUSADER:
6110 		case PCI_DEVICE_ID_LSI_HARPOON:
6111 		case PCI_DEVICE_ID_LSI_TOMCAT:
6112 		case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
6113 		case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
6114 			instance->adapter_type = VENTURA_SERIES;
6115 			break;
6116 		case PCI_DEVICE_ID_LSI_FUSION:
6117 		case PCI_DEVICE_ID_LSI_PLASMA:
6118 			instance->adapter_type = THUNDERBOLT_SERIES;
6119 			break;
6120 		case PCI_DEVICE_ID_LSI_INVADER:
6121 		case PCI_DEVICE_ID_LSI_INTRUDER:
6122 		case PCI_DEVICE_ID_LSI_INTRUDER_24:
6123 		case PCI_DEVICE_ID_LSI_CUTLASS_52:
6124 		case PCI_DEVICE_ID_LSI_CUTLASS_53:
6125 		case PCI_DEVICE_ID_LSI_FURY:
6126 			instance->adapter_type = INVADER_SERIES;
6127 			break;
6128 		default: /* For all other supported controllers */
6129 			instance->adapter_type = MFI_SERIES;
6130 			break;
6131 		}
6132 	}
6133 }
6134 
6135 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
6136 {
6137 	instance->producer = pci_alloc_consistent(instance->pdev, sizeof(u32),
6138 						  &instance->producer_h);
6139 	instance->consumer = pci_alloc_consistent(instance->pdev, sizeof(u32),
6140 						  &instance->consumer_h);
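	/* These two u32s are the producer and consumer indices of the MFI
	 * reply queue described in megasas_init_adapter_mfi(): FW advances
	 * the producer as it completes commands, the driver advances the
	 * consumer as it reaps them.
	 */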
6141 
6142 	if (!instance->producer || !instance->consumer) {
6143 		dev_err(&instance->pdev->dev,
6144 			"Failed to allocate memory for producer, consumer\n");
6145 		return -1;
6146 	}
6147 
6148 	*instance->producer = 0;
6149 	*instance->consumer = 0;
6150 	return 0;
6151 }
6152 
6153 /**
6154  * megasas_alloc_ctrl_mem -	Allocate per controller memory for core data
6155  *				structures which are not common across MFI
6156  *				adapters and fusion adapters.
6157  *				For MFI based adapters, allocate producer and
6158  *				consumer buffers. For fusion adapters, allocate
6159  *				memory for fusion context.
6160  * @instance:			Adapter soft state
6161  * return:			0 for SUCCESS
6162  */
6163 static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
6164 {
6165 	instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int),
6166 				      GFP_KERNEL);
6167 	if (!instance->reply_map)
6168 		return -ENOMEM;
6169 
6170 	switch (instance->adapter_type) {
6171 	case MFI_SERIES:
6172 		if (megasas_alloc_mfi_ctrl_mem(instance))
6173 			goto fail;
6174 		break;
6175 	case VENTURA_SERIES:
6176 	case THUNDERBOLT_SERIES:
6177 	case INVADER_SERIES:
6178 		if (megasas_alloc_fusion_context(instance))
6179 			goto fail;
6180 		break;
6181 	}
6182 
6183 	return 0;
6184  fail:
6185 	kfree(instance->reply_map);
6186 	instance->reply_map = NULL;
6187 	return -ENOMEM;
6188 }
6189 
6190 /*
6191  * megasas_free_ctrl_mem -	Free fusion context for fusion adapters and
6192  *				producer, consumer buffers for MFI adapters
6193  *
6194  * @instance -			Adapter soft instance
6195  *
6196  */
6197 static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
6198 {
6199 	kfree(instance->reply_map);
6200 	if (instance->adapter_type == MFI_SERIES) {
6201 		if (instance->producer)
6202 			pci_free_consistent(instance->pdev, sizeof(u32),
6203 					    instance->producer,
6204 					    instance->producer_h);
6205 		if (instance->consumer)
6206 			pci_free_consistent(instance->pdev, sizeof(u32),
6207 					    instance->consumer,
6208 					    instance->consumer_h);
6209 	} else {
6210 		megasas_free_fusion_context(instance);
6211 	}
6212 }
6213 
6214 /**
6215  * megasas_alloc_ctrl_dma_buffers -	Allocate consistent DMA buffers during
6216  *					driver load time
6217  *
6218  * @instance-				Adapter soft instance
6219  * @return-				0 for SUCCESS
6220  */
6221 static inline
6222 int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
6223 {
6224 	struct pci_dev *pdev = instance->pdev;
6225 	struct fusion_context *fusion = instance->ctrl_context;
6226 
6227 	instance->evt_detail =
6228 		pci_alloc_consistent(pdev,
6229 				     sizeof(struct megasas_evt_detail),
6230 				     &instance->evt_detail_h);
6231 
6232 	if (!instance->evt_detail) {
6233 		dev_err(&instance->pdev->dev,
6234 			"Failed to allocate event detail buffer\n");
6235 		return -ENOMEM;
6236 	}
6237 
6238 	if (fusion) {
6239 		fusion->ioc_init_request =
6240 			dma_alloc_coherent(&pdev->dev,
6241 					   sizeof(struct MPI2_IOC_INIT_REQUEST),
6242 					   &fusion->ioc_init_request_phys,
6243 					   GFP_KERNEL);
6244 
6245 		if (!fusion->ioc_init_request) {
6246 			dev_err(&pdev->dev,
6247 				"Failed to allocate ioc init request buffer\n");
6248 			return -ENOMEM;
6249 		}
6250 	}
6251 
6252 	instance->pd_list_buf =
6253 		pci_alloc_consistent(pdev,
6254 				     MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
6255 				     &instance->pd_list_buf_h);
6256 
6257 	if (!instance->pd_list_buf) {
6258 		dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
6259 		return -ENOMEM;
6260 	}
6261 
6262 	instance->ctrl_info_buf =
6263 		pci_alloc_consistent(pdev,
6264 				     sizeof(struct megasas_ctrl_info),
6265 				     &instance->ctrl_info_buf_h);
6266 
6267 	if (!instance->ctrl_info_buf) {
6268 		dev_err(&pdev->dev,
6269 			"Failed to allocate controller info buffer\n");
6270 		return -ENOMEM;
6271 	}
6272 
6273 	instance->ld_list_buf =
6274 		pci_alloc_consistent(pdev,
6275 				     sizeof(struct MR_LD_LIST),
6276 				     &instance->ld_list_buf_h);
6277 
6278 	if (!instance->ld_list_buf) {
6279 		dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
6280 		return -ENOMEM;
6281 	}
6282 
6283 	instance->ld_targetid_list_buf =
6284 		pci_alloc_consistent(pdev,
6285 				     sizeof(struct MR_LD_TARGETID_LIST),
6286 				     &instance->ld_targetid_list_buf_h);
6287 
6288 	if (!instance->ld_targetid_list_buf) {
6289 		dev_err(&pdev->dev,
6290 			"Failed to allocate LD targetid list buffer\n");
6291 		return -ENOMEM;
6292 	}
6293 
6294 	if (!reset_devices) {
6295 		instance->system_info_buf =
6296 			pci_alloc_consistent(pdev,
6297 					     sizeof(struct MR_DRV_SYSTEM_INFO),
6298 					     &instance->system_info_h);
6299 		instance->pd_info =
6300 			pci_alloc_consistent(pdev,
6301 					     sizeof(struct MR_PD_INFO),
6302 					     &instance->pd_info_h);
6303 		instance->tgt_prop =
6304 			pci_alloc_consistent(pdev,
6305 					     sizeof(struct MR_TARGET_PROPERTIES),
6306 					     &instance->tgt_prop_h);
6307 		instance->crash_dump_buf =
6308 			pci_alloc_consistent(pdev,
6309 					     CRASH_DMA_BUF_SIZE,
6310 					     &instance->crash_dump_h);
6311 
6312 		if (!instance->system_info_buf)
6313 			dev_err(&instance->pdev->dev,
6314 				"Failed to allocate system info buffer\n");
6315 
6316 		if (!instance->pd_info)
6317 			dev_err(&instance->pdev->dev,
6318 				"Failed to allocate pd_info buffer\n");
6319 
6320 		if (!instance->tgt_prop)
6321 			dev_err(&instance->pdev->dev,
6322 				"Failed to allocate tgt_prop buffer\n");
6323 
6324 		if (!instance->crash_dump_buf)
6325 			dev_err(&instance->pdev->dev,
6326 				"Failed to allocate crash dump buffer\n");
6327 	}
6328 
6329 	return 0;
6330 }
6331 
6332 /*
6333  * megasas_free_ctrl_dma_buffers -	Free consistent DMA buffers allocated
6334  *					during driver load time
6335  *
6336  * @instance-				Adapter soft instance
6337  *
6338  */
6339 static inline
6340 void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
6341 {
6342 	struct pci_dev *pdev = instance->pdev;
6343 	struct fusion_context *fusion = instance->ctrl_context;
6344 
6345 	if (instance->evt_detail)
6346 		pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6347 				    instance->evt_detail,
6348 				    instance->evt_detail_h);
6349 
6350 	if (fusion && fusion->ioc_init_request)
6351 		dma_free_coherent(&pdev->dev,
6352 				  sizeof(struct MPI2_IOC_INIT_REQUEST),
6353 				  fusion->ioc_init_request,
6354 				  fusion->ioc_init_request_phys);
6355 
6356 	if (instance->pd_list_buf)
6357 		pci_free_consistent(pdev,
6358 				    MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
6359 				    instance->pd_list_buf,
6360 				    instance->pd_list_buf_h);
6361 
6362 	if (instance->ld_list_buf)
6363 		pci_free_consistent(pdev, sizeof(struct MR_LD_LIST),
6364 				    instance->ld_list_buf,
6365 				    instance->ld_list_buf_h);
6366 
6367 	if (instance->ld_targetid_list_buf)
6368 		pci_free_consistent(pdev, sizeof(struct MR_LD_TARGETID_LIST),
6369 				    instance->ld_targetid_list_buf,
6370 				    instance->ld_targetid_list_buf_h);
6371 
6372 	if (instance->ctrl_info_buf)
6373 		pci_free_consistent(pdev, sizeof(struct megasas_ctrl_info),
6374 				    instance->ctrl_info_buf,
6375 				    instance->ctrl_info_buf_h);
6376 
6377 	if (instance->system_info_buf)
6378 		pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
6379 				    instance->system_info_buf,
6380 				    instance->system_info_h);
6381 
6382 	if (instance->pd_info)
6383 		pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6384 				    instance->pd_info, instance->pd_info_h);
6385 
6386 	if (instance->tgt_prop)
6387 		pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
6388 				    instance->tgt_prop, instance->tgt_prop_h);
6389 
6390 	if (instance->crash_dump_buf)
6391 		pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
6392 				    instance->crash_dump_buf,
6393 				    instance->crash_dump_h);
6394 }
6395 
6396 /*
6397  * megasas_init_ctrl_params -		Initialize controller's instance
6398  *					parameters before FW init
6399  * @instance -				Adapter soft instance
6400  * @return -				void
6401  */
6402 static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
6403 {
6404 	instance->fw_crash_state = UNAVAILABLE;
6405 
6406 	megasas_poll_wait_aen = 0;
6407 	instance->issuepend_done = 1;
6408 	atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
6409 
6410 	/*
6411 	 * Initialize locks and queues
6412 	 */
6413 	INIT_LIST_HEAD(&instance->cmd_pool);
6414 	INIT_LIST_HEAD(&instance->internal_reset_pending_q);
6415 
6416 	atomic_set(&instance->fw_outstanding, 0);
6417 
6418 	init_waitqueue_head(&instance->int_cmd_wait_q);
6419 	init_waitqueue_head(&instance->abort_cmd_wait_q);
6420 
6421 	spin_lock_init(&instance->crashdump_lock);
6422 	spin_lock_init(&instance->mfi_pool_lock);
6423 	spin_lock_init(&instance->hba_lock);
6424 	spin_lock_init(&instance->stream_lock);
6425 	spin_lock_init(&instance->completion_lock);
6426 
6427 	mutex_init(&instance->reset_mutex);
6428 
6429 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
6430 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
6431 		instance->flag_ieee = 1;
6432 
6433 	megasas_dbg_lvl = 0;
6434 	instance->flag = 0;
6435 	instance->unload = 1;
6436 	instance->last_time = 0;
6437 	instance->disableOnlineCtrlReset = 1;
6438 	instance->UnevenSpanSupport = 0;
6439 
6440 	if (instance->adapter_type != MFI_SERIES) {
6441 		INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
6442 		INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
6443 	} else {
6444 		INIT_WORK(&instance->work_init, process_fw_state_change_wq);
6445 	}
6446 }
6447 
6448 /**
6449  * megasas_probe_one -	PCI hotplug entry point
6450  * @pdev:		PCI device structure
6451  * @id:			PCI ids of supported hotplugged adapter
6452  */
6453 static int megasas_probe_one(struct pci_dev *pdev,
6454 			     const struct pci_device_id *id)
6455 {
6456 	int rval, pos;
6457 	struct Scsi_Host *host;
6458 	struct megasas_instance *instance;
6459 	u16 control = 0;
6460 
6461 	/* Reset MSI-X in the kdump kernel */
6462 	if (reset_devices) {
6463 		pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
6464 		if (pos) {
6465 			pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
6466 					     &control);
6467 			if (control & PCI_MSIX_FLAGS_ENABLE) {
6468 				dev_info(&pdev->dev, "resetting MSI-X\n");
6469 				pci_write_config_word(pdev,
6470 						      pos + PCI_MSIX_FLAGS,
6471 						      control &
6472 						      ~PCI_MSIX_FLAGS_ENABLE);
6473 			}
6474 		}
6475 	}
6476 
6477 	/*
6478 	 * PCI prepping: enable device, set bus mastering and DMA mask
6479 	 */
6480 	rval = pci_enable_device_mem(pdev);
6481 
6482 	if (rval) {
6483 		return rval;
6484 	}
6485 
6486 	pci_set_master(pdev);
6487 
6488 	host = scsi_host_alloc(&megasas_template,
6489 			       sizeof(struct megasas_instance));
6490 
6491 	if (!host) {
6492 		dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
6493 		goto fail_alloc_instance;
6494 	}
6495 
6496 	instance = (struct megasas_instance *)host->hostdata;
6497 	memset(instance, 0, sizeof(*instance));
6498 	atomic_set(&instance->fw_reset_no_pci_access, 0);
6499 
6500 	/*
6501 	 * Initialize PCI related and misc parameters
6502 	 */
6503 	instance->pdev = pdev;
6504 	instance->host = host;
6505 	instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
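	/* e.g. a controller at bus 0x02, devfn 0x10 gets unique_id 0x0210. */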
6506 	instance->init_id = MEGASAS_DEFAULT_INIT_ID;
6507 
6508 	megasas_set_adapter_type(instance);
6509 
6510 	/*
6511 	 * Initialize MFI Firmware
6512 	 */
6513 	if (megasas_init_fw(instance))
6514 		goto fail_init_mfi;
6515 
6516 	if (instance->requestorId) {
6517 		if (instance->PlasmaFW111) {
6518 			instance->vf_affiliation_111 =
6519 				pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
6520 						     &instance->vf_affiliation_111_h);
6521 			if (!instance->vf_affiliation_111)
6522 				dev_warn(&pdev->dev, "Can't allocate "
6523 				       "memory for VF affiliation buffer\n");
6524 		} else {
6525 			instance->vf_affiliation =
6526 				pci_alloc_consistent(pdev,
6527 						     (MAX_LOGICAL_DRIVES + 1) *
6528 						     sizeof(struct MR_LD_VF_AFFILIATION),
6529 						     &instance->vf_affiliation_h);
6530 			if (!instance->vf_affiliation)
6531 				dev_warn(&pdev->dev, "Can't allocate "
6532 				       "memory for VF affiliation buffer\n");
6533 		}
6534 	}
6535 
6536 	/*
6537 	 * Store instance in PCI softstate
6538 	 */
6539 	pci_set_drvdata(pdev, instance);
6540 
6541 	/*
6542 	 * Add this controller to megasas_mgmt_info structure so that it
6543 	 * can be exported to management applications
6544 	 */
6545 	megasas_mgmt_info.count++;
6546 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
6547 	megasas_mgmt_info.max_index++;
6548 
6549 	/*
6550 	 * Register with SCSI mid-layer
6551 	 */
6552 	if (megasas_io_attach(instance))
6553 		goto fail_io_attach;
6554 
6555 	instance->unload = 0;
6556 	/*
6557 	 * Trigger SCSI to scan our drives
6558 	 */
6559 	scsi_scan_host(host);
6560 
6561 	/*
6562 	 * Initiate AEN (Asynchronous Event Notification)
6563 	 */
6564 	if (megasas_start_aen(instance)) {
6565 		dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
6566 		goto fail_start_aen;
6567 	}
6568 
6569 	/* Get current SR-IOV LD/VF affiliation */
6570 	if (instance->requestorId)
6571 		megasas_get_ld_vf_affiliation(instance, 1);
6572 
6573 	return 0;
6574 
6575 fail_start_aen:
6576 fail_io_attach:
6577 	megasas_mgmt_info.count--;
6578 	megasas_mgmt_info.max_index--;
6579 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
6580 
6581 	instance->instancet->disable_intr(instance);
6582 	megasas_destroy_irqs(instance);
6583 
6584 	if (instance->adapter_type != MFI_SERIES)
6585 		megasas_release_fusion(instance);
6586 	else
6587 		megasas_release_mfi(instance);
6588 	if (instance->msix_vectors)
6589 		pci_free_irq_vectors(instance->pdev);
6590 fail_init_mfi:
6591 	scsi_host_put(host);
6592 fail_alloc_instance:
6593 	pci_disable_device(pdev);
6594 
6595 	return -ENODEV;
6596 }
6597 
6598 /**
6599  * megasas_flush_cache -	Requests FW to flush all its caches
6600  * @instance:			Adapter soft state
6601  */
6602 static void megasas_flush_cache(struct megasas_instance *instance)
6603 {
6604 	struct megasas_cmd *cmd;
6605 	struct megasas_dcmd_frame *dcmd;
6606 
6607 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6608 		return;
6609 
6610 	cmd = megasas_get_cmd(instance);
6611 
6612 	if (!cmd)
6613 		return;
6614 
6615 	dcmd = &cmd->frame->dcmd;
6616 
6617 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6618 
6619 	dcmd->cmd = MFI_CMD_DCMD;
6620 	dcmd->cmd_status = 0x0;
6621 	dcmd->sge_count = 0;
6622 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
6623 	dcmd->timeout = 0;
6624 	dcmd->pad_0 = 0;
6625 	dcmd->data_xfer_len = 0;
6626 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
6627 	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
6628 
6629 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
6630 			!= DCMD_SUCCESS) {
6631 		dev_err(&instance->pdev->dev,
6632 			"return from %s %d\n", __func__, __LINE__);
6633 		return;
6634 	}
6635 
6636 	megasas_return_cmd(instance, cmd);
6637 }
6638 
6639 /**
6640  * megasas_shutdown_controller -	Instructs FW to shutdown the controller
6641  * @instance:				Adapter soft state
6642  * @opcode:				Shutdown/Hibernate
6643  */
6644 static void megasas_shutdown_controller(struct megasas_instance *instance,
6645 					u32 opcode)
6646 {
6647 	struct megasas_cmd *cmd;
6648 	struct megasas_dcmd_frame *dcmd;
6649 
6650 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6651 		return;
6652 
6653 	cmd = megasas_get_cmd(instance);
6654 
6655 	if (!cmd)
6656 		return;
6657 
6658 	if (instance->aen_cmd)
6659 		megasas_issue_blocked_abort_cmd(instance,
6660 			instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
6661 	if (instance->map_update_cmd)
6662 		megasas_issue_blocked_abort_cmd(instance,
6663 			instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
6664 	if (instance->jbod_seq_cmd)
6665 		megasas_issue_blocked_abort_cmd(instance,
6666 			instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
6667 
6668 	dcmd = &cmd->frame->dcmd;
6669 
6670 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6671 
6672 	dcmd->cmd = MFI_CMD_DCMD;
6673 	dcmd->cmd_status = 0x0;
6674 	dcmd->sge_count = 0;
6675 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
6676 	dcmd->timeout = 0;
6677 	dcmd->pad_0 = 0;
6678 	dcmd->data_xfer_len = 0;
6679 	dcmd->opcode = cpu_to_le32(opcode);
6680 
6681 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
6682 			!= DCMD_SUCCESS) {
6683 		dev_err(&instance->pdev->dev,
6684 			"return from %s %d\n", __func__, __LINE__);
6685 		return;
6686 	}
6687 
6688 	megasas_return_cmd(instance, cmd);
6689 }
6690 
6691 #ifdef CONFIG_PM
6692 /**
6693  * megasas_suspend -	driver suspend entry point
6694  * @pdev:		PCI device structure
6695  * @state:		PCI power state to suspend routine
6696  */
6697 static int
6698 megasas_suspend(struct pci_dev *pdev, pm_message_t state)
6699 {
6700 	struct Scsi_Host *host;
6701 	struct megasas_instance *instance;
6702 
6703 	instance = pci_get_drvdata(pdev);
6704 	host = instance->host;
6705 	instance->unload = 1;
6706 
6707 	/* Shutdown SR-IOV heartbeat timer */
6708 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6709 		del_timer_sync(&instance->sriov_heartbeat_timer);
6710 
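	/* Flush the controller cache and ask FW to prepare for hibernation */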
6711 	megasas_flush_cache(instance);
6712 	megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
6713 
6714 	/* cancel the delayed work if it is still queued */
6715 	if (instance->ev != NULL) {
6716 		struct megasas_aen_event *ev = instance->ev;
6717 		cancel_delayed_work_sync(&ev->hotplug_work);
6718 		instance->ev = NULL;
6719 	}
6720 
6721 	tasklet_kill(&instance->isr_tasklet);
6722 
6723 	pci_set_drvdata(instance->pdev, instance);
6724 	instance->instancet->disable_intr(instance);
6725 
6726 	megasas_destroy_irqs(instance);
6727 
6728 	if (instance->msix_vectors)
6729 		pci_free_irq_vectors(instance->pdev);
6730 
6731 	pci_save_state(pdev);
6732 	pci_disable_device(pdev);
6733 
6734 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
6735 
6736 	return 0;
6737 }
6738 
6739 /**
6740  * megasas_resume-      driver resume entry point
6741  * @pdev:               PCI device structure
6742  */
6743 static int
6744 megasas_resume(struct pci_dev *pdev)
6745 {
6746 	int rval;
6747 	struct Scsi_Host *host;
6748 	struct megasas_instance *instance;
6749 	int irq_flags = PCI_IRQ_LEGACY;
6750 
6751 	instance = pci_get_drvdata(pdev);
6752 	host = instance->host;
6753 	pci_set_power_state(pdev, PCI_D0);
6754 	pci_enable_wake(pdev, PCI_D0, 0);
6755 	pci_restore_state(pdev);
6756 
6757 	/*
6758 	 * PCI prepping: enable device, set bus mastering and dma mask
6759 	 */
6760 	rval = pci_enable_device_mem(pdev);
6761 
6762 	if (rval) {
6763 		dev_err(&pdev->dev, "Enable device failed\n");
6764 		return rval;
6765 	}
6766 
6767 	pci_set_master(pdev);
6768 
6769 	/*
6770 	 * We expect the FW state to be READY
6771 	 */
6772 	if (megasas_transition_to_ready(instance, 0))
6773 		goto fail_ready_state;
6774 
6775 	if (megasas_set_dma_mask(instance))
6776 		goto fail_set_dma_mask;
6777 
6778 	/*
6779 	 * Initialize MFI Firmware
6780 	 */
6781 
6782 	atomic_set(&instance->fw_outstanding, 0);
6783 	atomic_set(&instance->ldio_outstanding, 0);
6784 
6785 	/* Now re-enable MSI-X */
6786 	if (instance->msix_vectors) {
6787 		irq_flags = PCI_IRQ_MSIX;
6788 		if (smp_affinity_enable)
6789 			irq_flags |= PCI_IRQ_AFFINITY;
6790 	}
6791 	rval = pci_alloc_irq_vectors(instance->pdev, 1,
6792 				     instance->msix_vectors ?
6793 				     instance->msix_vectors : 1, irq_flags);
6794 	if (rval < 0)
6795 		goto fail_reenable_msix;
6796 
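	/* Rebuild the reply queue mapping for the newly allocated IRQ vectors */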
6797 	megasas_setup_reply_map(instance);
6798 
6799 	if (instance->adapter_type != MFI_SERIES) {
6800 		megasas_reset_reply_desc(instance);
6801 		if (megasas_ioc_init_fusion(instance)) {
6802 			megasas_free_cmds(instance);
6803 			megasas_free_cmds_fusion(instance);
6804 			goto fail_init_mfi;
6805 		}
6806 		if (!megasas_get_map_info(instance))
6807 			megasas_sync_map_info(instance);
6808 	} else {
6809 		*instance->producer = 0;
6810 		*instance->consumer = 0;
6811 		if (megasas_issue_init_mfi(instance))
6812 			goto fail_init_mfi;
6813 	}
6814 
6815 	if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS)
6816 		goto fail_init_mfi;
6817 
6818 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
6819 		     (unsigned long)instance);
6820 
6821 	if (instance->msix_vectors ?
6822 			megasas_setup_irqs_msix(instance, 0) :
6823 			megasas_setup_irqs_ioapic(instance))
6824 		goto fail_init_mfi;
6825 
6826 	/* Re-launch SR-IOV heartbeat timer */
6827 	if (instance->requestorId) {
6828 		if (!megasas_sriov_start_heartbeat(instance, 0))
6829 			megasas_start_timer(instance);
6830 		else {
6831 			instance->skip_heartbeat_timer_del = 1;
6832 			goto fail_init_mfi;
6833 		}
6834 	}
6835 
6836 	instance->instancet->enable_intr(instance);
6837 	megasas_setup_jbod_map(instance);
6838 	instance->unload = 0;
6839 
6840 	/*
6841 	 * Initiate AEN (Asynchronous Event Notification)
6842 	 */
6843 	if (megasas_start_aen(instance))
6844 		dev_err(&instance->pdev->dev, "Start AEN failed\n");
6845 
6846 	return 0;
6847 
6848 fail_init_mfi:
6849 	megasas_free_ctrl_dma_buffers(instance);
6850 	megasas_free_ctrl_mem(instance);
6851 	scsi_host_put(host);
6852 
6853 fail_reenable_msix:
6854 fail_set_dma_mask:
6855 fail_ready_state:
6856 
6857 	pci_disable_device(pdev);
6858 
6859 	return -ENODEV;
6860 }
6861 #else
6862 #define megasas_suspend	NULL
6863 #define megasas_resume	NULL
6864 #endif
6865 
6866 static inline int
6867 megasas_wait_for_adapter_operational(struct megasas_instance *instance)
6868 {
6869 	int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
6870 	int i;
6871 	u8 adp_state;
6872 
6873 	for (i = 0; i < wait_time; i++) {
6874 		adp_state = atomic_read(&instance->adprecovery);
6875 		if ((adp_state == MEGASAS_HBA_OPERATIONAL) ||
6876 		    (adp_state == MEGASAS_HW_CRITICAL_ERROR))
6877 			break;
6878 
6879 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
6880 			dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
6881 
6882 		msleep(1000);
6883 	}
6884 
6885 	if (adp_state != MEGASAS_HBA_OPERATIONAL) {
6886 		dev_info(&instance->pdev->dev,
6887 			 "%s HBA failed to become operational, adp_state %d\n",
6888 			 __func__, adp_state);
6889 		return 1;
6890 	}
6891 
6892 	return 0;
6893 }
6894 
6895 /**
6896  * megasas_detach_one -	PCI hot unplug entry point
6897  * @pdev:		PCI device structure
6898  */
6899 static void megasas_detach_one(struct pci_dev *pdev)
6900 {
6901 	int i;
6902 	struct Scsi_Host *host;
6903 	struct megasas_instance *instance;
6904 	struct fusion_context *fusion;
6905 	u32 pd_seq_map_sz;
6906 
6907 	instance = pci_get_drvdata(pdev);
6908 	host = instance->host;
6909 	fusion = instance->ctrl_context;
6910 
6911 	/* Shutdown SR-IOV heartbeat timer */
6912 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6913 		del_timer_sync(&instance->sriov_heartbeat_timer);
6914 
6915 	if (instance->fw_crash_state != UNAVAILABLE)
6916 		megasas_free_host_crash_buffer(instance);
6917 	scsi_remove_host(instance->host);
6918 	instance->unload = 1;
6919 
6920 	if (megasas_wait_for_adapter_operational(instance))
6921 		goto skip_firing_dcmds;
6922 
6923 	megasas_flush_cache(instance);
6924 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
6925 
6926 skip_firing_dcmds:
6927 	/* cancel the delayed work if it is still queued */
6928 	if (instance->ev != NULL) {
6929 		struct megasas_aen_event *ev = instance->ev;
6930 		cancel_delayed_work_sync(&ev->hotplug_work);
6931 		instance->ev = NULL;
6932 	}
6933 
6934 	/* cancel all wait events */
6935 	wake_up_all(&instance->int_cmd_wait_q);
6936 
6937 	tasklet_kill(&instance->isr_tasklet);
6938 
6939 	/*
6940 	 * Take the instance off the instance array. Note that we will not
6941 	 * decrement max_index; we let this array remain sparse.
6942 	 */
6943 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
6944 		if (megasas_mgmt_info.instance[i] == instance) {
6945 			megasas_mgmt_info.count--;
6946 			megasas_mgmt_info.instance[i] = NULL;
6947 
6948 			break;
6949 		}
6950 	}
6951 
6952 	instance->instancet->disable_intr(instance);
6953 
6954 	megasas_destroy_irqs(instance);
6955 
6956 	if (instance->msix_vectors)
6957 		pci_free_irq_vectors(instance->pdev);
6958 
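	/* Free per-LD stream detection state used by Ventura-series adapters */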
6959 	if (instance->adapter_type == VENTURA_SERIES) {
6960 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
6961 			kfree(fusion->stream_detect_by_ld[i]);
6962 		kfree(fusion->stream_detect_by_ld);
6963 		fusion->stream_detect_by_ld = NULL;
6964 	}
6965 
6966 
6967 	if (instance->adapter_type != MFI_SERIES) {
6968 		megasas_release_fusion(instance);
6969 		pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
6970 				(sizeof(struct MR_PD_CFG_SEQ) *
6971 				 (MAX_PHYSICAL_DEVICES - 1));
6972 		for (i = 0; i < 2 ; i++) {
6973 			if (fusion->ld_map[i])
6974 				dma_free_coherent(&instance->pdev->dev,
6975 						  fusion->max_map_sz,
6976 						  fusion->ld_map[i],
6977 						  fusion->ld_map_phys[i]);
6978 			if (fusion->ld_drv_map[i]) {
6979 				if (is_vmalloc_addr(fusion->ld_drv_map[i]))
6980 					vfree(fusion->ld_drv_map[i]);
6981 				else
6982 					free_pages((ulong)fusion->ld_drv_map[i],
6983 						   fusion->drv_map_pages);
6984 			}
6985 
6986 			if (fusion->pd_seq_sync[i])
6987 				dma_free_coherent(&instance->pdev->dev,
6988 					pd_seq_map_sz,
6989 					fusion->pd_seq_sync[i],
6990 					fusion->pd_seq_phys[i]);
6991 		}
6992 	} else {
6993 		megasas_release_mfi(instance);
6994 	}
6995 
6996 	if (instance->vf_affiliation)
6997 		pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
6998 				    sizeof(struct MR_LD_VF_AFFILIATION),
6999 				    instance->vf_affiliation,
7000 				    instance->vf_affiliation_h);
7001 
7002 	if (instance->vf_affiliation_111)
7003 		pci_free_consistent(pdev,
7004 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
7005 				    instance->vf_affiliation_111,
7006 				    instance->vf_affiliation_111_h);
7007 
7008 	if (instance->hb_host_mem)
7009 		pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
7010 				    instance->hb_host_mem,
7011 				    instance->hb_host_mem_h);
7012 
7013 	megasas_free_ctrl_dma_buffers(instance);
7014 
7015 	megasas_free_ctrl_mem(instance);
7016 
7017 	scsi_host_put(host);
7018 
7019 	pci_disable_device(pdev);
7020 }
7021 
7022 /**
7023  * megasas_shutdown -	Shutdown entry point
7024  * @pdev:		PCI device structure
7025  */
7026 static void megasas_shutdown(struct pci_dev *pdev)
7027 {
7028 	struct megasas_instance *instance = pci_get_drvdata(pdev);
7029 
7030 	instance->unload = 1;
7031 
7032 	if (megasas_wait_for_adapter_operational(instance))
7033 		goto skip_firing_dcmds;
7034 
7035 	megasas_flush_cache(instance);
7036 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
7037 
7038 skip_firing_dcmds:
7039 	instance->instancet->disable_intr(instance);
7040 	megasas_destroy_irqs(instance);
7041 
7042 	if (instance->msix_vectors)
7043 		pci_free_irq_vectors(instance->pdev);
7044 }
7045 
7046 /**
7047  * megasas_mgmt_open -	char node "open" entry point
7048  */
7049 static int megasas_mgmt_open(struct inode *inode, struct file *filep)
7050 {
7051 	/*
7052 	 * Allow only those users with admin rights
7053 	 */
7054 	if (!capable(CAP_SYS_ADMIN))
7055 		return -EACCES;
7056 
7057 	return 0;
7058 }
7059 
7060 /**
7061  * megasas_mgmt_fasync -	Async notifier registration from applications
7062  *
7063  * This function adds the calling process to a driver global queue. When an
7064  * event occurs, SIGIO will be sent to all processes in this queue.
7065  */
7066 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
7067 {
7068 	int rc;
7069 
7070 	mutex_lock(&megasas_async_queue_mutex);
7071 
7072 	rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
7073 
7074 	mutex_unlock(&megasas_async_queue_mutex);
7075 
7076 	if (rc >= 0) {
7077 		/* Used as a sanity check when we get the ioctl */
7078 		filep->private_data = filep;
7079 		return 0;
7080 	}
7081 
7082 	printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
7083 
7084 	return rc;
7085 }
7086 
7087 /**
7088  * megasas_mgmt_poll -  char node "poll" entry point
7089  */
7090 static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
7091 {
7092 	__poll_t mask;
7093 	unsigned long flags;
7094 
7095 	poll_wait(file, &megasas_poll_wait, wait);
7096 	spin_lock_irqsave(&poll_aen_lock, flags);
7097 	if (megasas_poll_wait_aen)
7098 		mask = (EPOLLIN | EPOLLRDNORM);
7099 	else
7100 		mask = 0;
7101 	megasas_poll_wait_aen = 0;
7102 	spin_unlock_irqrestore(&poll_aen_lock, flags);
7103 	return mask;
7104 }
7105 
7106 /*
7107  * megasas_set_crash_dump_params_ioctl:
7108  *		Send CRASH_DUMP_MODE DCMD to all controllers
7109  * @cmd:	MFI command frame
7110  */
7111 
7112 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
7113 {
7114 	struct megasas_instance *local_instance;
7115 	int i, error = 0;
7116 	int crash_support;
7117 
7118 	crash_support = cmd->frame->dcmd.mbox.w[0];
7119 
7120 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
7121 		local_instance = megasas_mgmt_info.instance[i];
7122 		if (local_instance && local_instance->crash_dump_drv_support) {
7123 			if ((atomic_read(&local_instance->adprecovery) ==
7124 				MEGASAS_HBA_OPERATIONAL) &&
7125 				!megasas_set_crash_dump_params(local_instance,
7126 					crash_support)) {
7127 				local_instance->crash_dump_app_support =
7128 					crash_support;
7129 				dev_info(&local_instance->pdev->dev,
7130 					"Application firmware crash "
7131 					"dump mode set success\n");
7132 				error = 0;
7133 			} else {
7134 				dev_info(&local_instance->pdev->dev,
7135 					"Application firmware crash "
7136 					"dump mode set failed\n");
7137 				error = -1;
7138 			}
7139 		}
7140 	}
7141 	return error;
7142 }
7143 
7144 /**
7145  * megasas_mgmt_fw_ioctl -	Issues management ioctls to FW
7146  * @instance:			Adapter soft state
7147  * @user_ioc:			User's ioctl packet
7148  */
7149 static int
7150 megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
7151 		      struct megasas_iocpacket __user * user_ioc,
7152 		      struct megasas_iocpacket *ioc)
7153 {
7154 	struct megasas_sge64 *kern_sge64 = NULL;
7155 	struct megasas_sge32 *kern_sge32 = NULL;
7156 	struct megasas_cmd *cmd;
7157 	void *kbuff_arr[MAX_IOCTL_SGE];
7158 	dma_addr_t buf_handle = 0;
7159 	int error = 0, i;
7160 	void *sense = NULL;
7161 	dma_addr_t sense_handle;
7162 	unsigned long *sense_ptr;
7163 	u32 opcode = 0;
7164 
7165 	memset(kbuff_arr, 0, sizeof(kbuff_arr));
7166 
7167 	if (ioc->sge_count > MAX_IOCTL_SGE) {
7168 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] >  max limit [%d]\n",
7169 		       ioc->sge_count, MAX_IOCTL_SGE);
7170 		return -EINVAL;
7171 	}
7172 
7173 	if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
7174 	    ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
7175 	    !instance->support_nvme_passthru)) {
7176 		dev_err(&instance->pdev->dev,
7177 			"Received invalid ioctl command 0x%x\n",
7178 			ioc->frame.hdr.cmd);
7179 		return -ENOTSUPP;
7180 	}
7181 
7182 	cmd = megasas_get_cmd(instance);
7183 	if (!cmd) {
7184 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
7185 		return -ENOMEM;
7186 	}
7187 
7188 	/*
7189 	 * User's IOCTL packet has 2 frames (maximum). Copy those two
7190 	 * frames into our cmd's frames. cmd->frame's context will get
7191 	 * overwritten when we copy from user's frames. So set that value
7192 	 * alone separately
7193 	 */
7194 	memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
7195 	cmd->frame->hdr.context = cpu_to_le32(cmd->index);
7196 	cmd->frame->hdr.pad_0 = 0;
7197 
7198 	cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE);
7199 
7200 	if (instance->consistent_mask_64bit)
7201 		cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 |
7202 				       MFI_FRAME_SENSE64));
7203 	else
7204 		cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 |
7205 					       MFI_FRAME_SENSE64));
7206 
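	/*
	 * Pick out the DCMD opcode so that shutdown and crash dump
	 * requests can get special handling below.
	 */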
7207 	if (cmd->frame->hdr.cmd == MFI_CMD_DCMD)
7208 		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
7209 
7210 	if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
7211 		if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
7212 			megasas_return_cmd(instance, cmd);
7213 			return -1;
7214 		}
7215 	}
7216 
7217 	if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
7218 		error = megasas_set_crash_dump_params_ioctl(cmd);
7219 		megasas_return_cmd(instance, cmd);
7220 		return error;
7221 	}
7222 
7223 	/*
7224 	 * The management interface between applications and the fw uses
7225 	 * MFI frames. E.g, RAID configuration changes, LD property changes
7226 	 * etc. are accomplished through different kinds of MFI frames. The
7227 	 * driver needs to care only about substituting user buffers with
7228 	 * kernel buffers in SGLs. The location of SGL is embedded in the
7229 	 * struct iocpacket itself.
7230 	 */
7231 	if (instance->consistent_mask_64bit)
7232 		kern_sge64 = (struct megasas_sge64 *)
7233 			((unsigned long)cmd->frame + ioc->sgl_off);
7234 	else
7235 		kern_sge32 = (struct megasas_sge32 *)
7236 			((unsigned long)cmd->frame + ioc->sgl_off);
7237 
7238 	/*
7239 	 * For each user buffer, create a mirror buffer and copy in
7240 	 */
7241 	for (i = 0; i < ioc->sge_count; i++) {
7242 		if (!ioc->sgl[i].iov_len)
7243 			continue;
7244 
7245 		kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
7246 						    ioc->sgl[i].iov_len,
7247 						    &buf_handle, GFP_KERNEL);
7248 		if (!kbuff_arr[i]) {
7249 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
7250 			       "kernel SGL buffer for IOCTL\n");
7251 			error = -ENOMEM;
7252 			goto out;
7253 		}
7254 
7255 		/*
7256 		 * Fill the SGE with the mirror buffer's DMA address, using a
7257 		 * 64-bit or 32-bit SGE based on the controller's consistent DMA mask
7258 		 */
7259 		if (instance->consistent_mask_64bit) {
7260 			kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
7261 			kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
7262 		} else {
7263 			kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
7264 			kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
7265 		}
7266 
7267 		/*
7268 		 * We created a kernel buffer corresponding to the
7269 		 * user buffer. Now copy in from the user buffer
7270 		 */
7271 		if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
7272 				   (u32) (ioc->sgl[i].iov_len))) {
7273 			error = -EFAULT;
7274 			goto out;
7275 		}
7276 	}
7277 
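	/*
	 * If the application expects sense data, allocate a DMA buffer for
	 * it and patch its address into the frame at the sense offset.
	 */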
7278 	if (ioc->sense_len) {
7279 		sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
7280 					     &sense_handle, GFP_KERNEL);
7281 		if (!sense) {
7282 			error = -ENOMEM;
7283 			goto out;
7284 		}
7285 
7286 		sense_ptr =
7287 		(unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
7288 		if (instance->consistent_mask_64bit)
7289 			*sense_ptr = cpu_to_le64(sense_handle);
7290 		else
7291 			*sense_ptr = cpu_to_le32(sense_handle);
7292 	}
7293 
7294 	/*
7295 	 * Set the sync_cmd flag so that the ISR knows not to complete this
7296 	 * cmd to the SCSI mid-layer
7297 	 */
7298 	cmd->sync_cmd = 1;
7299 	if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
7300 		cmd->sync_cmd = 0;
7301 		dev_err(&instance->pdev->dev,
7302 			"return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
7303 			__func__, __LINE__, cmd->frame->hdr.cmd, opcode,
7304 			cmd->cmd_status_drv);
7305 		return -EBUSY;
7306 	}
7307 
7308 	cmd->sync_cmd = 0;
7309 
7310 	if (instance->unload == 1) {
7311 		dev_info(&instance->pdev->dev, "Driver unload is in progress "
7312 			"don't submit data to application\n");
7313 		goto out;
7314 	}
7315 	/*
7316 	 * copy out the kernel buffers to user buffers
7317 	 */
7318 	for (i = 0; i < ioc->sge_count; i++) {
7319 		if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
7320 				 ioc->sgl[i].iov_len)) {
7321 			error = -EFAULT;
7322 			goto out;
7323 		}
7324 	}
7325 
7326 	/*
7327 	 * copy out the sense
7328 	 */
7329 	if (ioc->sense_len) {
7330 		/*
7331 		 * sense_ptr points to the location that has the user
7332 		 * sense buffer address
7333 		 */
7334 		sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
7335 				ioc->sense_off);
7336 
7337 		if (copy_to_user((void __user *)((unsigned long)
7338 				 get_unaligned((unsigned long *)sense_ptr)),
7339 				 sense, ioc->sense_len)) {
7340 			dev_err(&instance->pdev->dev, "Failed to copy out to user "
7341 					"sense data\n");
7342 			error = -EFAULT;
7343 			goto out;
7344 		}
7345 	}
7346 
7347 	/*
7348 	 * copy the status codes returned by the fw
7349 	 */
7350 	if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
7351 			 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
7352 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
7353 		error = -EFAULT;
7354 	}
7355 
7356 out:
7357 	if (sense) {
7358 		dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
7359 				    sense, sense_handle);
7360 	}
7361 
7362 	for (i = 0; i < ioc->sge_count; i++) {
7363 		if (kbuff_arr[i]) {
7364 			if (instance->consistent_mask_64bit)
7365 				dma_free_coherent(&instance->pdev->dev,
7366 					le32_to_cpu(kern_sge64[i].length),
7367 					kbuff_arr[i],
7368 					le64_to_cpu(kern_sge64[i].phys_addr));
7369 			else
7370 				dma_free_coherent(&instance->pdev->dev,
7371 					le32_to_cpu(kern_sge32[i].length),
7372 					kbuff_arr[i],
7373 					le32_to_cpu(kern_sge32[i].phys_addr));
7374 			kbuff_arr[i] = NULL;
7375 		}
7376 	}
7377 
7378 	megasas_return_cmd(instance, cmd);
7379 	return error;
7380 }
7381 
7382 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
7383 {
7384 	struct megasas_iocpacket __user *user_ioc =
7385 	    (struct megasas_iocpacket __user *)arg;
7386 	struct megasas_iocpacket *ioc;
7387 	struct megasas_instance *instance;
7388 	int error;
7389 
7390 	ioc = memdup_user(user_ioc, sizeof(*ioc));
7391 	if (IS_ERR(ioc))
7392 		return PTR_ERR(ioc);
7393 
7394 	instance = megasas_lookup_instance(ioc->host_no);
7395 	if (!instance) {
7396 		error = -ENODEV;
7397 		goto out_kfree_ioc;
7398 	}
7399 
7400 	/* Block ioctls in VF mode */
7401 	if (instance->requestorId && !allow_vf_ioctls) {
7402 		error = -ENODEV;
7403 		goto out_kfree_ioc;
7404 	}
7405 
7406 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
7407 		dev_err(&instance->pdev->dev, "Controller in crit error\n");
7408 		error = -ENODEV;
7409 		goto out_kfree_ioc;
7410 	}
7411 
7412 	if (instance->unload == 1) {
7413 		error = -ENODEV;
7414 		goto out_kfree_ioc;
7415 	}
7416 
7417 	if (down_interruptible(&instance->ioctl_sem)) {
7418 		error = -ERESTARTSYS;
7419 		goto out_kfree_ioc;
7420 	}
7421 
7422 	if  (megasas_wait_for_adapter_operational(instance)) {
7423 		error = -ENODEV;
7424 		goto out_up;
7425 	}
7426 
7427 	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
7428 out_up:
7429 	up(&instance->ioctl_sem);
7430 
7431 out_kfree_ioc:
7432 	kfree(ioc);
7433 	return error;
7434 }
7435 
7436 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
7437 {
7438 	struct megasas_instance *instance;
7439 	struct megasas_aen aen;
7440 	int error;
7441 
7442 	if (file->private_data != file) {
7443 		printk(KERN_DEBUG "megasas: fasync_helper was not "
7444 		       "called first\n");
7445 		return -EINVAL;
7446 	}
7447 
7448 	if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
7449 		return -EFAULT;
7450 
7451 	instance = megasas_lookup_instance(aen.host_no);
7452 
7453 	if (!instance)
7454 		return -ENODEV;
7455 
7456 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
7457 		return -ENODEV;
7458 	}
7459 
7460 	if (instance->unload == 1) {
7461 		return -ENODEV;
7462 	}
7463 
7464 	if  (megasas_wait_for_adapter_operational(instance))
7465 		return -ENODEV;
7466 
7467 	mutex_lock(&instance->reset_mutex);
7468 	error = megasas_register_aen(instance, aen.seq_num,
7469 				     aen.class_locale_word);
7470 	mutex_unlock(&instance->reset_mutex);
7471 	return error;
7472 }
7473 
7474 /**
7475  * megasas_mgmt_ioctl -	char node ioctl entry point
7476  */
7477 static long
7478 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
7479 {
7480 	switch (cmd) {
7481 	case MEGASAS_IOC_FIRMWARE:
7482 		return megasas_mgmt_ioctl_fw(file, arg);
7483 
7484 	case MEGASAS_IOC_GET_AEN:
7485 		return megasas_mgmt_ioctl_aen(file, arg);
7486 	}
7487 
7488 	return -ENOTTY;
7489 }
7490 
7491 #ifdef CONFIG_COMPAT
7492 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
7493 {
7494 	struct compat_megasas_iocpacket __user *cioc =
7495 	    (struct compat_megasas_iocpacket __user *)arg;
7496 	struct megasas_iocpacket __user *ioc =
7497 	    compat_alloc_user_space(sizeof(struct megasas_iocpacket));
7498 	int i;
7499 	int error = 0;
7500 	compat_uptr_t ptr;
7501 	u32 local_sense_off;
7502 	u32 local_sense_len;
7503 	u32 user_sense_off;
7504 
7505 	if (clear_user(ioc, sizeof(*ioc)))
7506 		return -EFAULT;
7507 
7508 	if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
7509 	    copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
7510 	    copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
7511 	    copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
7512 	    copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
7513 	    copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
7514 		return -EFAULT;
7515 
7516 	/*
7517 	 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
7518 	 * sense_len is not null, so prepare the 64bit value under
7519 	 * the same condition.
7520 	 */
7521 	if (get_user(local_sense_off, &ioc->sense_off) ||
7522 		get_user(local_sense_len, &ioc->sense_len) ||
7523 		get_user(user_sense_off, &cioc->sense_off))
7524 		return -EFAULT;
7525 
7526 	if (local_sense_len) {
7527 		void __user **sense_ioc_ptr =
7528 			(void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
7529 		compat_uptr_t *sense_cioc_ptr =
7530 			(compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
7531 		if (get_user(ptr, sense_cioc_ptr) ||
7532 		    put_user(compat_ptr(ptr), sense_ioc_ptr))
7533 			return -EFAULT;
7534 	}
7535 
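	/*
	 * Convert each 32-bit iov_base pointer from the compat packet into
	 * a native user pointer in the 64-bit iocpacket.
	 */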
7536 	for (i = 0; i < MAX_IOCTL_SGE; i++) {
7537 		if (get_user(ptr, &cioc->sgl[i].iov_base) ||
7538 		    put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
7539 		    copy_in_user(&ioc->sgl[i].iov_len,
7540 				 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
7541 			return -EFAULT;
7542 	}
7543 
7544 	error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
7545 
7546 	if (copy_in_user(&cioc->frame.hdr.cmd_status,
7547 			 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
7548 		printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
7549 		return -EFAULT;
7550 	}
7551 	return error;
7552 }
7553 
7554 static long
7555 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
7556 			  unsigned long arg)
7557 {
7558 	switch (cmd) {
7559 	case MEGASAS_IOC_FIRMWARE32:
7560 		return megasas_mgmt_compat_ioctl_fw(file, arg);
7561 	case MEGASAS_IOC_GET_AEN:
7562 		return megasas_mgmt_ioctl_aen(file, arg);
7563 	}
7564 
7565 	return -ENOTTY;
7566 }
7567 #endif
7568 
7569 /*
7570  * File operations structure for management interface
7571  */
7572 static const struct file_operations megasas_mgmt_fops = {
7573 	.owner = THIS_MODULE,
7574 	.open = megasas_mgmt_open,
7575 	.fasync = megasas_mgmt_fasync,
7576 	.unlocked_ioctl = megasas_mgmt_ioctl,
7577 	.poll = megasas_mgmt_poll,
7578 #ifdef CONFIG_COMPAT
7579 	.compat_ioctl = megasas_mgmt_compat_ioctl,
7580 #endif
7581 	.llseek = noop_llseek,
7582 };
7583 
7584 /*
7585  * PCI hotplug support registration structure
7586  */
7587 static struct pci_driver megasas_pci_driver = {
7588 
7589 	.name = "megaraid_sas",
7590 	.id_table = megasas_pci_table,
7591 	.probe = megasas_probe_one,
7592 	.remove = megasas_detach_one,
7593 	.suspend = megasas_suspend,
7594 	.resume = megasas_resume,
7595 	.shutdown = megasas_shutdown,
7596 };
7597 
7598 /*
7599  * Sysfs driver attributes
7600  */
7601 static ssize_t version_show(struct device_driver *dd, char *buf)
7602 {
7603 	return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
7604 			MEGASAS_VERSION);
7605 }
7606 static DRIVER_ATTR_RO(version);
7607 
7608 static ssize_t release_date_show(struct device_driver *dd, char *buf)
7609 {
7610 	return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
7611 		MEGASAS_RELDATE);
7612 }
7613 static DRIVER_ATTR_RO(release_date);
7614 
7615 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
7616 {
7617 	return sprintf(buf, "%u\n", support_poll_for_event);
7618 }
7619 static DRIVER_ATTR_RO(support_poll_for_event);
7620 
7621 static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
7622 {
7623 	return sprintf(buf, "%u\n", support_device_change);
7624 }
7625 static DRIVER_ATTR_RO(support_device_change);
7626 
7627 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
7628 {
7629 	return sprintf(buf, "%u\n", megasas_dbg_lvl);
7630 }
7631 
7632 static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
7633 			     size_t count)
7634 {
7635 	int retval = count;
7636 
7637 	if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
7638 		printk(KERN_ERR "megasas: could not set dbg_lvl\n");
7639 		retval = -EINVAL;
7640 	}
7641 	return retval;
7642 }
7643 static DRIVER_ATTR_RW(dbg_lvl);
7644 
7645 static ssize_t
7646 support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
7647 {
7648 	return sprintf(buf, "%u\n", support_nvme_encapsulation);
7649 }
7650 
7651 static DRIVER_ATTR_RO(support_nvme_encapsulation);
7652 
7653 static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
7654 {
7655 	sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
7656 	scsi_remove_device(sdev);
7657 	scsi_device_put(sdev);
7658 }
7659 
7660 static void
7661 megasas_aen_polling(struct work_struct *work)
7662 {
7663 	struct megasas_aen_event *ev =
7664 		container_of(work, struct megasas_aen_event, hotplug_work.work);
7665 	struct megasas_instance *instance = ev->instance;
7666 	union megasas_evt_class_locale class_locale;
7667 	struct  Scsi_Host *host;
7668 	struct  scsi_device *sdev1;
7669 	u16     pd_index = 0;
7670 	u16	ld_index = 0;
7671 	int     i, j, doscan = 0;
7672 	u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
7673 	int error;
7674 	u8  dcmd_ret = DCMD_SUCCESS;
7675 
7676 	if (!instance) {
7677 		printk(KERN_ERR "invalid instance!\n");
7678 		kfree(ev);
7679 		return;
7680 	}
7681 
7682 	/* Adjust event workqueue thread wait time for VF mode */
7683 	if (instance->requestorId)
7684 		wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
7685 
7686 	/* Don't run the event workqueue thread if OCR is running */
7687 	mutex_lock(&instance->reset_mutex);
7688 
7689 	instance->ev = NULL;
7690 	host = instance->host;
7691 	if (instance->evt_detail) {
7692 		megasas_decode_evt(instance);
7693 
7694 		switch (le32_to_cpu(instance->evt_detail->code)) {
7695 
7696 		case MR_EVT_PD_INSERTED:
7697 		case MR_EVT_PD_REMOVED:
7698 			dcmd_ret = megasas_get_pd_list(instance);
7699 			if (dcmd_ret == DCMD_SUCCESS)
7700 				doscan = SCAN_PD_CHANNEL;
7701 			break;
7702 
7703 		case MR_EVT_LD_OFFLINE:
7704 		case MR_EVT_CFG_CLEARED:
7705 		case MR_EVT_LD_DELETED:
7706 		case MR_EVT_LD_CREATED:
7707 			if (!instance->requestorId ||
7708 				(instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
7709 				dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
7710 
7711 			if (dcmd_ret == DCMD_SUCCESS)
7712 				doscan = SCAN_VD_CHANNEL;
7713 
7714 			break;
7715 
7716 		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
7717 		case MR_EVT_FOREIGN_CFG_IMPORTED:
7718 		case MR_EVT_LD_STATE_CHANGE:
7719 			dcmd_ret = megasas_get_pd_list(instance);
7720 
7721 			if (dcmd_ret != DCMD_SUCCESS)
7722 				break;
7723 
7724 			if (!instance->requestorId ||
7725 				(instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
7726 				dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
7727 
7728 			if (dcmd_ret != DCMD_SUCCESS)
7729 				break;
7730 
7731 			doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL;
7732 			dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
7733 				instance->host->host_no);
7734 			break;
7735 
7736 		case MR_EVT_CTRL_PROP_CHANGED:
7737 				dcmd_ret = megasas_get_ctrl_info(instance);
7738 				break;
7739 		default:
7740 			doscan = 0;
7741 			break;
7742 		}
7743 	} else {
7744 		dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
7745 		mutex_unlock(&instance->reset_mutex);
7746 		kfree(ev);
7747 		return;
7748 	}
7749 
7750 	mutex_unlock(&instance->reset_mutex);
7751 
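	/*
	 * Rescan the PD channels: add drives the FW now reports as system
	 * PDs and remove the ones that are gone.
	 */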
7752 	if (doscan & SCAN_PD_CHANNEL) {
7753 		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
7754 			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
7755 				pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
7756 				sdev1 = scsi_device_lookup(host, i, j, 0);
7757 				if (instance->pd_list[pd_index].driveState ==
7758 							MR_PD_STATE_SYSTEM) {
7759 					if (!sdev1)
7760 						scsi_add_device(host, i, j, 0);
7761 					else
7762 						scsi_device_put(sdev1);
7763 				} else {
7764 					if (sdev1)
7765 						megasas_remove_scsi_device(sdev1);
7766 				}
7767 			}
7768 		}
7769 	}
7770 
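	/*
	 * Rescan the VD channels: add logical drives with valid IDs and
	 * remove the ones no longer present.
	 */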
7771 	if (doscan & SCAN_VD_CHANNEL) {
7772 		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
7773 			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
7774 				ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
7775 				sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7776 				if (instance->ld_ids[ld_index] != 0xff) {
7777 					if (!sdev1)
7778 						scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7779 					else
7780 						scsi_device_put(sdev1);
7781 				} else {
7782 					if (sdev1)
7783 						megasas_remove_scsi_device(sdev1);
7784 				}
7785 			}
7786 		}
7787 	}
7788 
7789 	if (dcmd_ret == DCMD_SUCCESS)
7790 		seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
7791 	else
7792 		seq_num = instance->last_seq_num;
7793 
7794 	/* Register AEN with FW for latest sequence number plus 1 */
7795 	class_locale.members.reserved = 0;
7796 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
7797 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
7798 
7799 	if (instance->aen_cmd != NULL) {
7800 		kfree(ev);
7801 		return;
7802 	}
7803 
7804 	mutex_lock(&instance->reset_mutex);
7805 	error = megasas_register_aen(instance, seq_num,
7806 					class_locale.word);
7807 	if (error)
7808 		dev_err(&instance->pdev->dev,
7809 			"register aen failed error %x\n", error);
7810 
7811 	mutex_unlock(&instance->reset_mutex);
7812 	kfree(ev);
7813 }
7814 
7815 /**
7816  * megasas_init - Driver load entry point
7817  */
7818 static int __init megasas_init(void)
7819 {
7820 	int rval;
7821 
7822 	/*
7823 	 * Booted in kdump kernel, minimize memory footprint by
7824 	 * disabling a few features
7825 	 */
7826 	if (reset_devices) {
7827 		msix_vectors = 1;
7828 		rdpq_enable = 0;
7829 		dual_qdepth_disable = 1;
7830 	}
7831 
7832 	/*
7833 	 * Announce driver version and other information
7834 	 */
7835 	pr_info("megasas: %s\n", MEGASAS_VERSION);
7836 
7837 	spin_lock_init(&poll_aen_lock);
7838 
7839 	support_poll_for_event = 2;
7840 	support_device_change = 1;
7841 	support_nvme_encapsulation = true;
7842 
7843 	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
7844 
7845 	/*
7846 	 * Register character device node
7847 	 */
7848 	rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
7849 
7850 	if (rval < 0) {
7851 		printk(KERN_DEBUG "megasas: failed to open device node\n");
7852 		return rval;
7853 	}
7854 
7855 	megasas_mgmt_majorno = rval;
7856 
7857 	/*
7858 	 * Register ourselves as PCI hotplug module
7859 	 */
7860 	rval = pci_register_driver(&megasas_pci_driver);
7861 
7862 	if (rval) {
7863 		printk(KERN_DEBUG "megasas: PCI hotplug registration failed \n");
7864 		goto err_pcidrv;
7865 	}
7866 
7867 	rval = driver_create_file(&megasas_pci_driver.driver,
7868 				  &driver_attr_version);
7869 	if (rval)
7870 		goto err_dcf_attr_ver;
7871 
7872 	rval = driver_create_file(&megasas_pci_driver.driver,
7873 				  &driver_attr_release_date);
7874 	if (rval)
7875 		goto err_dcf_rel_date;
7876 
7877 	rval = driver_create_file(&megasas_pci_driver.driver,
7878 				&driver_attr_support_poll_for_event);
7879 	if (rval)
7880 		goto err_dcf_support_poll_for_event;
7881 
7882 	rval = driver_create_file(&megasas_pci_driver.driver,
7883 				  &driver_attr_dbg_lvl);
7884 	if (rval)
7885 		goto err_dcf_dbg_lvl;
7886 	rval = driver_create_file(&megasas_pci_driver.driver,
7887 				&driver_attr_support_device_change);
7888 	if (rval)
7889 		goto err_dcf_support_device_change;
7890 
7891 	rval = driver_create_file(&megasas_pci_driver.driver,
7892 				  &driver_attr_support_nvme_encapsulation);
7893 	if (rval)
7894 		goto err_dcf_support_nvme_encapsulation;
7895 
7896 	return rval;
7897 
7898 err_dcf_support_nvme_encapsulation:
7899 	driver_remove_file(&megasas_pci_driver.driver,
7900 			   &driver_attr_support_device_change);
7901 
7902 err_dcf_support_device_change:
7903 	driver_remove_file(&megasas_pci_driver.driver,
7904 			   &driver_attr_dbg_lvl);
7905 err_dcf_dbg_lvl:
7906 	driver_remove_file(&megasas_pci_driver.driver,
7907 			&driver_attr_support_poll_for_event);
7908 err_dcf_support_poll_for_event:
7909 	driver_remove_file(&megasas_pci_driver.driver,
7910 			   &driver_attr_release_date);
7911 err_dcf_rel_date:
7912 	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
7913 err_dcf_attr_ver:
7914 	pci_unregister_driver(&megasas_pci_driver);
7915 err_pcidrv:
7916 	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
7917 	return rval;
7918 }
7919 
7920 /**
7921  * megasas_exit - Driver unload entry point
7922  */
7923 static void __exit megasas_exit(void)
7924 {
7925 	driver_remove_file(&megasas_pci_driver.driver,
7926 			   &driver_attr_dbg_lvl);
7927 	driver_remove_file(&megasas_pci_driver.driver,
7928 			&driver_attr_support_poll_for_event);
7929 	driver_remove_file(&megasas_pci_driver.driver,
7930 			&driver_attr_support_device_change);
7931 	driver_remove_file(&megasas_pci_driver.driver,
7932 			   &driver_attr_release_date);
7933 	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
7934 	driver_remove_file(&megasas_pci_driver.driver,
7935 			   &driver_attr_support_nvme_encapsulation);
7936 
7937 	pci_unregister_driver(&megasas_pci_driver);
7938 	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
7939 }
7940 
7941 module_init(megasas_init);
7942 module_exit(megasas_exit);
7943