1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Linux MegaRAID driver for SAS based RAID controllers
4 *
5 * Copyright (c) 2003-2013 LSI Corporation
6 * Copyright (c) 2013-2016 Avago Technologies
7 * Copyright (c) 2016-2018 Broadcom Inc.
8 *
9 * Authors: Broadcom Inc.
10 * Sreenivas Bagalkote
11 * Sumant Patro
12 * Bo Yang
13 * Adam Radford
14 * Kashyap Desai <kashyap.desai@broadcom.com>
15 * Sumit Saxena <sumit.saxena@broadcom.com>
16 *
17 * Send feedback to: megaraidlinux.pdl@broadcom.com
18 */
19
20 #include <linux/kernel.h>
21 #include <linux/types.h>
22 #include <linux/pci.h>
23 #include <linux/list.h>
24 #include <linux/moduleparam.h>
25 #include <linux/module.h>
26 #include <linux/spinlock.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/uio.h>
30 #include <linux/slab.h>
31 #include <linux/uaccess.h>
32 #include <asm/unaligned.h>
33 #include <linux/fs.h>
34 #include <linux/compat.h>
35 #include <linux/blkdev.h>
36 #include <linux/mutex.h>
37 #include <linux/poll.h>
38 #include <linux/vmalloc.h>
39 #include <linux/irq_poll.h>
40
41 #include <scsi/scsi.h>
42 #include <scsi/scsi_cmnd.h>
43 #include <scsi/scsi_device.h>
44 #include <scsi/scsi_host.h>
45 #include <scsi/scsi_tcq.h>
46 #include <scsi/scsi_dbg.h>
47 #include "megaraid_sas_fusion.h"
48 #include "megaraid_sas.h"
49
50 /*
51 * Number of sectors per IO command
52 * Will be set in megasas_init_mfi if user does not provide
53 */
54 static unsigned int max_sectors;
55 module_param_named(max_sectors, max_sectors, int, 0444);
56 MODULE_PARM_DESC(max_sectors,
57 "Maximum number of sectors per IO command");
58
59 static int msix_disable;
60 module_param(msix_disable, int, 0444);
61 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
62
63 static unsigned int msix_vectors;
64 module_param(msix_vectors, int, 0444);
65 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
66
67 static int allow_vf_ioctls;
68 module_param(allow_vf_ioctls, int, 0444);
69 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
70
71 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
72 module_param(throttlequeuedepth, int, 0444);
73 MODULE_PARM_DESC(throttlequeuedepth,
74 "Adapter queue depth when throttled due to I/O timeout. Default: 16");
75
76 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
77 module_param(resetwaittime, int, 0444);
78 MODULE_PARM_DESC(resetwaittime, "Wait time in seconds (1-180) after I/O timeout before resetting adapter. Default: 180");
79
80 static int smp_affinity_enable = 1;
81 module_param(smp_affinity_enable, int, 0444);
82 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable. Default: enable(1)");
83
84 static int rdpq_enable = 1;
85 module_param(rdpq_enable, int, 0444);
86 MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable. Default: enable(1)");
87
88 unsigned int dual_qdepth_disable;
89 module_param(dual_qdepth_disable, int, 0444);
90 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
91
92 static unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
93 module_param(scmd_timeout, int, 0444);
94 MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
95
96 int perf_mode = -1;
97 module_param(perf_mode, int, 0444);
98 MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t"
99 "0 - balanced: High iops and low latency queues are allocated &\n\t\t"
100 "interrupt coalescing is enabled only on high iops queues\n\t\t"
101 "1 - iops: High iops queues are not allocated &\n\t\t"
102 "interrupt coalescing is enabled on all queues\n\t\t"
103 "2 - latency: High iops queues are not allocated &\n\t\t"
104 "interrupt coalescing is disabled on all queues\n\t\t"
105 "default mode is 'balanced'"
106 );
107
108 int event_log_level = MFI_EVT_CLASS_CRITICAL;
109 module_param(event_log_level, int, 0644);
110 MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level - range is: -2 (CLASS_DEBUG) to 4 (CLASS_DEAD). Default: 2 (CLASS_CRITICAL)");
111
112 unsigned int enable_sdev_max_qd;
113 module_param(enable_sdev_max_qd, int, 0444);
114 MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
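/*
 * Illustrative only (values are hypothetical): the parameters above can be
 * tuned at load time, e.g.
 *
 *   modprobe megaraid_sas max_sectors=128 msix_vectors=8 scmd_timeout=60
 *
 * or at boot via megaraid_sas.<param>=<value> on the kernel command line.
 */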
115
116 MODULE_LICENSE("GPL");
117 MODULE_VERSION(MEGASAS_VERSION);
118 MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
119 MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver");
120
121 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
122 static int megasas_get_pd_list(struct megasas_instance *instance);
123 static int megasas_ld_list_query(struct megasas_instance *instance,
124 u8 query_type);
125 static int megasas_issue_init_mfi(struct megasas_instance *instance);
126 static int megasas_register_aen(struct megasas_instance *instance,
127 u32 seq_num, u32 class_locale_word);
128 static void megasas_get_pd_info(struct megasas_instance *instance,
129 struct scsi_device *sdev);
130
131 /*
132 * PCI ID table for all supported controllers
133 */
134 static struct pci_device_id megasas_pci_table[] = {
135
136 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
137 /* xscale IOP */
138 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
139 /* ppc IOP */
140 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
141 /* ppc IOP */
142 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
143 /* gen2*/
144 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
145 /* gen2*/
146 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
147 /* skinny*/
148 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
149 /* skinny*/
150 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
151 /* xscale IOP, vega */
152 {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
153 /* xscale IOP */
154 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
155 /* Fusion */
156 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
157 /* Plasma */
158 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
159 /* Invader */
160 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
161 /* Fury */
162 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
163 /* Intruder */
164 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
165 /* Intruder 24 port*/
166 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
167 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
168 /* VENTURA */
169 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
170 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
171 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
172 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
173 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
174 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
175 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)},
176 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
177 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
178 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
179 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E0)},
180 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E3)},
181 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E4)},
182 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E7)},
183 {}
184 };
185
186 MODULE_DEVICE_TABLE(pci, megasas_pci_table);
187
188 static int megasas_mgmt_majorno;
189 struct megasas_mgmt_info megasas_mgmt_info;
190 static struct fasync_struct *megasas_async_queue;
191 static DEFINE_MUTEX(megasas_async_queue_mutex);
192
193 static int megasas_poll_wait_aen;
194 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
195 static u32 support_poll_for_event;
196 u32 megasas_dbg_lvl;
197 static u32 support_device_change;
198 static bool support_nvme_encapsulation;
199 static bool support_pci_lane_margining;
200
201 /* define lock for aen poll */
202 static spinlock_t poll_aen_lock;
203
204 extern struct dentry *megasas_debugfs_root;
205
206 void
207 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
208 u8 alt_status);
209 static u32
210 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance);
211 static int
212 megasas_adp_reset_gen2(struct megasas_instance *instance,
213 struct megasas_register_set __iomem *reg_set);
214 static irqreturn_t megasas_isr(int irq, void *devp);
215 static u32
216 megasas_init_adapter_mfi(struct megasas_instance *instance);
217 u32
218 megasas_build_and_issue_cmd(struct megasas_instance *instance,
219 struct scsi_cmnd *scmd);
220 static void megasas_complete_cmd_dpc(unsigned long instance_addr);
221 int
222 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
223 int seconds);
224 void megasas_fusion_ocr_wq(struct work_struct *work);
225 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
226 int initial);
227 static int
228 megasas_set_dma_mask(struct megasas_instance *instance);
229 static int
230 megasas_alloc_ctrl_mem(struct megasas_instance *instance);
231 static inline void
232 megasas_free_ctrl_mem(struct megasas_instance *instance);
233 static inline int
234 megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
235 static inline void
236 megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
237 static inline void
238 megasas_init_ctrl_params(struct megasas_instance *instance);
239
240 u32 megasas_readl(struct megasas_instance *instance,
241 const volatile void __iomem *addr)
242 {
243 u32 i = 0, ret_val;
244 /*
245 * Due to a HW errata in Aero controllers, reads to certain
246 * Fusion registers could intermittently return all zeroes.
247 * This behavior is transient in nature and subsequent reads will
248 * return a valid value. As a workaround, the driver retries the readl
249 * up to three times until a non-zero value is read.
250 */
251 if (instance->adapter_type == AERO_SERIES) {
252 do {
253 ret_val = readl(addr);
254 i++;
255 } while (ret_val == 0 && i < 3);
256 return ret_val;
257 } else {
258 return readl(addr);
259 }
260 }
261
262 /**
263 * megasas_set_dma_settings - Populate DMA address, length and flags for DCMDs
264 * @instance: Adapter soft state
265 * @dcmd: DCMD frame inside MFI command
266 * @dma_addr: DMA address of buffer to be passed to FW
267 * @dma_len: Length of DMA buffer to be passed to FW
268 * @return: void
269 */
270 void megasas_set_dma_settings(struct megasas_instance *instance,
271 struct megasas_dcmd_frame *dcmd,
272 dma_addr_t dma_addr, u32 dma_len)
273 {
274 if (instance->consistent_mask_64bit) {
275 dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
276 dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
277 dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);
278
279 } else {
280 dcmd->sgl.sge32[0].phys_addr =
281 cpu_to_le32(lower_32_bits(dma_addr));
282 dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
283 dcmd->flags = cpu_to_le16(dcmd->flags);
284 }
285 }
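/*
 * Usage sketch (illustrative, with hypothetical caller names): a DCMD issuer
 * typically fills the rest of the frame and then attaches its DMA buffer:
 *
 *   megasas_set_dma_settings(instance, dcmd, buf_dma_handle, buf_len);
 *
 * where buf_dma_handle/buf_len stand in for the caller's coherent DMA buffer
 * and its length.
 */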
286
287 static void
288 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
289 {
290 instance->instancet->fire_cmd(instance,
291 cmd->frame_phys_addr, 0, instance->reg_set);
292 return;
293 }
294
295 /**
296 * megasas_get_cmd - Get a command from the free pool
297 * @instance: Adapter soft state
298 *
299 * Returns a free command from the pool
300 */
301 struct megasas_cmd *megasas_get_cmd(struct megasas_instance
302 *instance)
303 {
304 unsigned long flags;
305 struct megasas_cmd *cmd = NULL;
306
307 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
308
309 if (!list_empty(&instance->cmd_pool)) {
310 cmd = list_entry((&instance->cmd_pool)->next,
311 struct megasas_cmd, list);
312 list_del_init(&cmd->list);
313 } else {
314 dev_err(&instance->pdev->dev, "Command pool empty!\n");
315 }
316
317 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
318 return cmd;
319 }
320
321 /**
322 * megasas_return_cmd - Return a cmd to free command pool
323 * @instance: Adapter soft state
324 * @cmd: Command packet to be returned to free command pool
325 */
326 void
327 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
328 {
329 unsigned long flags;
330 u32 blk_tags;
331 struct megasas_cmd_fusion *cmd_fusion;
332 struct fusion_context *fusion = instance->ctrl_context;
333
334 /* This flag is used only for fusion adapter.
335 * Wait for Interrupt for Polled mode DCMD
336 */
337 if (cmd->flags & DRV_DCMD_POLLED_MODE)
338 return;
339
340 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
341
342 if (fusion) {
343 blk_tags = instance->max_scsi_cmds + cmd->index;
344 cmd_fusion = fusion->cmd_list[blk_tags];
345 megasas_return_cmd_fusion(instance, cmd_fusion);
346 }
347 cmd->scmd = NULL;
348 cmd->frame_count = 0;
349 cmd->flags = 0;
350 memset(cmd->frame, 0, instance->mfi_frame_size);
351 cmd->frame->io.context = cpu_to_le32(cmd->index);
352 if (!fusion && reset_devices)
353 cmd->frame->hdr.cmd = MFI_CMD_INVALID;
354 list_add(&cmd->list, (&instance->cmd_pool)->next);
355
356 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
357
358 }
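/*
 * Typical pairing of the two helpers above (illustrative sketch only):
 *
 *   cmd = megasas_get_cmd(instance);
 *   if (!cmd)
 *           return -ENOMEM;            /* pool exhausted */
 *   ...build the MFI frame in cmd->frame and issue it...
 *   megasas_return_cmd(instance, cmd);
 */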
359
360 static const char *
361 format_timestamp(uint32_t timestamp)
362 {
363 static char buffer[32];
364
365 if ((timestamp & 0xff000000) == 0xff000000)
366 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
367 0x00ffffff);
368 else
369 snprintf(buffer, sizeof(buffer), "%us", timestamp);
370 return buffer;
371 }
372
373 static const char *
374 format_class(int8_t class)
375 {
376 static char buffer[6];
377
378 switch (class) {
379 case MFI_EVT_CLASS_DEBUG:
380 return "debug";
381 case MFI_EVT_CLASS_PROGRESS:
382 return "progress";
383 case MFI_EVT_CLASS_INFO:
384 return "info";
385 case MFI_EVT_CLASS_WARNING:
386 return "WARN";
387 case MFI_EVT_CLASS_CRITICAL:
388 return "CRIT";
389 case MFI_EVT_CLASS_FATAL:
390 return "FATAL";
391 case MFI_EVT_CLASS_DEAD:
392 return "DEAD";
393 default:
394 snprintf(buffer, sizeof(buffer), "%d", class);
395 return buffer;
396 }
397 }
398
399 /**
400 * megasas_decode_evt: Decode FW AEN event and print critical event
401 * for information.
402 * @instance: Adapter soft state
403 */
404 static void
405 megasas_decode_evt(struct megasas_instance *instance)
406 {
407 struct megasas_evt_detail *evt_detail = instance->evt_detail;
408 union megasas_evt_class_locale class_locale;
409 class_locale.word = le32_to_cpu(evt_detail->cl.word);
410
411 if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
412 (event_log_level > MFI_EVT_CLASS_DEAD)) {
413 printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
414 event_log_level = MFI_EVT_CLASS_CRITICAL;
415 }
416
417 if (class_locale.members.class >= event_log_level)
418 dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
419 le32_to_cpu(evt_detail->seq_num),
420 format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
421 (class_locale.members.locale),
422 format_class(class_locale.members.class),
423 evt_detail->description);
424 }
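/*
 * With the default event_log_level (CLASS_CRITICAL), the dev_info() above
 * emits lines of the form (values are hypothetical):
 *
 *   <seq_num> (<timestamp>/0x<locale>/<class>) - <description>
 *   e.g. "120 (boot + 35s/0x0020/CRIT) - ..."
 */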
425
426 /*
427 * The following functions are defined for xscale
428 * (deviceid : 1064R, PERC5) controllers
429 */
430
431 /**
432 * megasas_enable_intr_xscale - Enables interrupts
433 * @instance: Adapter soft state
434 */
435 static inline void
436 megasas_enable_intr_xscale(struct megasas_instance *instance)
437 {
438 struct megasas_register_set __iomem *regs;
439
440 regs = instance->reg_set;
441 writel(0, &(regs)->outbound_intr_mask);
442
443 /* Dummy readl to force pci flush */
444 readl(&regs->outbound_intr_mask);
445 }
446
447 /**
448 * megasas_disable_intr_xscale - Disables interrupt
449 * @instance: Adapter soft state
450 */
451 static inline void
452 megasas_disable_intr_xscale(struct megasas_instance *instance)
453 {
454 struct megasas_register_set __iomem *regs;
455 u32 mask = 0x1f;
456
457 regs = instance->reg_set;
458 writel(mask, &regs->outbound_intr_mask);
459 /* Dummy readl to force pci flush */
460 readl(&regs->outbound_intr_mask);
461 }
462
463 /**
464 * megasas_read_fw_status_reg_xscale - returns the current FW status value
465 * @instance: Adapter soft state
466 */
467 static u32
468 megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
469 {
470 return readl(&instance->reg_set->outbound_msg_0);
471 }
472 /**
473 * megasas_clear_intr_xscale - Check & clear interrupt
474 * @instance: Adapter soft state
475 */
476 static int
477 megasas_clear_intr_xscale(struct megasas_instance *instance)
478 {
479 u32 status;
480 u32 mfiStatus = 0;
481 struct megasas_register_set __iomem *regs;
482 regs = instance->reg_set;
483
484 /*
485 * Check if it is our interrupt
486 */
487 status = readl(&regs->outbound_intr_status);
488
489 if (status & MFI_OB_INTR_STATUS_MASK)
490 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
491 if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
492 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
493
494 /*
495 * Clear the interrupt by writing back the same value
496 */
497 if (mfiStatus)
498 writel(status, &regs->outbound_intr_status);
499
500 /* Dummy readl to force pci flush */
501 readl(&regs->outbound_intr_status);
502
503 return mfiStatus;
504 }
505
506 /**
507 * megasas_fire_cmd_xscale - Sends command to the FW
508 * @instance: Adapter soft state
509 * @frame_phys_addr : Physical address of cmd
510 * @frame_count : Number of frames for the command
511 * @regs : MFI register set
512 */
513 static inline void
514 megasas_fire_cmd_xscale(struct megasas_instance *instance,
515 dma_addr_t frame_phys_addr,
516 u32 frame_count,
517 struct megasas_register_set __iomem *regs)
518 {
519 unsigned long flags;
520
521 spin_lock_irqsave(&instance->hba_lock, flags);
522 writel((frame_phys_addr >> 3)|(frame_count),
523 &(regs)->inbound_queue_port);
524 spin_unlock_irqrestore(&instance->hba_lock, flags);
525 }
526
527 /**
528 * megasas_adp_reset_xscale - For controller reset
529 * @instance: Adapter soft state
530 * @regs: MFI register set
531 */
532 static int
533 megasas_adp_reset_xscale(struct megasas_instance *instance,
534 struct megasas_register_set __iomem *regs)
535 {
536 u32 i;
537 u32 pcidata;
538
539 writel(MFI_ADP_RESET, &regs->inbound_doorbell);
540
541 for (i = 0; i < 3; i++)
542 msleep(1000); /* sleep for 3 secs */
543 pcidata = 0;
544 pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
545 dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
546 if (pcidata & 0x2) {
547 dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
548 pcidata &= ~0x2;
549 pci_write_config_dword(instance->pdev,
550 MFI_1068_PCSR_OFFSET, pcidata);
551
552 for (i = 0; i < 2; i++)
553 msleep(1000); /* need to wait 2 secs again */
554
555 pcidata = 0;
556 pci_read_config_dword(instance->pdev,
557 MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
558 dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
559 if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
560 dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
561 pcidata = 0;
562 pci_write_config_dword(instance->pdev,
563 MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
564 }
565 }
566 return 0;
567 }
568
569 /**
570 * megasas_check_reset_xscale - For controller reset check
571 * @instance: Adapter soft state
572 * @regs: MFI register set
573 */
574 static int
575 megasas_check_reset_xscale(struct megasas_instance *instance,
576 struct megasas_register_set __iomem *regs)
577 {
578 if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
579 (le32_to_cpu(*instance->consumer) ==
580 MEGASAS_ADPRESET_INPROG_SIGN))
581 return 1;
582 return 0;
583 }
584
585 static struct megasas_instance_template megasas_instance_template_xscale = {
586
587 .fire_cmd = megasas_fire_cmd_xscale,
588 .enable_intr = megasas_enable_intr_xscale,
589 .disable_intr = megasas_disable_intr_xscale,
590 .clear_intr = megasas_clear_intr_xscale,
591 .read_fw_status_reg = megasas_read_fw_status_reg_xscale,
592 .adp_reset = megasas_adp_reset_xscale,
593 .check_reset = megasas_check_reset_xscale,
594 .service_isr = megasas_isr,
595 .tasklet = megasas_complete_cmd_dpc,
596 .init_adapter = megasas_init_adapter_mfi,
597 .build_and_issue_cmd = megasas_build_and_issue_cmd,
598 .issue_dcmd = megasas_issue_dcmd,
599 };
600
601 /*
602 * This is the end of set of functions & definitions specific
603 * to xscale (deviceid : 1064R, PERC5) controllers
604 */
605
606 /*
607 * The following functions are defined for ppc (deviceid : 0x60)
608 * controllers
609 */
610
611 /**
612 * megasas_enable_intr_ppc - Enables interrupts
613 * @instance: Adapter soft state
614 */
615 static inline void
616 megasas_enable_intr_ppc(struct megasas_instance *instance)
617 {
618 struct megasas_register_set __iomem *regs;
619
620 regs = instance->reg_set;
621 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
622
623 writel(~0x80000000, &(regs)->outbound_intr_mask);
624
625 /* Dummy readl to force pci flush */
626 readl(&regs->outbound_intr_mask);
627 }
628
629 /**
630 * megasas_disable_intr_ppc - Disable interrupt
631 * @instance: Adapter soft state
632 */
633 static inline void
634 megasas_disable_intr_ppc(struct megasas_instance *instance)
635 {
636 struct megasas_register_set __iomem *regs;
637 u32 mask = 0xFFFFFFFF;
638
639 regs = instance->reg_set;
640 writel(mask, &regs->outbound_intr_mask);
641 /* Dummy readl to force pci flush */
642 readl(&regs->outbound_intr_mask);
643 }
644
645 /**
646 * megasas_read_fw_status_reg_ppc - returns the current FW status value
647 * @instance: Adapter soft state
648 */
649 static u32
650 megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
651 {
652 return readl(&instance->reg_set->outbound_scratch_pad_0);
653 }
654
655 /**
656 * megasas_clear_intr_ppc - Check & clear interrupt
657 * @instance: Adapter soft state
658 */
659 static int
660 megasas_clear_intr_ppc(struct megasas_instance *instance)
661 {
662 u32 status, mfiStatus = 0;
663 struct megasas_register_set __iomem *regs;
664 regs = instance->reg_set;
665
666 /*
667 * Check if it is our interrupt
668 */
669 status = readl(&regs->outbound_intr_status);
670
671 if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
672 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
673
674 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
675 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
676
677 /*
678 * Clear the interrupt by writing back the same value
679 */
680 writel(status, &regs->outbound_doorbell_clear);
681
682 /* Dummy readl to force pci flush */
683 readl(&regs->outbound_doorbell_clear);
684
685 return mfiStatus;
686 }
687
688 /**
689 * megasas_fire_cmd_ppc - Sends command to the FW
690 * @instance: Adapter soft state
691 * @frame_phys_addr: Physical address of cmd
692 * @frame_count: Number of frames for the command
693 * @regs: MFI register set
694 */
695 static inline void
696 megasas_fire_cmd_ppc(struct megasas_instance *instance,
697 dma_addr_t frame_phys_addr,
698 u32 frame_count,
699 struct megasas_register_set __iomem *regs)
700 {
701 unsigned long flags;
702
703 spin_lock_irqsave(&instance->hba_lock, flags);
704 writel((frame_phys_addr | (frame_count<<1))|1,
705 &(regs)->inbound_queue_port);
706 spin_unlock_irqrestore(&instance->hba_lock, flags);
707 }
708
709 /**
710 * megasas_check_reset_ppc - For controller reset check
711 * @instance: Adapter soft state
712 * @regs: MFI register set
713 */
714 static int
715 megasas_check_reset_ppc(struct megasas_instance *instance,
716 struct megasas_register_set __iomem *regs)
717 {
718 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
719 return 1;
720
721 return 0;
722 }
723
724 static struct megasas_instance_template megasas_instance_template_ppc = {
725
726 .fire_cmd = megasas_fire_cmd_ppc,
727 .enable_intr = megasas_enable_intr_ppc,
728 .disable_intr = megasas_disable_intr_ppc,
729 .clear_intr = megasas_clear_intr_ppc,
730 .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
731 .adp_reset = megasas_adp_reset_xscale,
732 .check_reset = megasas_check_reset_ppc,
733 .service_isr = megasas_isr,
734 .tasklet = megasas_complete_cmd_dpc,
735 .init_adapter = megasas_init_adapter_mfi,
736 .build_and_issue_cmd = megasas_build_and_issue_cmd,
737 .issue_dcmd = megasas_issue_dcmd,
738 };
739
740 /**
741 * megasas_enable_intr_skinny - Enables interrupts
742 * @instance: Adapter soft state
743 */
744 static inline void
745 megasas_enable_intr_skinny(struct megasas_instance *instance)
746 {
747 struct megasas_register_set __iomem *regs;
748
749 regs = instance->reg_set;
750 writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
751
752 writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
753
754 /* Dummy readl to force pci flush */
755 readl(&regs->outbound_intr_mask);
756 }
757
758 /**
759 * megasas_disable_intr_skinny - Disables interrupt
760 * @instance: Adapter soft state
761 */
762 static inline void
763 megasas_disable_intr_skinny(struct megasas_instance *instance)
764 {
765 struct megasas_register_set __iomem *regs;
766 u32 mask = 0xFFFFFFFF;
767
768 regs = instance->reg_set;
769 writel(mask, &regs->outbound_intr_mask);
770 /* Dummy readl to force pci flush */
771 readl(&regs->outbound_intr_mask);
772 }
773
774 /**
775 * megasas_read_fw_status_reg_skinny - returns the current FW status value
776 * @instance: Adapter soft state
777 */
778 static u32
779 megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
780 {
781 return readl(&instance->reg_set->outbound_scratch_pad_0);
782 }
783
784 /**
785 * megasas_clear_intr_skinny - Check & clear interrupt
786 * @instance: Adapter soft state
787 */
788 static int
789 megasas_clear_intr_skinny(struct megasas_instance *instance)
790 {
791 u32 status;
792 u32 mfiStatus = 0;
793 struct megasas_register_set __iomem *regs;
794 regs = instance->reg_set;
795
796 /*
797 * Check if it is our interrupt
798 */
799 status = readl(&regs->outbound_intr_status);
800
801 if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
802 return 0;
803 }
804
805 /*
806 * Check if the firmware is in a fault state
807 */
808 if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) ==
809 MFI_STATE_FAULT) {
810 mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
811 } else
812 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
813
814 /*
815 * Clear the interrupt by writing back the same value
816 */
817 writel(status, &regs->outbound_intr_status);
818
819 /*
820 * dummy read to flush PCI
821 */
822 readl(&regs->outbound_intr_status);
823
824 return mfiStatus;
825 }
826
827 /**
828 * megasas_fire_cmd_skinny - Sends command to the FW
829 * @instance: Adapter soft state
830 * @frame_phys_addr: Physical address of cmd
831 * @frame_count: Number of frames for the command
832 * @regs: MFI register set
833 */
834 static inline void
835 megasas_fire_cmd_skinny(struct megasas_instance *instance,
836 dma_addr_t frame_phys_addr,
837 u32 frame_count,
838 struct megasas_register_set __iomem *regs)
839 {
840 unsigned long flags;
841
842 spin_lock_irqsave(&instance->hba_lock, flags);
843 writel(upper_32_bits(frame_phys_addr),
844 &(regs)->inbound_high_queue_port);
845 writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
846 &(regs)->inbound_low_queue_port);
847 spin_unlock_irqrestore(&instance->hba_lock, flags);
848 }
849
850 /**
851 * megasas_check_reset_skinny - For controller reset check
852 * @instance: Adapter soft state
853 * @regs: MFI register set
854 */
855 static int
856 megasas_check_reset_skinny(struct megasas_instance *instance,
857 struct megasas_register_set __iomem *regs)
858 {
859 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
860 return 1;
861
862 return 0;
863 }
864
865 static struct megasas_instance_template megasas_instance_template_skinny = {
866
867 .fire_cmd = megasas_fire_cmd_skinny,
868 .enable_intr = megasas_enable_intr_skinny,
869 .disable_intr = megasas_disable_intr_skinny,
870 .clear_intr = megasas_clear_intr_skinny,
871 .read_fw_status_reg = megasas_read_fw_status_reg_skinny,
872 .adp_reset = megasas_adp_reset_gen2,
873 .check_reset = megasas_check_reset_skinny,
874 .service_isr = megasas_isr,
875 .tasklet = megasas_complete_cmd_dpc,
876 .init_adapter = megasas_init_adapter_mfi,
877 .build_and_issue_cmd = megasas_build_and_issue_cmd,
878 .issue_dcmd = megasas_issue_dcmd,
879 };
880
881
882 /*
883 * The following functions are defined for gen2 (deviceid : 0x78 0x79)
884 * controllers
885 */
886
887 /**
888 * megasas_enable_intr_gen2 - Enables interrupts
889 * @instance: Adapter soft state
890 */
891 static inline void
892 megasas_enable_intr_gen2(struct megasas_instance *instance)
893 {
894 struct megasas_register_set __iomem *regs;
895
896 regs = instance->reg_set;
897 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
898
899 /* write ~0x00000005 (4 | 1) to the intr mask */
900 writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
901
902 /* Dummy readl to force pci flush */
903 readl(&regs->outbound_intr_mask);
904 }
905
906 /**
907 * megasas_disable_intr_gen2 - Disables interrupt
908 * @instance: Adapter soft state
909 */
910 static inline void
911 megasas_disable_intr_gen2(struct megasas_instance *instance)
912 {
913 struct megasas_register_set __iomem *regs;
914 u32 mask = 0xFFFFFFFF;
915
916 regs = instance->reg_set;
917 writel(mask, &regs->outbound_intr_mask);
918 /* Dummy readl to force pci flush */
919 readl(&regs->outbound_intr_mask);
920 }
921
922 /**
923 * megasas_read_fw_status_reg_gen2 - returns the current FW status value
924 * @instance: Adapter soft state
925 */
926 static u32
927 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
928 {
929 return readl(&instance->reg_set->outbound_scratch_pad_0);
930 }
931
932 /**
933 * megasas_clear_intr_gen2 - Check & clear interrupt
934 * @instance: Adapter soft state
935 */
936 static int
937 megasas_clear_intr_gen2(struct megasas_instance *instance)
938 {
939 u32 status;
940 u32 mfiStatus = 0;
941 struct megasas_register_set __iomem *regs;
942 regs = instance->reg_set;
943
944 /*
945 * Check if it is our interrupt
946 */
947 status = readl(&regs->outbound_intr_status);
948
949 if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
950 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
951 }
952 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
953 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
954 }
955
956 /*
957 * Clear the interrupt by writing back the same value
958 */
959 if (mfiStatus)
960 writel(status, &regs->outbound_doorbell_clear);
961
962 /* Dummy readl to force pci flush */
963 readl(&regs->outbound_intr_status);
964
965 return mfiStatus;
966 }
967
968 /**
969 * megasas_fire_cmd_gen2 - Sends command to the FW
970 * @instance: Adapter soft state
971 * @frame_phys_addr: Physical address of cmd
972 * @frame_count: Number of frames for the command
973 * @regs: MFI register set
974 */
975 static inline void
976 megasas_fire_cmd_gen2(struct megasas_instance *instance,
977 dma_addr_t frame_phys_addr,
978 u32 frame_count,
979 struct megasas_register_set __iomem *regs)
980 {
981 unsigned long flags;
982
983 spin_lock_irqsave(&instance->hba_lock, flags);
984 writel((frame_phys_addr | (frame_count<<1))|1,
985 &(regs)->inbound_queue_port);
986 spin_unlock_irqrestore(&instance->hba_lock, flags);
987 }
988
989 /**
990 * megasas_adp_reset_gen2 - For controller reset
991 * @instance: Adapter soft state
992 * @reg_set: MFI register set
993 */
994 static int
995 megasas_adp_reset_gen2(struct megasas_instance *instance,
996 struct megasas_register_set __iomem *reg_set)
997 {
998 u32 retry = 0;
999 u32 HostDiag;
1000 u32 __iomem *seq_offset = &reg_set->seq_offset;
1001 u32 __iomem *hostdiag_offset = &reg_set->host_diag;
1002
1003 if (instance->instancet == &megasas_instance_template_skinny) {
1004 seq_offset = &reg_set->fusion_seq_offset;
1005 hostdiag_offset = &reg_set->fusion_host_diag;
1006 }
1007
1008 writel(0, seq_offset);
1009 writel(4, seq_offset);
1010 writel(0xb, seq_offset);
1011 writel(2, seq_offset);
1012 writel(7, seq_offset);
1013 writel(0xd, seq_offset);
1014
1015 msleep(1000);
1016
1017 HostDiag = (u32)readl(hostdiag_offset);
1018
1019 while (!(HostDiag & DIAG_WRITE_ENABLE)) {
1020 msleep(100);
1021 HostDiag = (u32)readl(hostdiag_offset);
1022 dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
1023 retry, HostDiag);
1024
1025 if (retry++ >= 100)
1026 return 1;
1027
1028 }
1029
1030 dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
1031
1032 writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
1033
1034 ssleep(10);
1035
1036 HostDiag = (u32)readl(hostdiag_offset);
1037 while (HostDiag & DIAG_RESET_ADAPTER) {
1038 msleep(100);
1039 HostDiag = (u32)readl(hostdiag_offset);
1040 dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
1041 retry, HostDiag);
1042
1043 if (retry++ >= 1000)
1044 return 1;
1045
1046 }
1047 return 0;
1048 }
1049
1050 /**
1051 * megasas_check_reset_gen2 - For controller reset check
1052 * @instance: Adapter soft state
1053 * @regs: MFI register set
1054 */
1055 static int
1056 megasas_check_reset_gen2(struct megasas_instance *instance,
1057 struct megasas_register_set __iomem *regs)
1058 {
1059 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1060 return 1;
1061
1062 return 0;
1063 }
1064
1065 static struct megasas_instance_template megasas_instance_template_gen2 = {
1066
1067 .fire_cmd = megasas_fire_cmd_gen2,
1068 .enable_intr = megasas_enable_intr_gen2,
1069 .disable_intr = megasas_disable_intr_gen2,
1070 .clear_intr = megasas_clear_intr_gen2,
1071 .read_fw_status_reg = megasas_read_fw_status_reg_gen2,
1072 .adp_reset = megasas_adp_reset_gen2,
1073 .check_reset = megasas_check_reset_gen2,
1074 .service_isr = megasas_isr,
1075 .tasklet = megasas_complete_cmd_dpc,
1076 .init_adapter = megasas_init_adapter_mfi,
1077 .build_and_issue_cmd = megasas_build_and_issue_cmd,
1078 .issue_dcmd = megasas_issue_dcmd,
1079 };
1080
1081 /*
1082 * This is the end of set of functions & definitions
1083 * specific to gen2 (deviceid : 0x78, 0x79) controllers
1084 */
1085
1086 /*
1087 * Template added for TB (Fusion)
1088 */
1089 extern struct megasas_instance_template megasas_instance_template_fusion;
1090
1091 /**
1092 * megasas_issue_polled - Issues a polling command
1093 * @instance: Adapter soft state
1094 * @cmd: Command packet to be issued
1095 *
1096 * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
1097 */
1098 int
1099 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
1100 {
1101 struct megasas_header *frame_hdr = &cmd->frame->hdr;
1102
1103 frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1104 frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1105
1106 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1107 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1108 __func__, __LINE__);
1109 return DCMD_INIT;
1110 }
1111
1112 instance->instancet->issue_dcmd(instance, cmd);
1113
1114 return wait_and_poll(instance, cmd, instance->requestorId ?
1115 MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
1116 }
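/*
 * Intended use (as implied by the code above): the caller builds the MFI
 * frame in cmd->frame and calls megasas_issue_polled(); the return value is
 * the driver-level DCMD status reported by wait_and_poll(), with a shorter
 * wait window when running as an SR-IOV VF (requestorId set).
 */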
1117
1118 /**
1119 * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
1120 * @instance: Adapter soft state
1121 * @cmd: Command to be issued
1122 * @timeout: Timeout in seconds
1123 *
1124 * This function waits on an event for the command to be returned from ISR.
1125 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1126 * Used to issue ioctl commands.
1127 */
1128 int
1129 megasas_issue_blocked_cmd(struct megasas_instance *instance,
1130 struct megasas_cmd *cmd, int timeout)
1131 {
1132 int ret = 0;
1133 cmd->cmd_status_drv = DCMD_INIT;
1134
1135 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1136 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1137 __func__, __LINE__);
1138 return DCMD_INIT;
1139 }
1140
1141 instance->instancet->issue_dcmd(instance, cmd);
1142
1143 if (timeout) {
1144 ret = wait_event_timeout(instance->int_cmd_wait_q,
1145 cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
1146 if (!ret) {
1147 dev_err(&instance->pdev->dev,
1148 "DCMD(opcode: 0x%x) is timed out, func:%s\n",
1149 cmd->frame->dcmd.opcode, __func__);
1150 return DCMD_TIMEOUT;
1151 }
1152 } else
1153 wait_event(instance->int_cmd_wait_q,
1154 cmd->cmd_status_drv != DCMD_INIT);
1155
1156 return cmd->cmd_status_drv;
1157 }
1158
1159 /**
1160 * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
1161 * @instance: Adapter soft state
1162 * @cmd_to_abort: Previously issued cmd to be aborted
1163 * @timeout: Timeout in seconds
1164 *
1165 * MFI firmware can abort previously issued AEN command (automatic event
1166 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
1167 * cmd and waits for return status.
1168 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1169 */
1170 static int
1171 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1172 struct megasas_cmd *cmd_to_abort, int timeout)
1173 {
1174 struct megasas_cmd *cmd;
1175 struct megasas_abort_frame *abort_fr;
1176 int ret = 0;
1177 u32 opcode;
1178
1179 cmd = megasas_get_cmd(instance);
1180
1181 if (!cmd)
1182 return -1;
1183
1184 abort_fr = &cmd->frame->abort;
1185
1186 /*
1187 * Prepare and issue the abort frame
1188 */
1189 abort_fr->cmd = MFI_CMD_ABORT;
1190 abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
1191 abort_fr->flags = cpu_to_le16(0);
1192 abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
1193 abort_fr->abort_mfi_phys_addr_lo =
1194 cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
1195 abort_fr->abort_mfi_phys_addr_hi =
1196 cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1197
1198 cmd->sync_cmd = 1;
1199 cmd->cmd_status_drv = DCMD_INIT;
1200
1201 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1202 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1203 __func__, __LINE__);
1204 return DCMD_INIT;
1205 }
1206
1207 instance->instancet->issue_dcmd(instance, cmd);
1208
1209 if (timeout) {
1210 ret = wait_event_timeout(instance->abort_cmd_wait_q,
1211 cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
1212 if (!ret) {
1213 opcode = cmd_to_abort->frame->dcmd.opcode;
1214 dev_err(&instance->pdev->dev,
1215 "Abort(to be aborted DCMD opcode: 0x%x) is timed out func:%s\n",
1216 opcode, __func__);
1217 return DCMD_TIMEOUT;
1218 }
1219 } else
1220 wait_event(instance->abort_cmd_wait_q,
1221 cmd->cmd_status_drv != DCMD_INIT);
1222
1223 cmd->sync_cmd = 0;
1224
1225 megasas_return_cmd(instance, cmd);
1226 return cmd->cmd_status_drv;
1227 }
1228
1229 /**
1230 * megasas_make_sgl32 - Prepares 32-bit SGL
1231 * @instance: Adapter soft state
1232 * @scp: SCSI command from the mid-layer
1233 * @mfi_sgl: SGL to be filled in
1234 *
1235 * If successful, this function returns the number of SG elements. Otherwise,
1236 * it returns -1.
1237 */
1238 static int
1239 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
1240 union megasas_sgl *mfi_sgl)
1241 {
1242 int i;
1243 int sge_count;
1244 struct scatterlist *os_sgl;
1245
1246 sge_count = scsi_dma_map(scp);
1247 BUG_ON(sge_count < 0);
1248
1249 if (sge_count) {
1250 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1251 mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1252 mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
1253 }
1254 }
1255 return sge_count;
1256 }
1257
1258 /**
1259 * megasas_make_sgl64 - Prepares 64-bit SGL
1260 * @instance: Adapter soft state
1261 * @scp: SCSI command from the mid-layer
1262 * @mfi_sgl: SGL to be filled in
1263 *
1264 * If successful, this function returns the number of SG elements. Otherwise,
1265 * it returns -1.
1266 */
1267 static int
1268 megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
1269 union megasas_sgl *mfi_sgl)
1270 {
1271 int i;
1272 int sge_count;
1273 struct scatterlist *os_sgl;
1274
1275 sge_count = scsi_dma_map(scp);
1276 BUG_ON(sge_count < 0);
1277
1278 if (sge_count) {
1279 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1280 mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1281 mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
1282 }
1283 }
1284 return sge_count;
1285 }
1286
1287 /**
1288 * megasas_make_sgl_skinny - Prepares IEEE SGL
1289 * @instance: Adapter soft state
1290 * @scp: SCSI command from the mid-layer
1291 * @mfi_sgl: SGL to be filled in
1292 *
1293 * If successful, this function returns the number of SG elements. Otherwise,
1294 * it returns -1.
1295 */
1296 static int
1297 megasas_make_sgl_skinny(struct megasas_instance *instance,
1298 struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
1299 {
1300 int i;
1301 int sge_count;
1302 struct scatterlist *os_sgl;
1303
1304 sge_count = scsi_dma_map(scp);
1305
1306 if (sge_count) {
1307 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1308 mfi_sgl->sge_skinny[i].length =
1309 cpu_to_le32(sg_dma_len(os_sgl));
1310 mfi_sgl->sge_skinny[i].phys_addr =
1311 cpu_to_le64(sg_dma_address(os_sgl));
1312 mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
1313 }
1314 }
1315 return sge_count;
1316 }
1317
1318 /**
1319 * megasas_get_frame_count - Computes the number of frames
1320 * @sge_count : number of sg elements
1321 * @frame_type : type of frame - io or pthru frame
1322 *
1323 * Returns the number of frames required for number of SGEs (sge_count)
1324 */
1325
1326 static u32 megasas_get_frame_count(struct megasas_instance *instance,
1327 u8 sge_count, u8 frame_type)
1328 {
1329 int num_cnt;
1330 int sge_bytes;
1331 u32 sge_sz;
1332 u32 frame_count = 0;
1333
1334 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1335 sizeof(struct megasas_sge32);
1336
1337 if (instance->flag_ieee) {
1338 sge_sz = sizeof(struct megasas_sge_skinny);
1339 }
1340
1341 /*
1342 * Main frame can contain 2 SGEs for 64-bit SGLs and
1343 * 3 SGEs for 32-bit SGLs for ldio &
1344 * 1 SGEs for 64-bit SGLs and
1345 * 2 SGEs for 32-bit SGLs for pthru frame
1346 */
1347 if (unlikely(frame_type == PTHRU_FRAME)) {
1348 if (instance->flag_ieee == 1) {
1349 num_cnt = sge_count - 1;
1350 } else if (IS_DMA64)
1351 num_cnt = sge_count - 1;
1352 else
1353 num_cnt = sge_count - 2;
1354 } else {
1355 if (instance->flag_ieee == 1) {
1356 num_cnt = sge_count - 1;
1357 } else if (IS_DMA64)
1358 num_cnt = sge_count - 2;
1359 else
1360 num_cnt = sge_count - 3;
1361 }
1362
1363 if (num_cnt > 0) {
1364 sge_bytes = sge_sz * num_cnt;
1365
1366 frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
1367 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
1368 }
1369 /* Main frame */
1370 frame_count += 1;
1371
1372 if (frame_count > 7)
1373 frame_count = 8;
1374 return frame_count;
1375 }
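/*
 * Worked example (assuming a 64-bit, non-IEEE SGL): for an LDIO with
 * sge_count = 10, the main frame holds 2 SGEs, so num_cnt = 8 remaining SGEs
 * spill into ceil(8 * sizeof(struct megasas_sge64) / MEGAMFI_FRAME_SIZE)
 * additional frames; the total, including the main frame, is capped at 8.
 */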
1376
1377 /**
1378 * megasas_build_dcdb - Prepares a direct cdb (DCDB) command
1379 * @instance: Adapter soft state
1380 * @scp: SCSI command
1381 * @cmd: Command to be prepared in
1382 *
1383 * This function prepares CDB commands. These are typically pass-through
1384 * commands to the devices.
1385 */
1386 static int
1387 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1388 struct megasas_cmd *cmd)
1389 {
1390 u32 is_logical;
1391 u32 device_id;
1392 u16 flags = 0;
1393 struct megasas_pthru_frame *pthru;
1394
1395 is_logical = MEGASAS_IS_LOGICAL(scp->device);
1396 device_id = MEGASAS_DEV_INDEX(scp);
1397 pthru = (struct megasas_pthru_frame *)cmd->frame;
1398
1399 if (scp->sc_data_direction == DMA_TO_DEVICE)
1400 flags = MFI_FRAME_DIR_WRITE;
1401 else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1402 flags = MFI_FRAME_DIR_READ;
1403 else if (scp->sc_data_direction == DMA_NONE)
1404 flags = MFI_FRAME_DIR_NONE;
1405
1406 if (instance->flag_ieee == 1) {
1407 flags |= MFI_FRAME_IEEE;
1408 }
1409
1410 /*
1411 * Prepare the DCDB frame
1412 */
1413 pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
1414 pthru->cmd_status = 0x0;
1415 pthru->scsi_status = 0x0;
1416 pthru->target_id = device_id;
1417 pthru->lun = scp->device->lun;
1418 pthru->cdb_len = scp->cmd_len;
1419 pthru->timeout = 0;
1420 pthru->pad_0 = 0;
1421 pthru->flags = cpu_to_le16(flags);
1422 pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
1423
1424 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1425
1426 /*
1427 * If the command is for the tape device, set the
1428 * pthru timeout to the os layer timeout value.
1429 */
1430 if (scp->device->type == TYPE_TAPE) {
1431 if ((scp->request->timeout / HZ) > 0xFFFF)
1432 pthru->timeout = cpu_to_le16(0xFFFF);
1433 else
1434 pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
1435 }
1436
1437 /*
1438 * Construct SGL
1439 */
1440 if (instance->flag_ieee == 1) {
1441 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1442 pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
1443 &pthru->sgl);
1444 } else if (IS_DMA64) {
1445 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1446 pthru->sge_count = megasas_make_sgl64(instance, scp,
1447 &pthru->sgl);
1448 } else
1449 pthru->sge_count = megasas_make_sgl32(instance, scp,
1450 &pthru->sgl);
1451
1452 if (pthru->sge_count > instance->max_num_sge) {
1453 dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
1454 pthru->sge_count);
1455 return 0;
1456 }
1457
1458 /*
1459 * Sense info specific
1460 */
1461 pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
1462 pthru->sense_buf_phys_addr_hi =
1463 cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
1464 pthru->sense_buf_phys_addr_lo =
1465 cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
1466
1467 /*
1468 * Compute the total number of frames this command consumes. FW uses
1469 * this number to pull sufficient number of frames from host memory.
1470 */
1471 cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
1472 PTHRU_FRAME);
1473
1474 return cmd->frame_count;
1475 }
1476
1477 /**
1478 * megasas_build_ldio - Prepares IOs to logical devices
1479 * @instance: Adapter soft state
1480 * @scp: SCSI command
1481 * @cmd: Command to be prepared
1482 *
1483 * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
1484 */
1485 static int
1486 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1487 struct megasas_cmd *cmd)
1488 {
1489 u32 device_id;
1490 u8 sc = scp->cmnd[0];
1491 u16 flags = 0;
1492 struct megasas_io_frame *ldio;
1493
1494 device_id = MEGASAS_DEV_INDEX(scp);
1495 ldio = (struct megasas_io_frame *)cmd->frame;
1496
1497 if (scp->sc_data_direction == DMA_TO_DEVICE)
1498 flags = MFI_FRAME_DIR_WRITE;
1499 else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1500 flags = MFI_FRAME_DIR_READ;
1501
1502 if (instance->flag_ieee == 1) {
1503 flags |= MFI_FRAME_IEEE;
1504 }
1505
1506 /*
1507 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
1508 */
1509 ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
1510 ldio->cmd_status = 0x0;
1511 ldio->scsi_status = 0x0;
1512 ldio->target_id = device_id;
1513 ldio->timeout = 0;
1514 ldio->reserved_0 = 0;
1515 ldio->pad_0 = 0;
1516 ldio->flags = cpu_to_le16(flags);
1517 ldio->start_lba_hi = 0;
1518 ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
1519
1520 /*
1521 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1522 */
1523 if (scp->cmd_len == 6) {
1524 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
1525 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
1526 ((u32) scp->cmnd[2] << 8) |
1527 (u32) scp->cmnd[3]);
1528
1529 ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
1530 }
1531
1532 /*
1533 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1534 */
1535 else if (scp->cmd_len == 10) {
1536 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
1537 ((u32) scp->cmnd[7] << 8));
1538 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1539 ((u32) scp->cmnd[3] << 16) |
1540 ((u32) scp->cmnd[4] << 8) |
1541 (u32) scp->cmnd[5]);
1542 }
1543
1544 /*
1545 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1546 */
1547 else if (scp->cmd_len == 12) {
1548 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1549 ((u32) scp->cmnd[7] << 16) |
1550 ((u32) scp->cmnd[8] << 8) |
1551 (u32) scp->cmnd[9]);
1552
1553 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1554 ((u32) scp->cmnd[3] << 16) |
1555 ((u32) scp->cmnd[4] << 8) |
1556 (u32) scp->cmnd[5]);
1557 }
1558
1559 /*
1560 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1561 */
1562 else if (scp->cmd_len == 16) {
1563 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
1564 ((u32) scp->cmnd[11] << 16) |
1565 ((u32) scp->cmnd[12] << 8) |
1566 (u32) scp->cmnd[13]);
1567
1568 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1569 ((u32) scp->cmnd[7] << 16) |
1570 ((u32) scp->cmnd[8] << 8) |
1571 (u32) scp->cmnd[9]);
1572
1573 ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1574 ((u32) scp->cmnd[3] << 16) |
1575 ((u32) scp->cmnd[4] << 8) |
1576 (u32) scp->cmnd[5]);
1577
1578 }
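/*
 * Example decode (illustrative CDB): a READ(10) of
 *   28 00 00 00 08 00 00 00 10 00
 * yields start_lba_lo = 0x800 (bytes 2-5) and lba_count = 0x10 (bytes 7-8)
 * via the 10-byte branch above.
 */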
1579
1580 /*
1581 * Construct SGL
1582 */
1583 if (instance->flag_ieee) {
1584 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1585 ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1586 &ldio->sgl);
1587 } else if (IS_DMA64) {
1588 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1589 ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
1590 } else
1591 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1592
1593 if (ldio->sge_count > instance->max_num_sge) {
1594 dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
1595 ldio->sge_count);
1596 return 0;
1597 }
1598
1599 /*
1600 * Sense info specific
1601 */
1602 ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
1603 ldio->sense_buf_phys_addr_hi = 0;
1604 ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
1605
1606 /*
1607 * Compute the total number of frames this command consumes. FW uses
1608 * this number to pull sufficient number of frames from host memory.
1609 */
1610 cmd->frame_count = megasas_get_frame_count(instance,
1611 ldio->sge_count, IO_FRAME);
1612
1613 return cmd->frame_count;
1614 }
1615
1616 /**
1617 * megasas_cmd_type - Checks if the cmd is for logical drive/sysPD
1618 * and whether it's RW or non RW
1619 * @cmd: SCSI command
1620 *
1621 */
1622 inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1623 {
1624 int ret;
1625
1626 switch (cmd->cmnd[0]) {
1627 case READ_10:
1628 case WRITE_10:
1629 case READ_12:
1630 case WRITE_12:
1631 case READ_6:
1632 case WRITE_6:
1633 case READ_16:
1634 case WRITE_16:
1635 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1636 READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
1637 break;
1638 default:
1639 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1640 NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
1641 }
1642 return ret;
1643 }
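/*
 * For example, a READ_10 issued to a logical drive is classified as
 * READ_WRITE_LDIO, while an INQUIRY to a system PD falls through to the
 * default case and is classified as NON_READ_WRITE_SYSPDIO.
 */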
1644
1645 /**
1646 * megasas_dump_pending_frames - Dumps the frame address of all pending cmds
1647 * in FW
1648 * @instance: Adapter soft state
1649 */
1650 static inline void
1651 megasas_dump_pending_frames(struct megasas_instance *instance)
1652 {
1653 struct megasas_cmd *cmd;
1654 int i,n;
1655 union megasas_sgl *mfi_sgl;
1656 struct megasas_io_frame *ldio;
1657 struct megasas_pthru_frame *pthru;
1658 u32 sgcount;
1659 u16 max_cmd = instance->max_fw_cmds;
1660
1661 dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
1662 dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
1663 if (IS_DMA64)
1664 dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
1665 else
1666 dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
1667
1668 dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
1669 for (i = 0; i < max_cmd; i++) {
1670 cmd = instance->cmd_list[i];
1671 if (!cmd->scmd)
1672 continue;
1673 dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
1674 if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
1675 ldio = (struct megasas_io_frame *)cmd->frame;
1676 mfi_sgl = &ldio->sgl;
1677 sgcount = ldio->sge_count;
1678 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
1679 " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1680 instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
1681 le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
1682 le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
1683 } else {
1684 pthru = (struct megasas_pthru_frame *) cmd->frame;
1685 mfi_sgl = &pthru->sgl;
1686 sgcount = pthru->sge_count;
1687 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
1688 "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1689 instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
1690 pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
1691 le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
1692 }
1693 if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
1694 for (n = 0; n < sgcount; n++) {
1695 if (IS_DMA64)
1696 dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
1697 le32_to_cpu(mfi_sgl->sge64[n].length),
1698 le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
1699 else
1700 dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
1701 le32_to_cpu(mfi_sgl->sge32[n].length),
1702 le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
1703 }
1704 }
1705 } /*for max_cmd*/
1706 dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
1707 for (i = 0; i < max_cmd; i++) {
1708
1709 cmd = instance->cmd_list[i];
1710
1711 if (cmd->sync_cmd == 1)
1712 dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
1713 }
1714 dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
1715 }
1716
1717 u32
1718 megasas_build_and_issue_cmd(struct megasas_instance *instance,
1719 struct scsi_cmnd *scmd)
1720 {
1721 struct megasas_cmd *cmd;
1722 u32 frame_count;
1723
1724 cmd = megasas_get_cmd(instance);
1725 if (!cmd)
1726 return SCSI_MLQUEUE_HOST_BUSY;
1727
1728 /*
1729 * Logical drive command
1730 */
1731 if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
1732 frame_count = megasas_build_ldio(instance, scmd, cmd);
1733 else
1734 frame_count = megasas_build_dcdb(instance, scmd, cmd);
1735
1736 if (!frame_count)
1737 goto out_return_cmd;
1738
1739 cmd->scmd = scmd;
1740 scmd->SCp.ptr = (char *)cmd;
1741
1742 /*
1743 * Issue the command to the FW
1744 */
1745 atomic_inc(&instance->fw_outstanding);
1746
1747 instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1748 cmd->frame_count-1, instance->reg_set);
1749
1750 return 0;
1751 out_return_cmd:
1752 megasas_return_cmd(instance, cmd);
1753 return SCSI_MLQUEUE_HOST_BUSY;
1754 }
1755
1756
1757 /**
1758 * megasas_queue_command - Queue entry point
1759 * @shost: adapter SCSI host
1760 * @scmd: SCSI command to be queued
1761 */
1762 static int
1763 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1764 {
1765 struct megasas_instance *instance;
1766 struct MR_PRIV_DEVICE *mr_device_priv_data;
1767
1768 instance = (struct megasas_instance *)
1769 scmd->device->host->hostdata;
1770
1771 if (instance->unload == 1) {
1772 scmd->result = DID_NO_CONNECT << 16;
1773 scmd->scsi_done(scmd);
1774 return 0;
1775 }
1776
1777 if (instance->issuepend_done == 0)
1778 return SCSI_MLQUEUE_HOST_BUSY;
1779
1780
1781 /* Check for an mpio path and adjust behavior */
1782 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
1783 if (megasas_check_mpio_paths(instance, scmd) ==
1784 (DID_REQUEUE << 16)) {
1785 return SCSI_MLQUEUE_HOST_BUSY;
1786 } else {
1787 scmd->result = DID_NO_CONNECT << 16;
1788 scmd->scsi_done(scmd);
1789 return 0;
1790 }
1791 }
1792
1793 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1794 scmd->result = DID_NO_CONNECT << 16;
1795 scmd->scsi_done(scmd);
1796 return 0;
1797 }
1798
1799 mr_device_priv_data = scmd->device->hostdata;
1800 if (!mr_device_priv_data) {
1801 scmd->result = DID_NO_CONNECT << 16;
1802 scmd->scsi_done(scmd);
1803 return 0;
1804 }
1805
1806 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1807 return SCSI_MLQUEUE_HOST_BUSY;
1808
1809 if (mr_device_priv_data->tm_busy)
1810 return SCSI_MLQUEUE_DEVICE_BUSY;
1811
1812
1813 scmd->result = 0;
1814
1815 if (MEGASAS_IS_LOGICAL(scmd->device) &&
1816 (scmd->device->id >= instance->fw_supported_vd_count ||
1817 scmd->device->lun)) {
1818 scmd->result = DID_BAD_TARGET << 16;
1819 goto out_done;
1820 }
1821
1822 if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
1823 MEGASAS_IS_LOGICAL(scmd->device) &&
1824 (!instance->fw_sync_cache_support)) {
1825 scmd->result = DID_OK << 16;
1826 goto out_done;
1827 }
1828
1829 return instance->instancet->build_and_issue_cmd(instance, scmd);
1830
1831 out_done:
1832 scmd->scsi_done(scmd);
1833 return 0;
1834 }
1835
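/**
 * megasas_lookup_instance - Look up adapter soft state by SCSI host number
 * @host_no:	host number assigned by the SCSI midlayer
 *
 * Walks the global megasas_mgmt_info table and returns the matching
 * instance, or NULL if no registered adapter owns @host_no.
 */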
1836 static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1837 {
1838 int i;
1839
1840 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
1841
1842 if ((megasas_mgmt_info.instance[i]) &&
1843 (megasas_mgmt_info.instance[i]->host->host_no == host_no))
1844 return megasas_mgmt_info.instance[i];
1845 }
1846
1847 return NULL;
1848 }
1849
1850 /*
1851 * megasas_set_dynamic_target_properties -
1852  * Device properties set by the driver may not be static and must be
1853  * refreshed after an OCR.
1854  *
1855  * set tm_capable.
1856  * set dma alignment (only for EEDP protection enabled VDs).
1857 *
1858 * @sdev: OS provided scsi device
1859 *
1860 * Returns void
1861 */
1862 void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
1863 bool is_target_prop)
1864 {
1865 u16 pd_index = 0, ld;
1866 u32 device_id;
1867 struct megasas_instance *instance;
1868 struct fusion_context *fusion;
1869 struct MR_PRIV_DEVICE *mr_device_priv_data;
1870 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1871 struct MR_LD_RAID *raid;
1872 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1873
1874 instance = megasas_lookup_instance(sdev->host->host_no);
1875 fusion = instance->ctrl_context;
1876 mr_device_priv_data = sdev->hostdata;
1877
1878 if (!fusion || !mr_device_priv_data)
1879 return;
1880
1881 if (MEGASAS_IS_LOGICAL(sdev)) {
1882 device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
1883 + sdev->id;
1884 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1885 ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1886 if (ld >= instance->fw_supported_vd_count)
1887 return;
1888 raid = MR_LdRaidGet(ld, local_map_ptr);
1889
1890 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
1891 blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1892
1893 mr_device_priv_data->is_tm_capable =
1894 raid->capability.tmCapable;
1895
1896 if (!raid->flags.isEPD)
1897 sdev->no_write_same = 1;
1898
1899 } else if (instance->use_seqnum_jbod_fp) {
1900 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1901 sdev->id;
1902 pd_sync = (void *)fusion->pd_seq_sync
1903 [(instance->pd_seq_map_id - 1) & 1];
1904 mr_device_priv_data->is_tm_capable =
1905 pd_sync->seq[pd_index].capability.tmCapable;
1906 }
1907
1908 if (is_target_prop && instance->tgt_prop->reset_tmo) {
1909 /*
1910 * If FW provides a target reset timeout value, driver will use
1911  * it. If not set, fall back to default values.
1912 */
1913 mr_device_priv_data->target_reset_tmo =
1914 min_t(u8, instance->max_reset_tmo,
1915 instance->tgt_prop->reset_tmo);
1916 mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo;
1917 } else {
1918 mr_device_priv_data->target_reset_tmo =
1919 MEGASAS_DEFAULT_TM_TIMEOUT;
1920 mr_device_priv_data->task_abort_tmo =
1921 MEGASAS_DEFAULT_TM_TIMEOUT;
1922 }
1923 }
1924
1925 /*
1926 * megasas_set_nvme_device_properties -
1927 * set nomerges=2
1928 * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
1929 * set maximum io transfer = MDTS of NVME device provided by MR firmware.
1930 *
1931  * MR firmware provides the value in KB. The caller of this function
1932  * converts KB into bytes.
1933  *
1934  * e.g. MDTS=5 means 2^5 * NVMe page size. With a 4K page size,
1935  * MR firmware provides the value 128, as 32 * 4K = 128K.
1936 *
1937 * @sdev: scsi device
1938 * @max_io_size: maximum io transfer size
1939 *
1940 */
1941 static inline void
1942 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
1943 {
1944 struct megasas_instance *instance;
1945 u32 mr_nvme_pg_size;
1946
1947 instance = (struct megasas_instance *)sdev->host->hostdata;
1948 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1949 MR_DEFAULT_NVME_PAGE_SIZE);
1950
1951 blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
1952
1953 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue);
1954 blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
1955 }
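/*
 * Illustrative example of the MDTS math above: with a 4K NVMe page size and
 * MDTS=5 the firmware reports max_io_size_kb = 128 (32 * 4K = 128K); the
 * caller passes 128 << 10 = 131072 bytes, so max_hw_sectors becomes
 * 131072 / 512 = 256 and the virt boundary mask is 4K - 1.
 */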
1956
1957 /*
1958 * megasas_set_fw_assisted_qd -
1959  * set device queue depth to the FW assisted qd, or to can_queue when
1960  * enable_sdev_max_qd is set
1961  *
1962  * @sdev: scsi device
1963  * @is_target_prop: true, if FW provided target properties.
1964 */
1965 static void megasas_set_fw_assisted_qd(struct scsi_device *sdev,
1966 bool is_target_prop)
1967 {
1968 u8 interface_type;
1969 u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
1970 u32 tgt_device_qd;
1971 struct megasas_instance *instance;
1972 struct MR_PRIV_DEVICE *mr_device_priv_data;
1973
1974 instance = megasas_lookup_instance(sdev->host->host_no);
1975 mr_device_priv_data = sdev->hostdata;
1976 interface_type = mr_device_priv_data->interface_type;
1977
1978 switch (interface_type) {
1979 case SAS_PD:
1980 device_qd = MEGASAS_SAS_QD;
1981 break;
1982 case SATA_PD:
1983 device_qd = MEGASAS_SATA_QD;
1984 break;
1985 case NVME_PD:
1986 device_qd = MEGASAS_NVME_QD;
1987 break;
1988 }
1989
1990 if (is_target_prop) {
1991 tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
1992 if (tgt_device_qd)
1993 device_qd = min(instance->host->can_queue,
1994 (int)tgt_device_qd);
1995 }
1996
1997 if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE)
1998 device_qd = instance->host->can_queue;
1999
2000 scsi_change_queue_depth(sdev, device_qd);
2001 }
2002
2003 /*
2004 * megasas_set_static_target_properties -
2005  * Device properties set by the driver are static and do not need to be
2006  * updated after OCR.
2007 *
2008 * set io timeout
2009 * set device queue depth
2010 * set nvme device properties. see - megasas_set_nvme_device_properties
2011 *
2012 * @sdev: scsi device
2013  * @is_target_prop: true, if FW provided target properties.
2014 */
2015 static void megasas_set_static_target_properties(struct scsi_device *sdev,
2016 bool is_target_prop)
2017 {
2018 u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
2019 struct megasas_instance *instance;
2020
2021 instance = megasas_lookup_instance(sdev->host->host_no);
2022
2023 /*
2024 * The RAID firmware may require extended timeouts.
2025 */
2026 blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
2027
2028 /* max_io_size_kb will be set to non-zero for
2029  * NVMe based VD and sysPD.
2030 */
2031 if (is_target_prop)
2032 max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
2033
2034 if (instance->nvme_page_size && max_io_size_kb)
2035 megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
2036
2037 megasas_set_fw_assisted_qd(sdev, is_target_prop);
2038 }
2039
2040
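/**
 * megasas_slave_configure - SCSI midlayer slave_configure entry point
 * @sdev:	OS provided scsi device
 *
 * Fetches PD information and target properties from firmware under
 * reset_mutex and applies the static and dynamic device properties
 * to @sdev.
 */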
2041 static int megasas_slave_configure(struct scsi_device *sdev)
2042 {
2043 u16 pd_index = 0;
2044 struct megasas_instance *instance;
2045 int ret_target_prop = DCMD_FAILED;
2046 bool is_target_prop = false;
2047
2048 instance = megasas_lookup_instance(sdev->host->host_no);
2049 if (instance->pd_list_not_supported) {
2050 if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
2051 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2052 sdev->id;
2053 if (instance->pd_list[pd_index].driveState !=
2054 MR_PD_STATE_SYSTEM)
2055 return -ENXIO;
2056 }
2057 }
2058
2059 mutex_lock(&instance->reset_mutex);
2060 /* Send DCMD to Firmware and cache the information */
2061 if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
2062 megasas_get_pd_info(instance, sdev);
2063
2064 /* Some Ventura firmware may not have instance->nvme_page_size set.
2065  * Do not send MR_DCMD_DRV_GET_TARGET_PROP in that case.
2066 */
2067 if ((instance->tgt_prop) && (instance->nvme_page_size))
2068 ret_target_prop = megasas_get_target_prop(instance, sdev);
2069
2070 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
2071 megasas_set_static_target_properties(sdev, is_target_prop);
2072
2073 /* This sdev property may change post OCR */
2074 megasas_set_dynamic_target_properties(sdev, is_target_prop);
2075
2076 mutex_unlock(&instance->reset_mutex);
2077
2078 return 0;
2079 }
2080
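/**
 * megasas_slave_alloc - SCSI midlayer slave_alloc entry point
 * @sdev:	OS provided scsi device
 *
 * For system PDs the scan is allowed only when the drive is exposed to the
 * OS (MR_PD_STATE_SYSTEM) or the PD list is not supported. Allocates the
 * per-device MR_PRIV_DEVICE structure used by the driver.
 */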
2081 static int megasas_slave_alloc(struct scsi_device *sdev)
2082 {
2083 u16 pd_index = 0;
2084 struct megasas_instance *instance ;
2085 struct MR_PRIV_DEVICE *mr_device_priv_data;
2086
2087 instance = megasas_lookup_instance(sdev->host->host_no);
2088 if (!MEGASAS_IS_LOGICAL(sdev)) {
2089 /*
2090 * Open the OS scan to the SYSTEM PD
2091 */
2092 pd_index =
2093 (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2094 sdev->id;
2095 if ((instance->pd_list_not_supported ||
2096 instance->pd_list[pd_index].driveState ==
2097 MR_PD_STATE_SYSTEM)) {
2098 goto scan_target;
2099 }
2100 return -ENXIO;
2101 }
2102
2103 scan_target:
2104 mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
2105 GFP_KERNEL);
2106 if (!mr_device_priv_data)
2107 return -ENOMEM;
2108 sdev->hostdata = mr_device_priv_data;
2109
2110 atomic_set(&mr_device_priv_data->r1_ldio_hint,
2111 instance->r1_ldio_hint_default);
2112 return 0;
2113 }
2114
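/**
 * megasas_slave_destroy - SCSI midlayer slave_destroy entry point
 * @sdev:	OS provided scsi device
 *
 * Frees the per-device private data allocated in megasas_slave_alloc.
 */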
2115 static void megasas_slave_destroy(struct scsi_device *sdev)
2116 {
2117 kfree(sdev->hostdata);
2118 sdev->hostdata = NULL;
2119 }
2120
2121 /*
2122  * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after
2123  * the adapter is killed
2124 * @instance: Adapter soft state
2125 *
2126 */
2127 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
2128 {
2129 int i;
2130 struct megasas_cmd *cmd_mfi;
2131 struct megasas_cmd_fusion *cmd_fusion;
2132 struct fusion_context *fusion = instance->ctrl_context;
2133
2134 /* Find all outstanding ioctls */
2135 if (fusion) {
2136 for (i = 0; i < instance->max_fw_cmds; i++) {
2137 cmd_fusion = fusion->cmd_list[i];
2138 if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
2139 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2140 if (cmd_mfi->sync_cmd &&
2141 (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
2142 cmd_mfi->frame->hdr.cmd_status =
2143 MFI_STAT_WRONG_STATE;
2144 megasas_complete_cmd(instance,
2145 cmd_mfi, DID_OK);
2146 }
2147 }
2148 }
2149 } else {
2150 for (i = 0; i < instance->max_fw_cmds; i++) {
2151 cmd_mfi = instance->cmd_list[i];
2152 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
2153 MFI_CMD_ABORT)
2154 megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2155 }
2156 }
2157 }
2158
2159
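/**
 * megaraid_sas_kill_hba - Stop the adapter and fail further I/O
 * @instance:	Adapter soft state
 *
 * Marks the adapter as being in a critical error state, asks firmware to
 * stop (MFI_STOP_ADP) where applicable and completes outstanding ioctls.
 */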
2160 void megaraid_sas_kill_hba(struct megasas_instance *instance)
2161 {
2162 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2163 dev_warn(&instance->pdev->dev,
2164 "Adapter already dead, skipping kill HBA\n");
2165 return;
2166 }
2167
2168 /* Set critical error to block I/O & ioctls in case caller didn't */
2169 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2170 /* Wait 1 second to ensure IO or ioctls in build have posted */
2171 msleep(1000);
2172 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2173 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2174 (instance->adapter_type != MFI_SERIES)) {
2175 if (!instance->requestorId) {
2176 writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
2177 /* Flush */
2178 readl(&instance->reg_set->doorbell);
2179 }
2180 if (instance->requestorId && instance->peerIsPresent)
2181 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2182 } else {
2183 writel(MFI_STOP_ADP,
2184 &instance->reg_set->inbound_doorbell);
2185 }
2186 /* Complete outstanding ioctls when adapter is killed */
2187 megasas_complete_outstanding_ioctls(instance);
2188 }
2189
2190 /**
2191 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
2192 * restored to max value
2193 * @instance: Adapter soft state
2194 *
2195 */
2196 void
2197 megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
2198 {
2199 unsigned long flags;
2200
2201 if (instance->flag & MEGASAS_FW_BUSY
2202 && time_after(jiffies, instance->last_time + 5 * HZ)
2203 && atomic_read(&instance->fw_outstanding) <
2204 instance->throttlequeuedepth + 1) {
2205
2206 spin_lock_irqsave(instance->host->host_lock, flags);
2207 instance->flag &= ~MEGASAS_FW_BUSY;
2208
2209 instance->host->can_queue = instance->cur_can_queue;
2210 spin_unlock_irqrestore(instance->host->host_lock, flags);
2211 }
2212 }
2213
2214 /**
2215  * megasas_complete_cmd_dpc - Completes outstanding commands from the reply queue
2216 * @instance_addr: Address of adapter soft state
2217 *
2218 * Tasklet to complete cmds
2219 */
2220 static void megasas_complete_cmd_dpc(unsigned long instance_addr)
2221 {
2222 u32 producer;
2223 u32 consumer;
2224 u32 context;
2225 struct megasas_cmd *cmd;
2226 struct megasas_instance *instance =
2227 (struct megasas_instance *)instance_addr;
2228 unsigned long flags;
2229
2230 /* If we have already declared adapter dead, do not complete cmds */
2231 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
2232 return;
2233
2234 spin_lock_irqsave(&instance->completion_lock, flags);
2235
2236 producer = le32_to_cpu(*instance->producer);
2237 consumer = le32_to_cpu(*instance->consumer);
2238
2239 while (consumer != producer) {
2240 context = le32_to_cpu(instance->reply_queue[consumer]);
2241 if (context >= instance->max_fw_cmds) {
2242 dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
2243 context);
2244 BUG();
2245 }
2246
2247 cmd = instance->cmd_list[context];
2248
2249 megasas_complete_cmd(instance, cmd, DID_OK);
2250
2251 consumer++;
2252 if (consumer == (instance->max_fw_cmds + 1)) {
2253 consumer = 0;
2254 }
2255 }
2256
2257 *instance->consumer = cpu_to_le32(producer);
2258
2259 spin_unlock_irqrestore(&instance->completion_lock, flags);
2260
2261 /*
2262 * Check if we can restore can_queue
2263 */
2264 megasas_check_and_restore_queue_depth(instance);
2265 }
2266
2267 static void megasas_sriov_heartbeat_handler(struct timer_list *t);
2268
2269 /**
2270  * megasas_start_timer - Initializes the SR-IOV heartbeat timer object
2271 * @instance: Adapter soft state
2272 *
2273 */
2274 void megasas_start_timer(struct megasas_instance *instance)
2275 {
2276 struct timer_list *timer = &instance->sriov_heartbeat_timer;
2277
2278 timer_setup(timer, megasas_sriov_heartbeat_handler, 0);
2279 timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF;
2280 add_timer(timer);
2281 }
2282
2283 static void
2284 megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
2285
2286 static void
2287 process_fw_state_change_wq(struct work_struct *work);
2288
2289 static void megasas_do_ocr(struct megasas_instance *instance)
2290 {
2291 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
2292 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
2293 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2294 *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
2295 }
2296 instance->instancet->disable_intr(instance);
2297 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
2298 instance->issuepend_done = 0;
2299
2300 atomic_set(&instance->fw_outstanding, 0);
2301 megasas_internal_reset_defer_cmds(instance);
2302 process_fw_state_change_wq(&instance->work_init);
2303 }
2304
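/**
 * megasas_get_ld_vf_affiliation_111 - Get LD/VF affiliation (PlasmaFW111)
 * @instance:	Adapter soft state
 * @initial:	non-zero for the first (initial) query
 *
 * Issues MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 and, for non-initial calls,
 * compares the new affiliation with the cached copy. Returns 1 if a rescan
 * is needed, 0 otherwise, or -ENOMEM on allocation failure.
 */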
2305 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2306 int initial)
2307 {
2308 struct megasas_cmd *cmd;
2309 struct megasas_dcmd_frame *dcmd;
2310 struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
2311 dma_addr_t new_affiliation_111_h;
2312 int ld, retval = 0;
2313 u8 thisVf;
2314
2315 cmd = megasas_get_cmd(instance);
2316
2317 if (!cmd) {
2318 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111: "
2319 "Failed to get cmd for scsi%d\n",
2320 instance->host->host_no);
2321 return -ENOMEM;
2322 }
2323
2324 dcmd = &cmd->frame->dcmd;
2325
2326 if (!instance->vf_affiliation_111) {
2327 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2328 "affiliation for scsi%d\n", instance->host->host_no);
2329 megasas_return_cmd(instance, cmd);
2330 return -ENOMEM;
2331 }
2332
2333 if (initial)
2334 memset(instance->vf_affiliation_111, 0,
2335 sizeof(struct MR_LD_VF_AFFILIATION_111));
2336 else {
2337 new_affiliation_111 =
2338 dma_alloc_coherent(&instance->pdev->dev,
2339 sizeof(struct MR_LD_VF_AFFILIATION_111),
2340 &new_affiliation_111_h, GFP_KERNEL);
2341 if (!new_affiliation_111) {
2342 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2343 "memory for new affiliation for scsi%d\n",
2344 instance->host->host_no);
2345 megasas_return_cmd(instance, cmd);
2346 return -ENOMEM;
2347 }
2348 }
2349
2350 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2351
2352 dcmd->cmd = MFI_CMD_DCMD;
2353 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2354 dcmd->sge_count = 1;
2355 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2356 dcmd->timeout = 0;
2357 dcmd->pad_0 = 0;
2358 dcmd->data_xfer_len =
2359 cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
2360 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
2361
2362 if (initial)
2363 dcmd->sgl.sge32[0].phys_addr =
2364 cpu_to_le32(instance->vf_affiliation_111_h);
2365 else
2366 dcmd->sgl.sge32[0].phys_addr =
2367 cpu_to_le32(new_affiliation_111_h);
2368
2369 dcmd->sgl.sge32[0].length = cpu_to_le32(
2370 sizeof(struct MR_LD_VF_AFFILIATION_111));
2371
2372 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2373 "scsi%d\n", instance->host->host_no);
2374
2375 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2376 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2377 " failed with status 0x%x for scsi%d\n",
2378 dcmd->cmd_status, instance->host->host_no);
2379 retval = 1; /* Do a scan if we couldn't get affiliation */
2380 goto out;
2381 }
2382
2383 if (!initial) {
2384 thisVf = new_affiliation_111->thisVf;
2385 for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
2386 if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
2387 new_affiliation_111->map[ld].policy[thisVf]) {
2388 dev_warn(&instance->pdev->dev, "SR-IOV: "
2389 "Got new LD/VF affiliation for scsi%d\n",
2390 instance->host->host_no);
2391 memcpy(instance->vf_affiliation_111,
2392 new_affiliation_111,
2393 sizeof(struct MR_LD_VF_AFFILIATION_111));
2394 retval = 1;
2395 goto out;
2396 }
2397 }
2398 out:
2399 if (new_affiliation_111) {
2400 dma_free_coherent(&instance->pdev->dev,
2401 sizeof(struct MR_LD_VF_AFFILIATION_111),
2402 new_affiliation_111,
2403 new_affiliation_111_h);
2404 }
2405
2406 megasas_return_cmd(instance, cmd);
2407
2408 return retval;
2409 }
2410
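/**
 * megasas_get_ld_vf_affiliation_12 - Get LD/VF affiliation (newer firmware)
 * @instance:	Adapter soft state
 * @initial:	non-zero for the first (initial) query
 *
 * Issues MR_DCMD_LD_VF_MAP_GET_ALL_LDS and compares the returned map with
 * the cached one in both directions. Returns 1 if the affiliation changed
 * and a rescan is needed, 0 otherwise, or -ENOMEM on allocation failure.
 */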
2411 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2412 int initial)
2413 {
2414 struct megasas_cmd *cmd;
2415 struct megasas_dcmd_frame *dcmd;
2416 struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
2417 struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
2418 dma_addr_t new_affiliation_h;
2419 int i, j, retval = 0, found = 0, doscan = 0;
2420 u8 thisVf;
2421
2422 cmd = megasas_get_cmd(instance);
2423
2424 if (!cmd) {
2425 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: "
2426 "Failed to get cmd for scsi%d\n",
2427 instance->host->host_no);
2428 return -ENOMEM;
2429 }
2430
2431 dcmd = &cmd->frame->dcmd;
2432
2433 if (!instance->vf_affiliation) {
2434 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2435 "affiliation for scsi%d\n", instance->host->host_no);
2436 megasas_return_cmd(instance, cmd);
2437 return -ENOMEM;
2438 }
2439
2440 if (initial)
2441 memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2442 sizeof(struct MR_LD_VF_AFFILIATION));
2443 else {
2444 new_affiliation =
2445 dma_alloc_coherent(&instance->pdev->dev,
2446 (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION),
2447 &new_affiliation_h, GFP_KERNEL);
2448 if (!new_affiliation) {
2449 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2450 "memory for new affiliation for scsi%d\n",
2451 instance->host->host_no);
2452 megasas_return_cmd(instance, cmd);
2453 return -ENOMEM;
2454 }
2455 }
2456
2457 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2458
2459 dcmd->cmd = MFI_CMD_DCMD;
2460 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2461 dcmd->sge_count = 1;
2462 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2463 dcmd->timeout = 0;
2464 dcmd->pad_0 = 0;
2465 dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2466 sizeof(struct MR_LD_VF_AFFILIATION));
2467 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
2468
2469 if (initial)
2470 dcmd->sgl.sge32[0].phys_addr =
2471 cpu_to_le32(instance->vf_affiliation_h);
2472 else
2473 dcmd->sgl.sge32[0].phys_addr =
2474 cpu_to_le32(new_affiliation_h);
2475
2476 dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2477 sizeof(struct MR_LD_VF_AFFILIATION));
2478
2479 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2480 "scsi%d\n", instance->host->host_no);
2481
2482
2483 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2484 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2485 " failed with status 0x%x for scsi%d\n",
2486 dcmd->cmd_status, instance->host->host_no);
2487 retval = 1; /* Do a scan if we couldn't get affiliation */
2488 goto out;
2489 }
2490
2491 if (!initial) {
2492 if (!new_affiliation->ldCount) {
2493 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2494 "affiliation for passive path for scsi%d\n",
2495 instance->host->host_no);
2496 retval = 1;
2497 goto out;
2498 }
2499 newmap = new_affiliation->map;
2500 savedmap = instance->vf_affiliation->map;
2501 thisVf = new_affiliation->thisVf;
2502 for (i = 0 ; i < new_affiliation->ldCount; i++) {
2503 found = 0;
2504 for (j = 0; j < instance->vf_affiliation->ldCount;
2505 j++) {
2506 if (newmap->ref.targetId ==
2507 savedmap->ref.targetId) {
2508 found = 1;
2509 if (newmap->policy[thisVf] !=
2510 savedmap->policy[thisVf]) {
2511 doscan = 1;
2512 goto out;
2513 }
2514 }
2515 savedmap = (struct MR_LD_VF_MAP *)
2516 ((unsigned char *)savedmap +
2517 savedmap->size);
2518 }
2519 if (!found && newmap->policy[thisVf] !=
2520 MR_LD_ACCESS_HIDDEN) {
2521 doscan = 1;
2522 goto out;
2523 }
2524 newmap = (struct MR_LD_VF_MAP *)
2525 ((unsigned char *)newmap + newmap->size);
2526 }
2527
2528 newmap = new_affiliation->map;
2529 savedmap = instance->vf_affiliation->map;
2530
2531 for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
2532 found = 0;
2533 for (j = 0 ; j < new_affiliation->ldCount; j++) {
2534 if (savedmap->ref.targetId ==
2535 newmap->ref.targetId) {
2536 found = 1;
2537 if (savedmap->policy[thisVf] !=
2538 newmap->policy[thisVf]) {
2539 doscan = 1;
2540 goto out;
2541 }
2542 }
2543 newmap = (struct MR_LD_VF_MAP *)
2544 ((unsigned char *)newmap +
2545 newmap->size);
2546 }
2547 if (!found && savedmap->policy[thisVf] !=
2548 MR_LD_ACCESS_HIDDEN) {
2549 doscan = 1;
2550 goto out;
2551 }
2552 savedmap = (struct MR_LD_VF_MAP *)
2553 ((unsigned char *)savedmap +
2554 savedmap->size);
2555 }
2556 }
2557 out:
2558 if (doscan) {
2559 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2560 "affiliation for scsi%d\n", instance->host->host_no);
2561 memcpy(instance->vf_affiliation, new_affiliation,
2562 new_affiliation->size);
2563 retval = 1;
2564 }
2565
2566 if (new_affiliation)
2567 dma_free_coherent(&instance->pdev->dev,
2568 (MAX_LOGICAL_DRIVES + 1) *
2569 sizeof(struct MR_LD_VF_AFFILIATION),
2570 new_affiliation, new_affiliation_h);
2571 megasas_return_cmd(instance, cmd);
2572
2573 return retval;
2574 }
2575
2576 /* This function will get the current SR-IOV LD/VF affiliation */
2577 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
2578 int initial)
2579 {
2580 int retval;
2581
2582 if (instance->PlasmaFW111)
2583 retval = megasas_get_ld_vf_affiliation_111(instance, initial);
2584 else
2585 retval = megasas_get_ld_vf_affiliation_12(instance, initial);
2586 return retval;
2587 }
2588
2589 /* This function will tell FW to start the SR-IOV heartbeat */
2590 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2591 int initial)
2592 {
2593 struct megasas_cmd *cmd;
2594 struct megasas_dcmd_frame *dcmd;
2595 int retval = 0;
2596
2597 cmd = megasas_get_cmd(instance);
2598
2599 if (!cmd) {
2600 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
2601 "Failed to get cmd for scsi%d\n",
2602 instance->host->host_no);
2603 return -ENOMEM;
2604 }
2605
2606 dcmd = &cmd->frame->dcmd;
2607
2608 if (initial) {
2609 instance->hb_host_mem =
2610 dma_alloc_coherent(&instance->pdev->dev,
2611 sizeof(struct MR_CTRL_HB_HOST_MEM),
2612 &instance->hb_host_mem_h,
2613 GFP_KERNEL);
2614 if (!instance->hb_host_mem) {
2615 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2616 " memory for heartbeat host memory for scsi%d\n",
2617 instance->host->host_no);
2618 retval = -ENOMEM;
2619 goto out;
2620 }
2621 }
2622
2623 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2624
2625 dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
2626 dcmd->cmd = MFI_CMD_DCMD;
2627 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2628 dcmd->sge_count = 1;
2629 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2630 dcmd->timeout = 0;
2631 dcmd->pad_0 = 0;
2632 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2633 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2634
2635 megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h,
2636 sizeof(struct MR_CTRL_HB_HOST_MEM));
2637
2638 dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
2639 instance->host->host_no);
2640
2641 if ((instance->adapter_type != MFI_SERIES) &&
2642 !instance->mask_interrupts)
2643 retval = megasas_issue_blocked_cmd(instance, cmd,
2644 MEGASAS_ROUTINE_WAIT_TIME_VF);
2645 else
2646 retval = megasas_issue_polled(instance, cmd);
2647
2648 if (retval) {
2649 dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2650 "_MEM_ALLOC DCMD %s for scsi%d\n",
2651 (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
2652 "timed out" : "failed", instance->host->host_no);
2653 retval = 1;
2654 }
2655
2656 out:
2657 megasas_return_cmd(instance, cmd);
2658
2659 return retval;
2660 }
2661
2662 /* Handler for SR-IOV heartbeat */
2663 static void megasas_sriov_heartbeat_handler(struct timer_list *t)
2664 {
2665 struct megasas_instance *instance =
2666 from_timer(instance, t, sriov_heartbeat_timer);
2667
2668 if (instance->hb_host_mem->HB.fwCounter !=
2669 instance->hb_host_mem->HB.driverCounter) {
2670 instance->hb_host_mem->HB.driverCounter =
2671 instance->hb_host_mem->HB.fwCounter;
2672 mod_timer(&instance->sriov_heartbeat_timer,
2673 jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2674 } else {
2675 dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
2676 "completed for scsi%d\n", instance->host->host_no);
2677 schedule_work(&instance->work_init);
2678 }
2679 }
2680
2681 /**
2682 * megasas_wait_for_outstanding - Wait for all outstanding cmds
2683 * @instance: Adapter soft state
2684 *
2685 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
2686 * complete all its outstanding commands. Returns error if one or more IOs
2687 * are pending after this time period. It also marks the controller dead.
2688 */
2689 static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2690 {
2691 int i, sl, outstanding;
2692 u32 reset_index;
2693 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
2694 unsigned long flags;
2695 struct list_head clist_local;
2696 struct megasas_cmd *reset_cmd;
2697 u32 fw_state;
2698
2699 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2700 dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
2701 __func__, __LINE__);
2702 return FAILED;
2703 }
2704
2705 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2706
2707 INIT_LIST_HEAD(&clist_local);
2708 spin_lock_irqsave(&instance->hba_lock, flags);
2709 list_splice_init(&instance->internal_reset_pending_q,
2710 &clist_local);
2711 spin_unlock_irqrestore(&instance->hba_lock, flags);
2712
2713 dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
2714 for (i = 0; i < wait_time; i++) {
2715 msleep(1000);
2716 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
2717 break;
2718 }
2719
2720 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2721 dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
2722 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2723 return FAILED;
2724 }
2725
2726 reset_index = 0;
2727 while (!list_empty(&clist_local)) {
2728 reset_cmd = list_entry((&clist_local)->next,
2729 struct megasas_cmd, list);
2730 list_del_init(&reset_cmd->list);
2731 if (reset_cmd->scmd) {
2732 reset_cmd->scmd->result = DID_REQUEUE << 16;
2733 dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
2734 reset_index, reset_cmd,
2735 reset_cmd->scmd->cmnd[0]);
2736
2737 reset_cmd->scmd->scsi_done(reset_cmd->scmd);
2738 megasas_return_cmd(instance, reset_cmd);
2739 } else if (reset_cmd->sync_cmd) {
2740 dev_notice(&instance->pdev->dev, "%p synch cmds "
2741 "reset queue\n",
2742 reset_cmd);
2743
2744 reset_cmd->cmd_status_drv = DCMD_INIT;
2745 instance->instancet->fire_cmd(instance,
2746 reset_cmd->frame_phys_addr,
2747 0, instance->reg_set);
2748 } else {
2749 dev_notice(&instance->pdev->dev, "%p unexpected "
2750 "cmds lst\n",
2751 reset_cmd);
2752 }
2753 reset_index++;
2754 }
2755
2756 return SUCCESS;
2757 }
2758
2759 for (i = 0; i < resetwaittime; i++) {
2760 outstanding = atomic_read(&instance->fw_outstanding);
2761
2762 if (!outstanding)
2763 break;
2764
2765 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2766 dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
2767 "commands to complete\n",i,outstanding);
2768 /*
2769  * Call the cmd completion routine. Cmds are
2770  * completed directly without depending on the isr.
2771 */
2772 megasas_complete_cmd_dpc((unsigned long)instance);
2773 }
2774
2775 msleep(1000);
2776 }
2777
2778 i = 0;
2779 outstanding = atomic_read(&instance->fw_outstanding);
2780 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2781
2782 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2783 goto no_outstanding;
2784
2785 if (instance->disableOnlineCtrlReset)
2786 goto kill_hba_and_failed;
2787 do {
2788 if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
2789 dev_info(&instance->pdev->dev,
2790 "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n",
2791 __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
2792 if (i == 3)
2793 goto kill_hba_and_failed;
2794 megasas_do_ocr(instance);
2795
2796 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2797 dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
2798 __func__, __LINE__);
2799 return FAILED;
2800 }
2801 dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
2802 __func__, __LINE__);
2803
2804 for (sl = 0; sl < 10; sl++)
2805 msleep(500);
2806
2807 outstanding = atomic_read(&instance->fw_outstanding);
2808
2809 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2810 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2811 goto no_outstanding;
2812 }
2813 i++;
2814 } while (i <= 3);
2815
2816 no_outstanding:
2817
2818 dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
2819 __func__, __LINE__);
2820 return SUCCESS;
2821
2822 kill_hba_and_failed:
2823
2824 /* Reset not supported, kill adapter */
2825 dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
2826 " disableOnlineCtrlReset %d fw_outstanding %d \n",
2827 __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
2828 atomic_read(&instance->fw_outstanding));
2829 megasas_dump_pending_frames(instance);
2830 megaraid_sas_kill_hba(instance);
2831
2832 return FAILED;
2833 }
2834
2835 /**
2836 * megasas_generic_reset - Generic reset routine
2837 * @scmd: Mid-layer SCSI command
2838 *
2839 * This routine implements a generic reset handler for device, bus and host
2840 * reset requests. Device, bus and host specific reset handlers can use this
2841 * function after they do their specific tasks.
2842 */
2843 static int megasas_generic_reset(struct scsi_cmnd *scmd)
2844 {
2845 int ret_val;
2846 struct megasas_instance *instance;
2847
2848 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2849
2850 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
2851 scmd->cmnd[0], scmd->retries);
2852
2853 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2854 dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
2855 return FAILED;
2856 }
2857
2858 ret_val = megasas_wait_for_outstanding(instance);
2859 if (ret_val == SUCCESS)
2860 dev_notice(&instance->pdev->dev, "reset successful\n");
2861 else
2862 dev_err(&instance->pdev->dev, "failed to do reset\n");
2863
2864 return ret_val;
2865 }
2866
2867 /**
2868 * megasas_reset_timer - quiesce the adapter if required
2869 * @scmd: scsi cmnd
2870 *
2871 * Sets the FW busy flag and reduces the host->can_queue if the
2872 * cmd has not been completed within the timeout period.
2873 */
2874 static enum
2875 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
2876 {
2877 struct megasas_instance *instance;
2878 unsigned long flags;
2879
2880 if (time_after(jiffies, scmd->jiffies_at_alloc +
2881 (scmd_timeout * 2) * HZ)) {
2882 return BLK_EH_DONE;
2883 }
2884
2885 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2886 if (!(instance->flag & MEGASAS_FW_BUSY)) {
2887 /* Mark FW as busy and throttle IO */
2888 spin_lock_irqsave(instance->host->host_lock, flags);
2889
2890 instance->host->can_queue = instance->throttlequeuedepth;
2891 instance->last_time = jiffies;
2892 instance->flag |= MEGASAS_FW_BUSY;
2893
2894 spin_unlock_irqrestore(instance->host->host_lock, flags);
2895 }
2896 return BLK_EH_RESET_TIMER;
2897 }
2898
2899 /**
2900 * megasas_dump - This function will print hexdump of provided buffer.
2901 * @buf: Buffer to be dumped
2902 * @sz: Size in bytes
2903 * @format: Different formats of dumping e.g. format=n will
2904 * cause only 'n' 32 bit words to be dumped in a single
2905 * line.
2906 */
2907 inline void
2908 megasas_dump(void *buf, int sz, int format)
2909 {
2910 int i;
2911 __le32 *buf_loc = (__le32 *)buf;
2912
2913 for (i = 0; i < (sz / sizeof(__le32)); i++) {
2914 if ((i % format) == 0) {
2915 if (i != 0)
2916 printk(KERN_CONT "\n");
2917 printk(KERN_CONT "%08x: ", (i * 4));
2918 }
2919 printk(KERN_CONT "%08x ", le32_to_cpu(buf_loc[i]));
2920 }
2921 printk(KERN_CONT "\n");
2922 }
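/*
 * Illustrative example: megasas_dump(buf, 64, 8) prints the 16 32-bit words
 * of a 64-byte buffer as two lines of eight words each, prefixed with the
 * byte offset of the first word on the line.
 */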
2923
2924 /**
2925 * megasas_dump_reg_set - This function will print hexdump of register set
2926 * @reg_set: Register set to be dumped
2927 */
2928 inline void
2929 megasas_dump_reg_set(void __iomem *reg_set)
2930 {
2931 unsigned int i, sz = 256;
2932 u32 __iomem *reg = (u32 __iomem *)reg_set;
2933
2934 for (i = 0; i < (sz / sizeof(u32)); i++)
2935 printk("%08x: %08x\n", (i * 4), readl(&reg[i]));
2936 }
2937
2938 /**
2939 * megasas_dump_fusion_io - This function will print key details
2940 * of SCSI IO
2941 * @scmd: SCSI command pointer of SCSI IO
2942 */
2943 void
2944 megasas_dump_fusion_io(struct scsi_cmnd *scmd)
2945 {
2946 struct megasas_cmd_fusion *cmd;
2947 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2948 struct megasas_instance *instance;
2949
2950 cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
2951 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2952
2953 scmd_printk(KERN_INFO, scmd,
2954 "scmd: (0x%p) retries: 0x%x allowed: 0x%x\n",
2955 scmd, scmd->retries, scmd->allowed);
2956 scsi_print_command(scmd);
2957
2958 if (cmd) {
2959 req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
2960 scmd_printk(KERN_INFO, scmd, "Request descriptor details:\n");
2961 scmd_printk(KERN_INFO, scmd,
2962 "RequestFlags:0x%x MSIxIndex:0x%x SMID:0x%x LMID:0x%x DevHandle:0x%x\n",
2963 req_desc->SCSIIO.RequestFlags,
2964 req_desc->SCSIIO.MSIxIndex, req_desc->SCSIIO.SMID,
2965 req_desc->SCSIIO.LMID, req_desc->SCSIIO.DevHandle);
2966
2967 printk(KERN_INFO "IO request frame:\n");
2968 megasas_dump(cmd->io_request,
2969 MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8);
2970 printk(KERN_INFO "Chain frame:\n");
2971 megasas_dump(cmd->sg_frame,
2972 instance->max_chain_frame_sz, 8);
2973 }
2974
2975 }
2976
2977 /*
2978 * megasas_dump_sys_regs - This function will dump system registers through
2979 * sysfs.
2980 * @reg_set: Pointer to System register set.
2981 * @buf: Buffer to which output is to be written.
2982 * @return: Number of bytes written to buffer.
2983 */
2984 static inline ssize_t
2985 megasas_dump_sys_regs(void __iomem *reg_set, char *buf)
2986 {
2987 unsigned int i, sz = 256;
2988 int bytes_wrote = 0;
2989 char *loc = (char *)buf;
2990 u32 __iomem *reg = (u32 __iomem *)reg_set;
2991
2992 for (i = 0; i < sz / sizeof(u32); i++) {
2993 bytes_wrote += scnprintf(loc + bytes_wrote,
2994 PAGE_SIZE - bytes_wrote,
2995 "%08x: %08x\n", (i * 4),
2996 readl(&reg[i]));
2997 }
2998 return bytes_wrote;
2999 }
3000
3001 /**
3002 * megasas_reset_bus_host - Bus & host reset handler entry point
3003 * @scmd: Mid-layer SCSI command
3004 */
3005 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
3006 {
3007 int ret;
3008 struct megasas_instance *instance;
3009
3010 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3011
3012 scmd_printk(KERN_INFO, scmd,
3013 "OCR is requested due to IO timeout!!\n");
3014
3015 scmd_printk(KERN_INFO, scmd,
3016 "SCSI host state: %d SCSI host busy: %d FW outstanding: %d\n",
3017 scmd->device->host->shost_state,
3018 scsi_host_busy(scmd->device->host),
3019 atomic_read(&instance->fw_outstanding));
3020 /*
3021 * First wait for all commands to complete
3022 */
3023 if (instance->adapter_type == MFI_SERIES) {
3024 ret = megasas_generic_reset(scmd);
3025 } else {
3026 megasas_dump_fusion_io(scmd);
3027 ret = megasas_reset_fusion(scmd->device->host,
3028 SCSIIO_TIMEOUT_OCR);
3029 }
3030
3031 return ret;
3032 }
3033
3034 /**
3035 * megasas_task_abort - Issues task abort request to firmware
3036 * (supported only for fusion adapters)
3037 * @scmd: SCSI command pointer
3038 */
3039 static int megasas_task_abort(struct scsi_cmnd *scmd)
3040 {
3041 int ret;
3042 struct megasas_instance *instance;
3043
3044 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3045
3046 if (instance->adapter_type != MFI_SERIES)
3047 ret = megasas_task_abort_fusion(scmd);
3048 else {
3049 sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
3050 ret = FAILED;
3051 }
3052
3053 return ret;
3054 }
3055
3056 /**
3057 * megasas_reset_target: Issues target reset request to firmware
3058 * (supported only for fusion adapters)
3059 * @scmd: SCSI command pointer
3060 */
3061 static int megasas_reset_target(struct scsi_cmnd *scmd)
3062 {
3063 int ret;
3064 struct megasas_instance *instance;
3065
3066 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3067
3068 if (instance->adapter_type != MFI_SERIES)
3069 ret = megasas_reset_target_fusion(scmd);
3070 else {
3071 sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
3072 ret = FAILED;
3073 }
3074
3075 return ret;
3076 }
3077
3078 /**
3079 * megasas_bios_param - Returns disk geometry for a disk
3080 * @sdev: device handle
3081 * @bdev: block device
3082 * @capacity: drive capacity
3083 * @geom: geometry parameters
3084 */
3085 static int
3086 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
3087 sector_t capacity, int geom[])
3088 {
3089 int heads;
3090 int sectors;
3091 sector_t cylinders;
3092 unsigned long tmp;
3093
3094 /* Default heads (64) & sectors (32) */
3095 heads = 64;
3096 sectors = 32;
3097
3098 tmp = heads * sectors;
3099 cylinders = capacity;
3100
3101 sector_div(cylinders, tmp);
3102
3103 /*
3104 * Handle extended translation size for logical drives > 1Gb
3105 */
3106
3107 if (capacity >= 0x200000) {
3108 heads = 255;
3109 sectors = 63;
3110 tmp = heads*sectors;
3111 cylinders = capacity;
3112 sector_div(cylinders, tmp);
3113 }
3114
3115 geom[0] = heads;
3116 geom[1] = sectors;
3117 geom[2] = cylinders;
3118
3119 return 0;
3120 }
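/*
 * Illustrative example: capacity = 0x200000 sectors (1 GiB with 512-byte
 * sectors) selects the extended 255 heads / 63 sectors translation above,
 * giving cylinders = 2097152 / (255 * 63) = 130.
 */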
3121
3122 static void megasas_aen_polling(struct work_struct *work);
3123
3124 /**
3125 * megasas_service_aen - Processes an event notification
3126 * @instance: Adapter soft state
3127 * @cmd: AEN command completed by the ISR
3128 *
3129 * For AEN, driver sends a command down to FW that is held by the FW till an
3130 * event occurs. When an event of interest occurs, FW completes the command
3131 * that it was previously holding.
3132 *
3133  * This routine sends a SIGIO signal to processes that have registered with the
3134 * driver for AEN.
3135 */
3136 static void
3137 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
3138 {
3139 unsigned long flags;
3140
3141 /*
3142 * Don't signal app if it is just an aborted previously registered aen
3143 */
3144 if ((!cmd->abort_aen) && (instance->unload == 0)) {
3145 spin_lock_irqsave(&poll_aen_lock, flags);
3146 megasas_poll_wait_aen = 1;
3147 spin_unlock_irqrestore(&poll_aen_lock, flags);
3148 wake_up(&megasas_poll_wait);
3149 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
3150 }
3151 else
3152 cmd->abort_aen = 0;
3153
3154 instance->aen_cmd = NULL;
3155
3156 megasas_return_cmd(instance, cmd);
3157
3158 if ((instance->unload == 0) &&
3159 ((instance->issuepend_done == 1))) {
3160 struct megasas_aen_event *ev;
3161
3162 ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
3163 if (!ev) {
3164 dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
3165 } else {
3166 ev->instance = instance;
3167 instance->ev = ev;
3168 INIT_DELAYED_WORK(&ev->hotplug_work,
3169 megasas_aen_polling);
3170 schedule_delayed_work(&ev->hotplug_work, 0);
3171 }
3172 }
3173 }
3174
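/*
 * The fw_crash_buffer sysfs attribute is read in two steps: an application
 * first writes a byte offset into the attribute and then reads it back to
 * fetch up to a page of crash dump data starting at that offset. A
 * hypothetical shell sketch (host0 is only an example):
 *
 *   echo 0 > /sys/class/scsi_host/host0/fw_crash_buffer
 *   dd if=/sys/class/scsi_host/host0/fw_crash_buffer of=chunk0.bin
 */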
3175 static ssize_t
3176 fw_crash_buffer_store(struct device *cdev,
3177 struct device_attribute *attr, const char *buf, size_t count)
3178 {
3179 struct Scsi_Host *shost = class_to_shost(cdev);
3180 struct megasas_instance *instance =
3181 (struct megasas_instance *) shost->hostdata;
3182 int val = 0;
3183 unsigned long flags;
3184
3185 if (kstrtoint(buf, 0, &val) != 0)
3186 return -EINVAL;
3187
3188 spin_lock_irqsave(&instance->crashdump_lock, flags);
3189 instance->fw_crash_buffer_offset = val;
3190 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3191 return strlen(buf);
3192 }
3193
3194 static ssize_t
3195 fw_crash_buffer_show(struct device *cdev,
3196 struct device_attribute *attr, char *buf)
3197 {
3198 struct Scsi_Host *shost = class_to_shost(cdev);
3199 struct megasas_instance *instance =
3200 (struct megasas_instance *) shost->hostdata;
3201 u32 size;
3202 unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
3203 unsigned long chunk_left_bytes;
3204 unsigned long src_addr;
3205 unsigned long flags;
3206 u32 buff_offset;
3207
3208 spin_lock_irqsave(&instance->crashdump_lock, flags);
3209 buff_offset = instance->fw_crash_buffer_offset;
3210 if (!instance->crash_dump_buf &&
3211 !((instance->fw_crash_state == AVAILABLE) ||
3212 (instance->fw_crash_state == COPYING))) {
3213 dev_err(&instance->pdev->dev,
3214 "Firmware crash dump is not available\n");
3215 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3216 return -EINVAL;
3217 }
3218
3219 if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
3220 dev_err(&instance->pdev->dev,
3221 "Firmware crash dump offset is out of range\n");
3222 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3223 return 0;
3224 }
3225
3226 size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
3227 chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
3228 size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
3229 size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3230
3231 src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
3232 (buff_offset % dmachunk);
3233 memcpy(buf, (void *)src_addr, size);
3234 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3235
3236 return size;
3237 }
3238
3239 static ssize_t
3240 fw_crash_buffer_size_show(struct device *cdev,
3241 struct device_attribute *attr, char *buf)
3242 {
3243 struct Scsi_Host *shost = class_to_shost(cdev);
3244 struct megasas_instance *instance =
3245 (struct megasas_instance *) shost->hostdata;
3246
3247 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
3248 ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
3249 }
3250
3251 static ssize_t
3252 fw_crash_state_store(struct device *cdev,
3253 struct device_attribute *attr, const char *buf, size_t count)
3254 {
3255 struct Scsi_Host *shost = class_to_shost(cdev);
3256 struct megasas_instance *instance =
3257 (struct megasas_instance *) shost->hostdata;
3258 int val = 0;
3259 unsigned long flags;
3260
3261 if (kstrtoint(buf, 0, &val) != 0)
3262 return -EINVAL;
3263
3264 if ((val <= AVAILABLE || val > COPY_ERROR)) {
3265 dev_err(&instance->pdev->dev, "application updates invalid "
3266 "firmware crash state\n");
3267 return -EINVAL;
3268 }
3269
3270 instance->fw_crash_state = val;
3271
3272 if ((val == COPIED) || (val == COPY_ERROR)) {
3273 spin_lock_irqsave(&instance->crashdump_lock, flags);
3274 megasas_free_host_crash_buffer(instance);
3275 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3276 if (val == COPY_ERROR)
3277 dev_info(&instance->pdev->dev, "application failed to "
3278 "copy Firmware crash dump\n");
3279 else
3280 dev_info(&instance->pdev->dev, "Firmware crash dump "
3281 "copied successfully\n");
3282 }
3283 return strlen(buf);
3284 }
3285
3286 static ssize_t
3287 fw_crash_state_show(struct device *cdev,
3288 struct device_attribute *attr, char *buf)
3289 {
3290 struct Scsi_Host *shost = class_to_shost(cdev);
3291 struct megasas_instance *instance =
3292 (struct megasas_instance *) shost->hostdata;
3293
3294 return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
3295 }
3296
3297 static ssize_t
3298 page_size_show(struct device *cdev,
3299 struct device_attribute *attr, char *buf)
3300 {
3301 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
3302 }
3303
3304 static ssize_t
3305 ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
3306 char *buf)
3307 {
3308 struct Scsi_Host *shost = class_to_shost(cdev);
3309 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3310
3311 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
3312 }
3313
3314 static ssize_t
3315 fw_cmds_outstanding_show(struct device *cdev,
3316 struct device_attribute *attr, char *buf)
3317 {
3318 struct Scsi_Host *shost = class_to_shost(cdev);
3319 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3320
3321 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
3322 }
3323
3324 static ssize_t
3325 enable_sdev_max_qd_show(struct device *cdev,
3326 struct device_attribute *attr, char *buf)
3327 {
3328 struct Scsi_Host *shost = class_to_shost(cdev);
3329 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3330
3331 return snprintf(buf, PAGE_SIZE, "%d\n", instance->enable_sdev_max_qd);
3332 }
3333
3334 static ssize_t
3335 enable_sdev_max_qd_store(struct device *cdev,
3336 struct device_attribute *attr, const char *buf, size_t count)
3337 {
3338 struct Scsi_Host *shost = class_to_shost(cdev);
3339 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3340 u32 val = 0;
3341 bool is_target_prop;
3342 int ret_target_prop = DCMD_FAILED;
3343 struct scsi_device *sdev;
3344
3345 if (kstrtou32(buf, 0, &val) != 0) {
3346 pr_err("megasas: could not set enable_sdev_max_qd\n");
3347 return -EINVAL;
3348 }
3349
3350 mutex_lock(&instance->reset_mutex);
3351 if (val)
3352 instance->enable_sdev_max_qd = true;
3353 else
3354 instance->enable_sdev_max_qd = false;
3355
3356 shost_for_each_device(sdev, shost) {
3357 ret_target_prop = megasas_get_target_prop(instance, sdev);
3358 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
3359 megasas_set_fw_assisted_qd(sdev, is_target_prop);
3360 }
3361 mutex_unlock(&instance->reset_mutex);
3362
3363 return strlen(buf);
3364 }
3365
3366 static ssize_t
3367 dump_system_regs_show(struct device *cdev,
3368 struct device_attribute *attr, char *buf)
3369 {
3370 struct Scsi_Host *shost = class_to_shost(cdev);
3371 struct megasas_instance *instance =
3372 (struct megasas_instance *)shost->hostdata;
3373
3374 return megasas_dump_sys_regs(instance->reg_set, buf);
3375 }
3376
3377 static ssize_t
3378 raid_map_id_show(struct device *cdev, struct device_attribute *attr,
3379 char *buf)
3380 {
3381 struct Scsi_Host *shost = class_to_shost(cdev);
3382 struct megasas_instance *instance =
3383 (struct megasas_instance *)shost->hostdata;
3384
3385 return snprintf(buf, PAGE_SIZE, "%ld\n",
3386 (unsigned long)instance->map_id);
3387 }
3388
3389 static DEVICE_ATTR_RW(fw_crash_buffer);
3390 static DEVICE_ATTR_RO(fw_crash_buffer_size);
3391 static DEVICE_ATTR_RW(fw_crash_state);
3392 static DEVICE_ATTR_RO(page_size);
3393 static DEVICE_ATTR_RO(ldio_outstanding);
3394 static DEVICE_ATTR_RO(fw_cmds_outstanding);
3395 static DEVICE_ATTR_RW(enable_sdev_max_qd);
3396 static DEVICE_ATTR_RO(dump_system_regs);
3397 static DEVICE_ATTR_RO(raid_map_id);
3398
3399 static struct device_attribute *megaraid_host_attrs[] = {
3400 &dev_attr_fw_crash_buffer_size,
3401 &dev_attr_fw_crash_buffer,
3402 &dev_attr_fw_crash_state,
3403 &dev_attr_page_size,
3404 &dev_attr_ldio_outstanding,
3405 &dev_attr_fw_cmds_outstanding,
3406 &dev_attr_enable_sdev_max_qd,
3407 &dev_attr_dump_system_regs,
3408 &dev_attr_raid_map_id,
3409 NULL,
3410 };
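/*
 * All of the attributes above are hooked into the host template below via
 * shost_attrs, so they appear in the Scsi_Host sysfs directory (typically
 * /sys/class/scsi_host/host<N>/). Illustrative usage, with host0 standing
 * in for the actual host number:
 *
 *   cat /sys/class/scsi_host/host0/fw_cmds_outstanding
 *   echo 1 > /sys/class/scsi_host/host0/enable_sdev_max_qd
 */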
3411
3412 /*
3413 * Scsi host template for megaraid_sas driver
3414 */
3415 static struct scsi_host_template megasas_template = {
3416
3417 .module = THIS_MODULE,
3418 .name = "Avago SAS based MegaRAID driver",
3419 .proc_name = "megaraid_sas",
3420 .slave_configure = megasas_slave_configure,
3421 .slave_alloc = megasas_slave_alloc,
3422 .slave_destroy = megasas_slave_destroy,
3423 .queuecommand = megasas_queue_command,
3424 .eh_target_reset_handler = megasas_reset_target,
3425 .eh_abort_handler = megasas_task_abort,
3426 .eh_host_reset_handler = megasas_reset_bus_host,
3427 .eh_timed_out = megasas_reset_timer,
3428 .shost_attrs = megaraid_host_attrs,
3429 .bios_param = megasas_bios_param,
3430 .change_queue_depth = scsi_change_queue_depth,
3431 .max_segment_size = 0xffffffff,
3432 };
3433
3434 /**
3435 * megasas_complete_int_cmd - Completes an internal command
3436 * @instance: Adapter soft state
3437 * @cmd: Command to be completed
3438 *
3439 * The megasas_issue_blocked_cmd() function waits for a command to complete
3440 * after it issues a command. This function wakes up that waiting routine by
3441 * calling wake_up() on the wait queue.
3442 */
3443 static void
3444 megasas_complete_int_cmd(struct megasas_instance *instance,
3445 struct megasas_cmd *cmd)
3446 {
3447 if (cmd->cmd_status_drv == DCMD_INIT)
3448 cmd->cmd_status_drv =
3449 (cmd->frame->io.cmd_status == MFI_STAT_OK) ?
3450 DCMD_SUCCESS : DCMD_FAILED;
3451
3452 wake_up(&instance->int_cmd_wait_q);
3453 }
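/*
 * The issuing side sets cmd->cmd_status_drv to DCMD_INIT before firing the
 * frame (see the re-issue path in megasas_issue_pending_cmds_again() below)
 * and then sleeps on int_cmd_wait_q. The DCMD_INIT check above avoids
 * overwriting a status that has already been recorded elsewhere, e.g. by
 * the waiter's timeout handling, before the wake_up().
 */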
3454
3455 /**
3456 * megasas_complete_abort - Completes aborting a command
3457 * @instance: Adapter soft state
3458 * @cmd: Cmd that was issued to abort another cmd
3459 *
3460 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
3461 * after it issues an abort on a previously issued command. This function
3462 * wakes up all functions waiting on the same wait queue.
3463 */
3464 static void
3465 megasas_complete_abort(struct megasas_instance *instance,
3466 struct megasas_cmd *cmd)
3467 {
3468 if (cmd->sync_cmd) {
3469 cmd->sync_cmd = 0;
3470 cmd->cmd_status_drv = DCMD_SUCCESS;
3471 wake_up(&instance->abort_cmd_wait_q);
3472 }
3473 }
3474
3475 /**
3476 * megasas_complete_cmd - Completes a command
3477 * @instance: Adapter soft state
3478 * @cmd: Command to be completed
3479 * @alt_status: If non-zero, use this value as status to
3480 * SCSI mid-layer instead of the value returned
3481 * by the FW. This should be used if caller wants
3482 * an alternate status (as in the case of aborted
3483 * commands)
3484 */
3485 void
3486 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3487 u8 alt_status)
3488 {
3489 int exception = 0;
3490 struct megasas_header *hdr = &cmd->frame->hdr;
3491 unsigned long flags;
3492 struct fusion_context *fusion = instance->ctrl_context;
3493 u32 opcode, status;
3494
3495 /* flag for the retry reset */
3496 cmd->retry_for_fw_reset = 0;
3497
3498 if (cmd->scmd)
3499 cmd->scmd->SCp.ptr = NULL;
3500
3501 switch (hdr->cmd) {
3502 case MFI_CMD_INVALID:
3503 /* Some older 1068 controller FW may keep a pended
3504 MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
3505 when booting the kdump kernel. Ignore this command to
3506 prevent a kernel panic on shutdown of the kdump kernel. */
3507 dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
3508 "completed\n");
3509 dev_warn(&instance->pdev->dev, "If you have a controller "
3510 "other than PERC5, please upgrade your firmware\n");
3511 break;
3512 case MFI_CMD_PD_SCSI_IO:
3513 case MFI_CMD_LD_SCSI_IO:
3514
3515 /*
3516 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3517 * issued either through an IO path or an IOCTL path. If it
3518 * was via IOCTL, we will send it to internal completion.
3519 */
3520 if (cmd->sync_cmd) {
3521 cmd->sync_cmd = 0;
3522 megasas_complete_int_cmd(instance, cmd);
3523 break;
3524 }
3525 fallthrough;
3526
3527 case MFI_CMD_LD_READ:
3528 case MFI_CMD_LD_WRITE:
3529
3530 if (alt_status) {
3531 cmd->scmd->result = alt_status << 16;
3532 exception = 1;
3533 }
3534
3535 if (exception) {
3536
3537 atomic_dec(&instance->fw_outstanding);
3538
3539 scsi_dma_unmap(cmd->scmd);
3540 cmd->scmd->scsi_done(cmd->scmd);
3541 megasas_return_cmd(instance, cmd);
3542
3543 break;
3544 }
3545
3546 switch (hdr->cmd_status) {
3547
3548 case MFI_STAT_OK:
3549 cmd->scmd->result = DID_OK << 16;
3550 break;
3551
3552 case MFI_STAT_SCSI_IO_FAILED:
3553 case MFI_STAT_LD_INIT_IN_PROGRESS:
3554 cmd->scmd->result =
3555 (DID_ERROR << 16) | hdr->scsi_status;
3556 break;
3557
3558 case MFI_STAT_SCSI_DONE_WITH_ERROR:
3559
3560 cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
3561
3562 if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
3563 memset(cmd->scmd->sense_buffer, 0,
3564 SCSI_SENSE_BUFFERSIZE);
3565 memcpy(cmd->scmd->sense_buffer, cmd->sense,
3566 hdr->sense_len);
3567
3568 cmd->scmd->result |= DRIVER_SENSE << 24;
3569 }
3570
3571 break;
3572
3573 case MFI_STAT_LD_OFFLINE:
3574 case MFI_STAT_DEVICE_NOT_FOUND:
3575 cmd->scmd->result = DID_BAD_TARGET << 16;
3576 break;
3577
3578 default:
3579 dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
3580 hdr->cmd_status);
3581 cmd->scmd->result = DID_ERROR << 16;
3582 break;
3583 }
3584
3585 atomic_dec(&instance->fw_outstanding);
3586
3587 scsi_dma_unmap(cmd->scmd);
3588 cmd->scmd->scsi_done(cmd->scmd);
3589 megasas_return_cmd(instance, cmd);
3590
3591 break;
3592
3593 case MFI_CMD_SMP:
3594 case MFI_CMD_STP:
3595 case MFI_CMD_NVME:
3596 case MFI_CMD_TOOLBOX:
3597 megasas_complete_int_cmd(instance, cmd);
3598 break;
3599
3600 case MFI_CMD_DCMD:
3601 opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
3602 /* Check for LD map update */
3603 if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
3604 && (cmd->frame->dcmd.mbox.b[1] == 1)) {
3605 fusion->fast_path_io = 0;
3606 spin_lock_irqsave(instance->host->host_lock, flags);
3607 status = cmd->frame->hdr.cmd_status;
3608 instance->map_update_cmd = NULL;
3609 if (status != MFI_STAT_OK) {
3610 if (status != MFI_STAT_NOT_FOUND)
3611 dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
3612 cmd->frame->hdr.cmd_status);
3613 else {
3614 megasas_return_cmd(instance, cmd);
3615 spin_unlock_irqrestore(
3616 instance->host->host_lock,
3617 flags);
3618 break;
3619 }
3620 }
3621
3622 megasas_return_cmd(instance, cmd);
3623
3624 /*
3625 * Set fast path IO to ZERO.
3626 * Validate Map will set proper value.
3627 * Meanwhile all IOs will go as LD IO.
3628 */
3629 if (status == MFI_STAT_OK &&
3630 (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
3631 instance->map_id++;
3632 fusion->fast_path_io = 1;
3633 } else {
3634 fusion->fast_path_io = 0;
3635 }
3636
3637 megasas_sync_map_info(instance);
3638 spin_unlock_irqrestore(instance->host->host_lock,
3639 flags);
3640 break;
3641 }
3642 if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3643 opcode == MR_DCMD_CTRL_EVENT_GET) {
3644 spin_lock_irqsave(&poll_aen_lock, flags);
3645 megasas_poll_wait_aen = 0;
3646 spin_unlock_irqrestore(&poll_aen_lock, flags);
3647 }
3648
3649 /* FW has an updated PD sequence */
3650 if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3651 (cmd->frame->dcmd.mbox.b[0] == 1)) {
3652
3653 spin_lock_irqsave(instance->host->host_lock, flags);
3654 status = cmd->frame->hdr.cmd_status;
3655 instance->jbod_seq_cmd = NULL;
3656 megasas_return_cmd(instance, cmd);
3657
3658 if (status == MFI_STAT_OK) {
3659 instance->pd_seq_map_id++;
3660 /* Re-register a pd sync seq num cmd */
3661 if (megasas_sync_pd_seq_num(instance, true))
3662 instance->use_seqnum_jbod_fp = false;
3663 } else
3664 instance->use_seqnum_jbod_fp = false;
3665
3666 spin_unlock_irqrestore(instance->host->host_lock, flags);
3667 break;
3668 }
3669
3670 /*
3671 * See if got an event notification
3672 */
3673 if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
3674 megasas_service_aen(instance, cmd);
3675 else
3676 megasas_complete_int_cmd(instance, cmd);
3677
3678 break;
3679
3680 case MFI_CMD_ABORT:
3681 /*
3682 * Cmd issued to abort another cmd returned
3683 */
3684 megasas_complete_abort(instance, cmd);
3685 break;
3686
3687 default:
3688 dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
3689 hdr->cmd);
3690 megasas_complete_int_cmd(instance, cmd);
3691 break;
3692 }
3693 }
3694
3695 /**
3696 * megasas_issue_pending_cmds_again - issue all pending cmds
3697 * in FW again because of the fw reset
3698 * @instance: Adapter soft state
3699 */
3700 static inline void
3701 megasas_issue_pending_cmds_again(struct megasas_instance *instance)
3702 {
3703 struct megasas_cmd *cmd;
3704 struct list_head clist_local;
3705 union megasas_evt_class_locale class_locale;
3706 unsigned long flags;
3707 u32 seq_num;
3708
3709 INIT_LIST_HEAD(&clist_local);
3710 spin_lock_irqsave(&instance->hba_lock, flags);
3711 list_splice_init(&instance->internal_reset_pending_q, &clist_local);
3712 spin_unlock_irqrestore(&instance->hba_lock, flags);
3713
3714 while (!list_empty(&clist_local)) {
3715 cmd = list_entry((&clist_local)->next,
3716 struct megasas_cmd, list);
3717 list_del_init(&cmd->list);
3718
3719 if (cmd->sync_cmd || cmd->scmd) {
3720 dev_notice(&instance->pdev->dev, "command %p, %p:%d "
3721 "detected to be pending while HBA reset\n",
3722 cmd, cmd->scmd, cmd->sync_cmd);
3723
3724 cmd->retry_for_fw_reset++;
3725
3726 if (cmd->retry_for_fw_reset == 3) {
3727 dev_notice(&instance->pdev->dev, "cmd %p, %p:%d "
3728 "was tried multiple times during reset. "
3729 "Shutting down the HBA\n",
3730 cmd, cmd->scmd, cmd->sync_cmd);
3731 instance->instancet->disable_intr(instance);
3732 atomic_set(&instance->fw_reset_no_pci_access, 1);
3733 megaraid_sas_kill_hba(instance);
3734 return;
3735 }
3736 }
3737
3738 if (cmd->sync_cmd == 1) {
3739 if (cmd->scmd) {
3740 dev_notice(&instance->pdev->dev, "unexpected "
3741 "cmd attached to internal command!\n");
3742 }
3743 dev_notice(&instance->pdev->dev, "%p synchronous cmd "
3744 "on the internal reset queue, "
3745 "issue it again.\n", cmd);
3746 cmd->cmd_status_drv = DCMD_INIT;
3747 instance->instancet->fire_cmd(instance,
3748 cmd->frame_phys_addr,
3749 0, instance->reg_set);
3750 } else if (cmd->scmd) {
3751 dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x] "
3752 "detected on the internal queue, issue again.\n",
3753 cmd, cmd->scmd->cmnd[0]);
3754
3755 atomic_inc(&instance->fw_outstanding);
3756 instance->instancet->fire_cmd(instance,
3757 cmd->frame_phys_addr,
3758 cmd->frame_count-1, instance->reg_set);
3759 } else {
3760 dev_notice(&instance->pdev->dev, "%p unexpected cmd on the "
3761 "internal reset defer list while re-issue!!\n",
3762 cmd);
3763 }
3764 }
3765
3766 if (instance->aen_cmd) {
3767 dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
3768 megasas_return_cmd(instance, instance->aen_cmd);
3769
3770 instance->aen_cmd = NULL;
3771 }
3772
3773 /*
3774 * Initiate AEN (Asynchronous Event Notification)
3775 */
3776 seq_num = instance->last_seq_num;
3777 class_locale.members.reserved = 0;
3778 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3779 class_locale.members.class = MR_EVT_CLASS_DEBUG;
3780
3781 megasas_register_aen(instance, seq_num, class_locale.word);
3782 }
3783
3784 /*
3785 * Move the internal reset pending commands to a deferred queue.
3786 *
3787 * We move the commands pending at internal reset time to a
3788 * pending queue. This queue would be flushed after successful
3789 * completion of the internal reset sequence. if the internal reset
3790 * did not complete in time, the kernel reset handler would flush
3791 * these commands.
3792 */
3793 static void
3794 megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
3795 {
3796 struct megasas_cmd *cmd;
3797 int i;
3798 u16 max_cmd = instance->max_fw_cmds;
3799 u32 defer_index;
3800 unsigned long flags;
3801
3802 defer_index = 0;
3803 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
3804 for (i = 0; i < max_cmd; i++) {
3805 cmd = instance->cmd_list[i];
3806 if (cmd->sync_cmd == 1 || cmd->scmd) {
3807 dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p "
3808 "on the defer queue as internal\n",
3809 defer_index, cmd, cmd->sync_cmd, cmd->scmd);
3810
3811 if (!list_empty(&cmd->list)) {
3812 dev_notice(&instance->pdev->dev, "ERROR while"
3813 " moving this cmd:%p, %d %p, it was"
3814 "discovered on some list?\n",
3815 cmd, cmd->sync_cmd, cmd->scmd);
3816
3817 list_del_init(&cmd->list);
3818 }
3819 defer_index++;
3820 list_add_tail(&cmd->list,
3821 &instance->internal_reset_pending_q);
3822 }
3823 }
3824 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
3825 }
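/*
 * The commands parked on internal_reset_pending_q here are re-fired by
 * megasas_issue_pending_cmds_again() above once the adapter reset sequence
 * in process_fw_state_change_wq() below completes.
 */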
3826
3827
3828 static void
3829 process_fw_state_change_wq(struct work_struct *work)
3830 {
3831 struct megasas_instance *instance =
3832 container_of(work, struct megasas_instance, work_init);
3833 u32 wait;
3834 unsigned long flags;
3835
3836 if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
3837 dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
3838 atomic_read(&instance->adprecovery));
3839 return ;
3840 }
3841
3842 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
3843 dev_notice(&instance->pdev->dev, "FW detected to be in fault "
3844 "state, restarting it...\n");
3845
3846 instance->instancet->disable_intr(instance);
3847 atomic_set(&instance->fw_outstanding, 0);
3848
3849 atomic_set(&instance->fw_reset_no_pci_access, 1);
3850 instance->instancet->adp_reset(instance, instance->reg_set);
3851 atomic_set(&instance->fw_reset_no_pci_access, 0);
3852
3853 dev_notice(&instance->pdev->dev, "FW restarted successfully, "
3854 "initiating next stage...\n");
3855
3856 dev_notice(&instance->pdev->dev, "HBA recovery state machine, "
3857 "state 2 starting...\n");
3858
3859 /* wait about 30 seconds before starting the second init */
3860 for (wait = 0; wait < 30; wait++) {
3861 msleep(1000);
3862 }
3863
3864 if (megasas_transition_to_ready(instance, 1)) {
3865 dev_notice(&instance->pdev->dev, "adapter not ready\n");
3866
3867 atomic_set(&instance->fw_reset_no_pci_access, 1);
3868 megaraid_sas_kill_hba(instance);
3869 return ;
3870 }
3871
3872 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
3873 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
3874 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
3875 ) {
3876 *instance->consumer = *instance->producer;
3877 } else {
3878 *instance->consumer = 0;
3879 *instance->producer = 0;
3880 }
3881
3882 megasas_issue_init_mfi(instance);
3883
3884 spin_lock_irqsave(&instance->hba_lock, flags);
3885 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3886 spin_unlock_irqrestore(&instance->hba_lock, flags);
3887 instance->instancet->enable_intr(instance);
3888
3889 megasas_issue_pending_cmds_again(instance);
3890 instance->issuepend_done = 1;
3891 }
3892 }
3893
3894 /**
3895 * megasas_deplete_reply_queue - Processes all completed commands
3896 * @instance: Adapter soft state
3897 * @alt_status: Alternate status to be returned to
3898 * SCSI mid-layer instead of the status
3899 * returned by the FW
3900 * Note: this must be called with hba lock held
3901 */
3902 static int
3903 megasas_deplete_reply_queue(struct megasas_instance *instance,
3904 u8 alt_status)
3905 {
3906 u32 mfiStatus;
3907 u32 fw_state;
3908
3909 if ((mfiStatus = instance->instancet->check_reset(instance,
3910 instance->reg_set)) == 1) {
3911 return IRQ_HANDLED;
3912 }
3913
3914 mfiStatus = instance->instancet->clear_intr(instance);
3915 if (mfiStatus == 0) {
3916 /* Hardware may not set outbound_intr_status in MSI-X mode */
3917 if (!instance->msix_vectors)
3918 return IRQ_NONE;
3919 }
3920
3921 instance->mfiStatus = mfiStatus;
3922
3923 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
3924 fw_state = instance->instancet->read_fw_status_reg(
3925 instance) & MFI_STATE_MASK;
3926
3927 if (fw_state != MFI_STATE_FAULT) {
3928 dev_notice(&instance->pdev->dev, "fw state:%x\n",
3929 fw_state);
3930 }
3931
3932 if ((fw_state == MFI_STATE_FAULT) &&
3933 (instance->disableOnlineCtrlReset == 0)) {
3934 dev_notice(&instance->pdev->dev, "wait adp restart\n");
3935
3936 if ((instance->pdev->device ==
3937 PCI_DEVICE_ID_LSI_SAS1064R) ||
3938 (instance->pdev->device ==
3939 PCI_DEVICE_ID_DELL_PERC5) ||
3940 (instance->pdev->device ==
3941 PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
3942
3943 *instance->consumer =
3944 cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
3945 }
3946
3947
3948 instance->instancet->disable_intr(instance);
3949 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
3950 instance->issuepend_done = 0;
3951
3952 atomic_set(&instance->fw_outstanding, 0);
3953 megasas_internal_reset_defer_cmds(instance);
3954
3955 dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
3956 fw_state, atomic_read(&instance->adprecovery));
3957
3958 schedule_work(&instance->work_init);
3959 return IRQ_HANDLED;
3960
3961 } else {
3962 dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
3963 fw_state, instance->disableOnlineCtrlReset);
3964 }
3965 }
3966
3967 tasklet_schedule(&instance->isr_tasklet);
3968 return IRQ_HANDLED;
3969 }
3970
3971 /**
3972 * megasas_isr - isr entry point
3973 * @irq: IRQ number
3974 * @devp: IRQ context address
3975 */
3976 static irqreturn_t megasas_isr(int irq, void *devp)
3977 {
3978 struct megasas_irq_context *irq_context = devp;
3979 struct megasas_instance *instance = irq_context->instance;
3980 unsigned long flags;
3981 irqreturn_t rc;
3982
3983 if (atomic_read(&instance->fw_reset_no_pci_access))
3984 return IRQ_HANDLED;
3985
3986 spin_lock_irqsave(&instance->hba_lock, flags);
3987 rc = megasas_deplete_reply_queue(instance, DID_OK);
3988 spin_unlock_irqrestore(&instance->hba_lock, flags);
3989
3990 return rc;
3991 }
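/*
 * megasas_deplete_reply_queue() must run with the hba lock held (see the
 * note above), which is why the ISR wraps the call in hba_lock rather than
 * relying on interrupt context alone.
 */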
3992
3993 /**
3994 * megasas_transition_to_ready - Move the FW to READY state
3995 * @instance: Adapter soft state
3996 * @ocr: Adapter reset state
3997 *
3998 * During initialization, the FW can be in any one of several possible
3999 * states. If the FW is in an operational or waiting-for-handshake state,
4000 * the driver must take steps to bring it to the ready state. Otherwise,
4001 * it simply waits for the FW to reach the ready state.
4002 */
4003 int
4004 megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
4005 {
4006 int i;
4007 u8 max_wait;
4008 u32 fw_state;
4009 u32 abs_state, curr_abs_state;
4010
4011 abs_state = instance->instancet->read_fw_status_reg(instance);
4012 fw_state = abs_state & MFI_STATE_MASK;
4013
4014 if (fw_state != MFI_STATE_READY)
4015 dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
4016 " state\n");
4017
4018 while (fw_state != MFI_STATE_READY) {
4019
4020 switch (fw_state) {
4021
4022 case MFI_STATE_FAULT:
4023 dev_printk(KERN_ERR, &instance->pdev->dev,
4024 "FW in FAULT state, Fault code:0x%x subcode:0x%x func:%s\n",
4025 abs_state & MFI_STATE_FAULT_CODE,
4026 abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
4027 if (ocr) {
4028 max_wait = MEGASAS_RESET_WAIT_TIME;
4029 break;
4030 } else {
4031 dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4032 megasas_dump_reg_set(instance->reg_set);
4033 return -ENODEV;
4034 }
4035
4036 case MFI_STATE_WAIT_HANDSHAKE:
4037 /*
4038 * Set the CLR bit in inbound doorbell
4039 */
4040 if ((instance->pdev->device ==
4041 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4042 (instance->pdev->device ==
4043 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
4044 (instance->adapter_type != MFI_SERIES))
4045 writel(
4046 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
4047 &instance->reg_set->doorbell);
4048 else
4049 writel(
4050 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
4051 &instance->reg_set->inbound_doorbell);
4052
4053 max_wait = MEGASAS_RESET_WAIT_TIME;
4054 break;
4055
4056 case MFI_STATE_BOOT_MESSAGE_PENDING:
4057 if ((instance->pdev->device ==
4058 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4059 (instance->pdev->device ==
4060 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
4061 (instance->adapter_type != MFI_SERIES))
4062 writel(MFI_INIT_HOTPLUG,
4063 &instance->reg_set->doorbell);
4064 else
4065 writel(MFI_INIT_HOTPLUG,
4066 &instance->reg_set->inbound_doorbell);
4067
4068 max_wait = MEGASAS_RESET_WAIT_TIME;
4069 break;
4070
4071 case MFI_STATE_OPERATIONAL:
4072 /*
4073 * Bring it to READY state; assuming max wait 10 secs
4074 */
4075 instance->instancet->disable_intr(instance);
4076 if ((instance->pdev->device ==
4077 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4078 (instance->pdev->device ==
4079 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
4080 (instance->adapter_type != MFI_SERIES)) {
4081 writel(MFI_RESET_FLAGS,
4082 &instance->reg_set->doorbell);
4083
4084 if (instance->adapter_type != MFI_SERIES) {
4085 for (i = 0; i < (10 * 1000); i += 20) {
4086 if (megasas_readl(
4087 instance,
4088 &instance->
4089 reg_set->
4090 doorbell) & 1)
4091 msleep(20);
4092 else
4093 break;
4094 }
4095 }
4096 } else
4097 writel(MFI_RESET_FLAGS,
4098 &instance->reg_set->inbound_doorbell);
4099
4100 max_wait = MEGASAS_RESET_WAIT_TIME;
4101 break;
4102
4103 case MFI_STATE_UNDEFINED:
4104 /*
4105 * This state should not last for more than 2 seconds
4106 */
4107 max_wait = MEGASAS_RESET_WAIT_TIME;
4108 break;
4109
4110 case MFI_STATE_BB_INIT:
4111 max_wait = MEGASAS_RESET_WAIT_TIME;
4112 break;
4113
4114 case MFI_STATE_FW_INIT:
4115 max_wait = MEGASAS_RESET_WAIT_TIME;
4116 break;
4117
4118 case MFI_STATE_FW_INIT_2:
4119 max_wait = MEGASAS_RESET_WAIT_TIME;
4120 break;
4121
4122 case MFI_STATE_DEVICE_SCAN:
4123 max_wait = MEGASAS_RESET_WAIT_TIME;
4124 break;
4125
4126 case MFI_STATE_FLUSH_CACHE:
4127 max_wait = MEGASAS_RESET_WAIT_TIME;
4128 break;
4129
4130 default:
4131 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
4132 fw_state);
4133 dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4134 megasas_dump_reg_set(instance->reg_set);
4135 return -ENODEV;
4136 }
4137
4138 /*
4139 * The cur_state should not last for more than max_wait secs
4140 */
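/*
 * Each iteration below sleeps for ~20 ms, so max_wait * 50 iterations gives
 * roughly max_wait seconds of polling (50 * 20 ms = 1 s per unit of
 * max_wait).
 */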
4141 for (i = 0; i < max_wait * 50; i++) {
4142 curr_abs_state = instance->instancet->
4143 read_fw_status_reg(instance);
4144
4145 if (abs_state == curr_abs_state) {
4146 msleep(20);
4147 } else
4148 break;
4149 }
4150
4151 /*
4152 * Return error if fw_state hasn't changed after max_wait
4153 */
4154 if (curr_abs_state == abs_state) {
4155 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
4156 "in %d secs\n", fw_state, max_wait);
4157 dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4158 megasas_dump_reg_set(instance->reg_set);
4159 return -ENODEV;
4160 }
4161
4162 abs_state = curr_abs_state;
4163 fw_state = curr_abs_state & MFI_STATE_MASK;
4164 }
4165 dev_info(&instance->pdev->dev, "FW now in Ready state\n");
4166
4167 return 0;
4168 }
4169
4170 /**
4171 * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool
4172 * @instance: Adapter soft state
4173 */
4174 static void megasas_teardown_frame_pool(struct megasas_instance *instance)
4175 {
4176 int i;
4177 u16 max_cmd = instance->max_mfi_cmds;
4178 struct megasas_cmd *cmd;
4179
4180 if (!instance->frame_dma_pool)
4181 return;
4182
4183 /*
4184 * Return all frames to pool
4185 */
4186 for (i = 0; i < max_cmd; i++) {
4187
4188 cmd = instance->cmd_list[i];
4189
4190 if (cmd->frame)
4191 dma_pool_free(instance->frame_dma_pool, cmd->frame,
4192 cmd->frame_phys_addr);
4193
4194 if (cmd->sense)
4195 dma_pool_free(instance->sense_dma_pool, cmd->sense,
4196 cmd->sense_phys_addr);
4197 }
4198
4199 /*
4200 * Now destroy the pool itself
4201 */
4202 dma_pool_destroy(instance->frame_dma_pool);
4203 dma_pool_destroy(instance->sense_dma_pool);
4204
4205 instance->frame_dma_pool = NULL;
4206 instance->sense_dma_pool = NULL;
4207 }
4208
4209 /**
4210 * megasas_create_frame_pool - Creates DMA pool for cmd frames
4211 * @instance: Adapter soft state
4212 *
4213 * Each command packet has an embedded DMA memory buffer that is used for
4214 * filling MFI frame and the SG list that immediately follows the frame. This
4215 * function creates those DMA memory buffers for each command packet by using
4216 * PCI pool facility.
4217 */
4218 static int megasas_create_frame_pool(struct megasas_instance *instance)
4219 {
4220 int i;
4221 u16 max_cmd;
4222 u32 frame_count;
4223 struct megasas_cmd *cmd;
4224
4225 max_cmd = instance->max_mfi_cmds;
4226
4227 /*
4228 * For MFI controllers.
4229 * max_num_sge = 60
4230 * max_sge_sz = 16 byte (sizeof megasas_sge_skinny)
4231 * Total 960 byte (15 MFI frame of 64 byte)
4232 *
4233 * Fusion adapter require only 3 extra frame.
4234 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
4235 * max_sge_sz = 12 byte (sizeof megasas_sge64)
4236 * Total 192 byte (3 MFI frame of 64 byte)
4237 */
4238 frame_count = (instance->adapter_type == MFI_SERIES) ?
4239 (15 + 1) : (3 + 1);
4240 instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
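/*
 * Assuming MEGAMFI_FRAME_SIZE is 64 bytes (consistent with the 64 byte
 * frames quoted above), this works out to 16 * 64 = 1024 bytes per command
 * for MFI series adapters and 4 * 64 = 256 bytes per command for Fusion
 * adapters.
 */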
4241 /*
4242 * Use DMA pool facility provided by PCI layer
4243 */
4244 instance->frame_dma_pool = dma_pool_create("megasas frame pool",
4245 &instance->pdev->dev,
4246 instance->mfi_frame_size, 256, 0);
4247
4248 if (!instance->frame_dma_pool) {
4249 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
4250 return -ENOMEM;
4251 }
4252
4253 instance->sense_dma_pool = dma_pool_create("megasas sense pool",
4254 &instance->pdev->dev, 128,
4255 4, 0);
4256
4257 if (!instance->sense_dma_pool) {
4258 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
4259
4260 dma_pool_destroy(instance->frame_dma_pool);
4261 instance->frame_dma_pool = NULL;
4262
4263 return -ENOMEM;
4264 }
4265
4266 /*
4267 * Allocate and attach a frame to each of the commands in cmd_list.
4268 * By making cmd->index as the context instead of the &cmd, we can
4269 * always use 32bit context regardless of the architecture
4270 */
4271 for (i = 0; i < max_cmd; i++) {
4272
4273 cmd = instance->cmd_list[i];
4274
4275 cmd->frame = dma_pool_zalloc(instance->frame_dma_pool,
4276 GFP_KERNEL, &cmd->frame_phys_addr);
4277
4278 cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
4279 GFP_KERNEL, &cmd->sense_phys_addr);
4280
4281 /*
4282 * megasas_teardown_frame_pool() takes care of freeing
4283 * whatever has been allocated
4284 */
4285 if (!cmd->frame || !cmd->sense) {
4286 dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n");
4287 megasas_teardown_frame_pool(instance);
4288 return -ENOMEM;
4289 }
4290
4291 cmd->frame->io.context = cpu_to_le32(cmd->index);
4292 cmd->frame->io.pad_0 = 0;
4293 if ((instance->adapter_type == MFI_SERIES) && reset_devices)
4294 cmd->frame->hdr.cmd = MFI_CMD_INVALID;
4295 }
4296
4297 return 0;
4298 }
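/*
 * Marking every frame MFI_CMD_INVALID when reset_devices is set (i.e. in a
 * kdump kernel) pairs with the MFI_CMD_INVALID handling in
 * megasas_complete_cmd(): a completion left over from the crashed kernel is
 * then ignored rather than treated as a live command.
 */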
4299
4300 /**
4301 * megasas_free_cmds - Free all the cmds in the free cmd pool
4302 * @instance: Adapter soft state
4303 */
4304 void megasas_free_cmds(struct megasas_instance *instance)
4305 {
4306 int i;
4307
4308 /* First free the MFI frame pool */
4309 megasas_teardown_frame_pool(instance);
4310
4311 /* Free all the commands in the cmd_list */
4312 for (i = 0; i < instance->max_mfi_cmds; i++)
4313
4314 kfree(instance->cmd_list[i]);
4315
4316 /* Free the cmd_list buffer itself */
4317 kfree(instance->cmd_list);
4318 instance->cmd_list = NULL;
4319
4320 INIT_LIST_HEAD(&instance->cmd_pool);
4321 }
4322
4323 /**
4324 * megasas_alloc_cmds - Allocates the command packets
4325 * @instance: Adapter soft state
4326 *
4327 * Each command that is issued to the FW, whether IO commands from the OS or
4328 * internal commands like IOCTLs, are wrapped in local data structure called
4329 * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
4330 * the FW.
4331 *
4332 * Each frame has a 32-bit field called context (tag). This context is used
4333 * to get back the megasas_cmd from the frame when a frame gets completed in
4334 * the ISR. Typically the address of the megasas_cmd itself would be used as
4335 * the context. But we wanted to keep the differences between 32 and 64 bit
4336 * systems to the mininum. We always use 32 bit integers for the context. In
4337 * this driver, the 32 bit values are the indices into an array cmd_list.
4338 * This array is used only to look up the megasas_cmd given the context. The
4339 * free commands themselves are maintained in a linked list called cmd_pool.
4340 */
4341 int megasas_alloc_cmds(struct megasas_instance *instance)
4342 {
4343 int i;
4344 int j;
4345 u16 max_cmd;
4346 struct megasas_cmd *cmd;
4347
4348 max_cmd = instance->max_mfi_cmds;
4349
4350 /*
4351 * instance->cmd_list is an array of struct megasas_cmd pointers.
4352 * Allocate the dynamic array first and then allocate individual
4353 * commands.
4354 */
4355 instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL);
4356
4357 if (!instance->cmd_list) {
4358 dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
4359 return -ENOMEM;
4360 }
4361
4362 memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd);
4363
4364 for (i = 0; i < max_cmd; i++) {
4365 instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
4366 GFP_KERNEL);
4367
4368 if (!instance->cmd_list[i]) {
4369
4370 for (j = 0; j < i; j++)
4371 kfree(instance->cmd_list[j]);
4372
4373 kfree(instance->cmd_list);
4374 instance->cmd_list = NULL;
4375
4376 return -ENOMEM;
4377 }
4378 }
4379
4380 for (i = 0; i < max_cmd; i++) {
4381 cmd = instance->cmd_list[i];
4382 memset(cmd, 0, sizeof(struct megasas_cmd));
4383 cmd->index = i;
4384 cmd->scmd = NULL;
4385 cmd->instance = instance;
4386
4387 list_add_tail(&cmd->list, &instance->cmd_pool);
4388 }
4389
4390 /*
4391 * Create a frame pool and assign one frame to each cmd
4392 */
4393 if (megasas_create_frame_pool(instance)) {
4394 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
4395 megasas_free_cmds(instance);
4396 return -ENOMEM;
4397 }
4398
4399 return 0;
4400 }
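/*
 * The context programmed into each frame (see megasas_create_frame_pool()
 * above) is simply cmd->index, so the completion path can recover the
 * owning command with a plain array lookup, roughly:
 *
 *   cmd = instance->cmd_list[context];
 *
 * which works identically on 32 and 64 bit hosts.
 */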
4401
4402 /*
4403 * dcmd_timeout_ocr_possible - Check if OCR is possible based on Driver/FW state.
4404 * @instance: Adapter soft state
4405 *
4406 * Returns INITIATE_OCR only for Fusion adapters, when driver load/unload is
4407 * not in progress and the FW is not already under OCR.
4408 */
4409 inline int
4410 dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
4411
4412 if (instance->adapter_type == MFI_SERIES)
4413 return KILL_ADAPTER;
4414 else if (instance->unload ||
4415 test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE,
4416 &instance->reset_flags))
4417 return IGNORE_TIMEOUT;
4418 else
4419 return INITIATE_OCR;
4420 }
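/*
 * Typical use by the DCMD issuers below: on DCMD_TIMEOUT they switch on the
 * return value, e.g.
 *
 *   switch (dcmd_timeout_ocr_possible(instance)) {
 *   case INITIATE_OCR:
 *           cmd->flags |= DRV_DCMD_SKIP_REFIRE;
 *           mutex_unlock(&instance->reset_mutex);
 *           megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
 *           mutex_lock(&instance->reset_mutex);
 *           break;
 *   case KILL_ADAPTER:
 *           megaraid_sas_kill_hba(instance);
 *           break;
 *   case IGNORE_TIMEOUT:
 *           break;
 *   }
 */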
4421
4422 static void
4423 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
4424 {
4425 int ret;
4426 struct megasas_cmd *cmd;
4427 struct megasas_dcmd_frame *dcmd;
4428
4429 struct MR_PRIV_DEVICE *mr_device_priv_data;
4430 u16 device_id = 0;
4431
4432 device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
4433 cmd = megasas_get_cmd(instance);
4434
4435 if (!cmd) {
4436 dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
4437 return;
4438 }
4439
4440 dcmd = &cmd->frame->dcmd;
4441
4442 memset(instance->pd_info, 0, sizeof(*instance->pd_info));
4443 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4444
4445 dcmd->mbox.s[0] = cpu_to_le16(device_id);
4446 dcmd->cmd = MFI_CMD_DCMD;
4447 dcmd->cmd_status = 0xFF;
4448 dcmd->sge_count = 1;
4449 dcmd->flags = MFI_FRAME_DIR_READ;
4450 dcmd->timeout = 0;
4451 dcmd->pad_0 = 0;
4452 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
4453 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
4454
4455 megasas_set_dma_settings(instance, dcmd, instance->pd_info_h,
4456 sizeof(struct MR_PD_INFO));
4457
4458 if ((instance->adapter_type != MFI_SERIES) &&
4459 !instance->mask_interrupts)
4460 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4461 else
4462 ret = megasas_issue_polled(instance, cmd);
4463
4464 switch (ret) {
4465 case DCMD_SUCCESS:
4466 mr_device_priv_data = sdev->hostdata;
4467 le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
4468 mr_device_priv_data->interface_type =
4469 instance->pd_info->state.ddf.pdType.intf;
4470 break;
4471
4472 case DCMD_TIMEOUT:
4473
4474 switch (dcmd_timeout_ocr_possible(instance)) {
4475 case INITIATE_OCR:
4476 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4477 mutex_unlock(&instance->reset_mutex);
4478 megasas_reset_fusion(instance->host,
4479 MFI_IO_TIMEOUT_OCR);
4480 mutex_lock(&instance->reset_mutex);
4481 break;
4482 case KILL_ADAPTER:
4483 megaraid_sas_kill_hba(instance);
4484 break;
4485 case IGNORE_TIMEOUT:
4486 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4487 __func__, __LINE__);
4488 break;
4489 }
4490
4491 break;
4492 }
4493
4494 if (ret != DCMD_TIMEOUT)
4495 megasas_return_cmd(instance, cmd);
4496
4497 return;
4498 }
4499 /*
4500 * megasas_get_pd_list - Returns FW's pd_list structure
4501 * @instance: Adapter soft state
4502 * @pd_list: pd_list structure
4503 *
4504 * Issues an internal command (DCMD) to get the FW's controller PD
4505 * list structure. This information is mainly used to find out the
4506 * system PDs supported by the FW.
4507 */
4508 static int
4509 megasas_get_pd_list(struct megasas_instance *instance)
4510 {
4511 int ret = 0, pd_index = 0;
4512 struct megasas_cmd *cmd;
4513 struct megasas_dcmd_frame *dcmd;
4514 struct MR_PD_LIST *ci;
4515 struct MR_PD_ADDRESS *pd_addr;
4516
4517 if (instance->pd_list_not_supported) {
4518 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4519 "not supported by firmware\n");
4520 return ret;
4521 }
4522
4523 ci = instance->pd_list_buf;
4524
4525 cmd = megasas_get_cmd(instance);
4526
4527 if (!cmd) {
4528 dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
4529 return -ENOMEM;
4530 }
4531
4532 dcmd = &cmd->frame->dcmd;
4533
4534 memset(ci, 0, sizeof(*ci));
4535 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4536
4537 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4538 dcmd->mbox.b[1] = 0;
4539 dcmd->cmd = MFI_CMD_DCMD;
4540 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4541 dcmd->sge_count = 1;
4542 dcmd->flags = MFI_FRAME_DIR_READ;
4543 dcmd->timeout = 0;
4544 dcmd->pad_0 = 0;
4545 dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4546 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
4547
4548 megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h,
4549 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)));
4550
4551 if ((instance->adapter_type != MFI_SERIES) &&
4552 !instance->mask_interrupts)
4553 ret = megasas_issue_blocked_cmd(instance, cmd,
4554 MFI_IO_TIMEOUT_SECS);
4555 else
4556 ret = megasas_issue_polled(instance, cmd);
4557
4558 switch (ret) {
4559 case DCMD_FAILED:
4560 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4561 "failed/not supported by firmware\n");
4562
4563 if (instance->adapter_type != MFI_SERIES)
4564 megaraid_sas_kill_hba(instance);
4565 else
4566 instance->pd_list_not_supported = 1;
4567 break;
4568 case DCMD_TIMEOUT:
4569
4570 switch (dcmd_timeout_ocr_possible(instance)) {
4571 case INITIATE_OCR:
4572 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4573 /*
4574 * DCMD failed from AEN path.
4575 * AEN path already hold reset_mutex to avoid PCI access
4576 * while OCR is in progress.
4577 */
4578 mutex_unlock(&instance->reset_mutex);
4579 megasas_reset_fusion(instance->host,
4580 MFI_IO_TIMEOUT_OCR);
4581 mutex_lock(&instance->reset_mutex);
4582 break;
4583 case KILL_ADAPTER:
4584 megaraid_sas_kill_hba(instance);
4585 break;
4586 case IGNORE_TIMEOUT:
4587 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n",
4588 __func__, __LINE__);
4589 break;
4590 }
4591
4592 break;
4593
4594 case DCMD_SUCCESS:
4595 pd_addr = ci->addr;
4596 if (megasas_dbg_lvl & LD_PD_DEBUG)
4597 dev_info(&instance->pdev->dev, "%s, sysPD count: 0x%x\n",
4598 __func__, le32_to_cpu(ci->count));
4599
4600 if ((le32_to_cpu(ci->count) >
4601 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
4602 break;
4603
4604 memset(instance->local_pd_list, 0,
4605 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4606
4607 for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
4608 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid =
4609 le16_to_cpu(pd_addr->deviceId);
4610 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType =
4611 pd_addr->scsiDevType;
4612 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState =
4613 MR_PD_STATE_SYSTEM;
4614 if (megasas_dbg_lvl & LD_PD_DEBUG)
4615 dev_info(&instance->pdev->dev,
4616 "PD%d: targetID: 0x%03x deviceType:0x%x\n",
4617 pd_index, le16_to_cpu(pd_addr->deviceId),
4618 pd_addr->scsiDevType);
4619 pd_addr++;
4620 }
4621
4622 memcpy(instance->pd_list, instance->local_pd_list,
4623 sizeof(instance->pd_list));
4624 break;
4625
4626 }
4627
4628 if (ret != DCMD_TIMEOUT)
4629 megasas_return_cmd(instance, cmd);
4630
4631 return ret;
4632 }
4633
4634 /*
4635 * megasas_get_ld_list - Returns FW's ld_list structure
4636 * @instance: Adapter soft state
4637 * @ld_list: ld_list structure
4638 *
4639 * Issues an internal command (DCMD) to get the FW's controller LD
4640 * list structure. This information is mainly used to find out the
4641 * LDs exposed by the FW.
4642 */
4643 static int
4644 megasas_get_ld_list(struct megasas_instance *instance)
4645 {
4646 int ret = 0, ld_index = 0, ids = 0;
4647 struct megasas_cmd *cmd;
4648 struct megasas_dcmd_frame *dcmd;
4649 struct MR_LD_LIST *ci;
4650 dma_addr_t ci_h = 0;
4651 u32 ld_count;
4652
4653 ci = instance->ld_list_buf;
4654 ci_h = instance->ld_list_buf_h;
4655
4656 cmd = megasas_get_cmd(instance);
4657
4658 if (!cmd) {
4659 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
4660 return -ENOMEM;
4661 }
4662
4663 dcmd = &cmd->frame->dcmd;
4664
4665 memset(ci, 0, sizeof(*ci));
4666 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4667
4668 if (instance->supportmax256vd)
4669 dcmd->mbox.b[0] = 1;
4670 dcmd->cmd = MFI_CMD_DCMD;
4671 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4672 dcmd->sge_count = 1;
4673 dcmd->flags = MFI_FRAME_DIR_READ;
4674 dcmd->timeout = 0;
4675 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
4676 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
4677 dcmd->pad_0 = 0;
4678
4679 megasas_set_dma_settings(instance, dcmd, ci_h,
4680 sizeof(struct MR_LD_LIST));
4681
4682 if ((instance->adapter_type != MFI_SERIES) &&
4683 !instance->mask_interrupts)
4684 ret = megasas_issue_blocked_cmd(instance, cmd,
4685 MFI_IO_TIMEOUT_SECS);
4686 else
4687 ret = megasas_issue_polled(instance, cmd);
4688
4689 ld_count = le32_to_cpu(ci->ldCount);
4690
4691 switch (ret) {
4692 case DCMD_FAILED:
4693 megaraid_sas_kill_hba(instance);
4694 break;
4695 case DCMD_TIMEOUT:
4696
4697 switch (dcmd_timeout_ocr_possible(instance)) {
4698 case INITIATE_OCR:
4699 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4700 /*
4701 * DCMD failed from AEN path.
4702 * AEN path already hold reset_mutex to avoid PCI access
4703 * while OCR is in progress.
4704 */
4705 mutex_unlock(&instance->reset_mutex);
4706 megasas_reset_fusion(instance->host,
4707 MFI_IO_TIMEOUT_OCR);
4708 mutex_lock(&instance->reset_mutex);
4709 break;
4710 case KILL_ADAPTER:
4711 megaraid_sas_kill_hba(instance);
4712 break;
4713 case IGNORE_TIMEOUT:
4714 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4715 __func__, __LINE__);
4716 break;
4717 }
4718
4719 break;
4720
4721 case DCMD_SUCCESS:
4722 if (megasas_dbg_lvl & LD_PD_DEBUG)
4723 dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
4724 __func__, ld_count);
4725
4726 if (ld_count > instance->fw_supported_vd_count)
4727 break;
4728
4729 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4730
4731 for (ld_index = 0; ld_index < ld_count; ld_index++) {
4732 if (ci->ldList[ld_index].state != 0) {
4733 ids = ci->ldList[ld_index].ref.targetId;
4734 instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
4735 if (megasas_dbg_lvl & LD_PD_DEBUG)
4736 dev_info(&instance->pdev->dev,
4737 "LD%d: targetID: 0x%03x\n",
4738 ld_index, ids);
4739 }
4740 }
4741
4742 break;
4743 }
4744
4745 if (ret != DCMD_TIMEOUT)
4746 megasas_return_cmd(instance, cmd);
4747
4748 return ret;
4749 }
4750
4751 /**
4752 * megasas_ld_list_query - Returns FW's ld_list structure
4753 * @instance: Adapter soft state
4754 * @query_type: ld_list structure type
4755 *
4756 * Issues an internal command (DCMD) to query the FW's controller LD
4757 * list. This information is mainly used to find out the LDs exposed
4758 * by the FW for the given query type.
4759 */
4760 static int
4761 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4762 {
4763 int ret = 0, ld_index = 0, ids = 0;
4764 struct megasas_cmd *cmd;
4765 struct megasas_dcmd_frame *dcmd;
4766 struct MR_LD_TARGETID_LIST *ci;
4767 dma_addr_t ci_h = 0;
4768 u32 tgtid_count;
4769
4770 ci = instance->ld_targetid_list_buf;
4771 ci_h = instance->ld_targetid_list_buf_h;
4772
4773 cmd = megasas_get_cmd(instance);
4774
4775 if (!cmd) {
4776 dev_warn(&instance->pdev->dev,
4777 "megasas_ld_list_query: Failed to get cmd\n");
4778 return -ENOMEM;
4779 }
4780
4781 dcmd = &cmd->frame->dcmd;
4782
4783 memset(ci, 0, sizeof(*ci));
4784 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4785
4786 dcmd->mbox.b[0] = query_type;
4787 if (instance->supportmax256vd)
4788 dcmd->mbox.b[2] = 1;
4789
4790 dcmd->cmd = MFI_CMD_DCMD;
4791 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4792 dcmd->sge_count = 1;
4793 dcmd->flags = MFI_FRAME_DIR_READ;
4794 dcmd->timeout = 0;
4795 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4796 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
4797 dcmd->pad_0 = 0;
4798
4799 megasas_set_dma_settings(instance, dcmd, ci_h,
4800 sizeof(struct MR_LD_TARGETID_LIST));
4801
4802 if ((instance->adapter_type != MFI_SERIES) &&
4803 !instance->mask_interrupts)
4804 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4805 else
4806 ret = megasas_issue_polled(instance, cmd);
4807
4808 switch (ret) {
4809 case DCMD_FAILED:
4810 dev_info(&instance->pdev->dev,
4811 "DCMD not supported by firmware - %s %d\n",
4812 __func__, __LINE__);
4813 ret = megasas_get_ld_list(instance);
4814 break;
4815 case DCMD_TIMEOUT:
4816 switch (dcmd_timeout_ocr_possible(instance)) {
4817 case INITIATE_OCR:
4818 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4819 /*
4820 * DCMD failed from AEN path.
4821 * AEN path already hold reset_mutex to avoid PCI access
4822 * while OCR is in progress.
4823 */
4824 mutex_unlock(&instance->reset_mutex);
4825 megasas_reset_fusion(instance->host,
4826 MFI_IO_TIMEOUT_OCR);
4827 mutex_lock(&instance->reset_mutex);
4828 break;
4829 case KILL_ADAPTER:
4830 megaraid_sas_kill_hba(instance);
4831 break;
4832 case IGNORE_TIMEOUT:
4833 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4834 __func__, __LINE__);
4835 break;
4836 }
4837
4838 break;
4839 case DCMD_SUCCESS:
4840 tgtid_count = le32_to_cpu(ci->count);
4841
4842 if (megasas_dbg_lvl & LD_PD_DEBUG)
4843 dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
4844 __func__, tgtid_count);
4845
4846 if ((tgtid_count > (instance->fw_supported_vd_count)))
4847 break;
4848
4849 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
4850 for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
4851 ids = ci->targetId[ld_index];
4852 instance->ld_ids[ids] = ci->targetId[ld_index];
4853 if (megasas_dbg_lvl & LD_PD_DEBUG)
4854 dev_info(&instance->pdev->dev, "LD%d: targetID: 0x%03x\n",
4855 ld_index, ci->targetId[ld_index]);
4856 }
4857
4858 break;
4859 }
4860
4861 if (ret != DCMD_TIMEOUT)
4862 megasas_return_cmd(instance, cmd);
4863
4864 return ret;
4865 }
4866
4867 /**
4868 * dcmd.opcode - MR_DCMD_CTRL_DEVICE_LIST_GET
4869 * dcmd.mbox - reserved
4870 * dcmd.sge IN - ptr to return MR_HOST_DEVICE_LIST structure
4871 * Desc: This DCMD will return the combined device list
4872 * Status: MFI_STAT_OK - List returned successfully
4873 * MFI_STAT_INVALID_CMD - Firmware support for the feature has been
4874 * disabled
4875 * @instance: Adapter soft state
4876 * @is_probe: Driver probe check
4877 * Return: 0 if DCMD succeeded
4878 * non-zero if failed
4879 */
4880 static int
4881 megasas_host_device_list_query(struct megasas_instance *instance,
4882 bool is_probe)
4883 {
4884 int ret, i, target_id;
4885 struct megasas_cmd *cmd;
4886 struct megasas_dcmd_frame *dcmd;
4887 struct MR_HOST_DEVICE_LIST *ci;
4888 u32 count;
4889 dma_addr_t ci_h;
4890
4891 ci = instance->host_device_list_buf;
4892 ci_h = instance->host_device_list_buf_h;
4893
4894 cmd = megasas_get_cmd(instance);
4895
4896 if (!cmd) {
4897 dev_warn(&instance->pdev->dev,
4898 "%s: failed to get cmd\n",
4899 __func__);
4900 return -ENOMEM;
4901 }
4902
4903 dcmd = &cmd->frame->dcmd;
4904
4905 memset(ci, 0, sizeof(*ci));
4906 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4907
4908 dcmd->mbox.b[0] = is_probe ? 0 : 1;
4909 dcmd->cmd = MFI_CMD_DCMD;
4910 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4911 dcmd->sge_count = 1;
4912 dcmd->flags = MFI_FRAME_DIR_READ;
4913 dcmd->timeout = 0;
4914 dcmd->pad_0 = 0;
4915 dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ);
4916 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET);
4917
4918 megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ);
4919
4920 if (!instance->mask_interrupts) {
4921 ret = megasas_issue_blocked_cmd(instance, cmd,
4922 MFI_IO_TIMEOUT_SECS);
4923 } else {
4924 ret = megasas_issue_polled(instance, cmd);
4925 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4926 }
4927
4928 switch (ret) {
4929 case DCMD_SUCCESS:
4930 /* Fill the internal pd_list and ld_ids array based on
4931 * targetIds returned by FW
4932 */
4933 count = le32_to_cpu(ci->count);
4934
4935 if (count > (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT))
4936 break;
4937
4938 if (megasas_dbg_lvl & LD_PD_DEBUG)
4939 dev_info(&instance->pdev->dev, "%s, Device count: 0x%x\n",
4940 __func__, count);
4941
4942 memset(instance->local_pd_list, 0,
4943 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4944 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4945 for (i = 0; i < count; i++) {
4946 target_id = le16_to_cpu(ci->host_device_list[i].target_id);
4947 if (ci->host_device_list[i].flags.u.bits.is_sys_pd) {
4948 instance->local_pd_list[target_id].tid = target_id;
4949 instance->local_pd_list[target_id].driveType =
4950 ci->host_device_list[i].scsi_type;
4951 instance->local_pd_list[target_id].driveState =
4952 MR_PD_STATE_SYSTEM;
4953 if (megasas_dbg_lvl & LD_PD_DEBUG)
4954 dev_info(&instance->pdev->dev,
4955 "Device %d: PD targetID: 0x%03x deviceType:0x%x\n",
4956 i, target_id, ci->host_device_list[i].scsi_type);
4957 } else {
4958 instance->ld_ids[target_id] = target_id;
4959 if (megasas_dbg_lvl & LD_PD_DEBUG)
4960 dev_info(&instance->pdev->dev,
4961 "Device %d: LD targetID: 0x%03x\n",
4962 i, target_id);
4963 }
4964 }
4965
4966 memcpy(instance->pd_list, instance->local_pd_list,
4967 sizeof(instance->pd_list));
4968 break;
4969
4970 case DCMD_TIMEOUT:
4971 switch (dcmd_timeout_ocr_possible(instance)) {
4972 case INITIATE_OCR:
4973 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4974 mutex_unlock(&instance->reset_mutex);
4975 megasas_reset_fusion(instance->host,
4976 MFI_IO_TIMEOUT_OCR);
4977 mutex_lock(&instance->reset_mutex);
4978 break;
4979 case KILL_ADAPTER:
4980 megaraid_sas_kill_hba(instance);
4981 break;
4982 case IGNORE_TIMEOUT:
4983 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4984 __func__, __LINE__);
4985 break;
4986 }
4987 break;
4988 case DCMD_FAILED:
4989 dev_err(&instance->pdev->dev,
4990 "%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n",
4991 __func__);
4992 break;
4993 }
4994
4995 if (ret != DCMD_TIMEOUT)
4996 megasas_return_cmd(instance, cmd);
4997
4998 return ret;
4999 }
5000
5001 /*
5002 * megasas_update_ext_vd_details : Update details w.r.t Extended VD
5003 * instance : Controller's instance
5004 */
5005 static void megasas_update_ext_vd_details(struct megasas_instance *instance)
5006 {
5007 struct fusion_context *fusion;
5008 u32 ventura_map_sz = 0;
5009
5010 fusion = instance->ctrl_context;
5011 /* Nothing to update for MFI based controllers */
5012 if (!fusion)
5013 return;
5014
5015 instance->supportmax256vd =
5016 instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs;
5017 /* Below is additional check to address future FW enhancement */
5018 if (instance->ctrl_info_buf->max_lds > 64)
5019 instance->supportmax256vd = 1;
5020
5021 instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
5022 * MEGASAS_MAX_DEV_PER_CHANNEL;
5023 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
5024 * MEGASAS_MAX_DEV_PER_CHANNEL;
5025 if (instance->supportmax256vd) {
5026 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
5027 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5028 } else {
5029 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
5030 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5031 }
5032
5033 dev_info(&instance->pdev->dev,
5034 "FW provided supportMaxExtLDs: %d\tmax_lds: %d\n",
5035 instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0,
5036 instance->ctrl_info_buf->max_lds);
5037
5038 if (instance->max_raid_mapsize) {
5039 ventura_map_sz = instance->max_raid_mapsize *
5040 MR_MIN_MAP_SIZE; /* 64k */
5041 fusion->current_map_sz = ventura_map_sz;
5042 fusion->max_map_sz = ventura_map_sz;
5043 } else {
5044 fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
5045 (sizeof(struct MR_LD_SPAN_MAP) *
5046 (instance->fw_supported_vd_count - 1));
5047 fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
5048
5049 fusion->max_map_sz =
5050 max(fusion->old_map_sz, fusion->new_map_sz);
5051
5052 if (instance->supportmax256vd)
5053 fusion->current_map_sz = fusion->new_map_sz;
5054 else
5055 fusion->current_map_sz = fusion->old_map_sz;
5056 }
5057 /* irrespective of FW raid maps, driver raid map is constant */
5058 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
5059 }
5060
5061 /*
5062 * dcmd.opcode - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES
5063 * dcmd.hdr.length - number of bytes to read
5064 * dcmd.sge - Ptr to MR_SNAPDUMP_PROPERTIES
5065 * Desc: Fill in snapdump properties
5066 * Status: MFI_STAT_OK- Command successful
5067 */
5068 void megasas_get_snapdump_properties(struct megasas_instance *instance)
5069 {
5070 int ret = 0;
5071 struct megasas_cmd *cmd;
5072 struct megasas_dcmd_frame *dcmd;
5073 struct MR_SNAPDUMP_PROPERTIES *ci;
5074 dma_addr_t ci_h = 0;
5075
5076 ci = instance->snapdump_prop;
5077 ci_h = instance->snapdump_prop_h;
5078
5079 if (!ci)
5080 return;
5081
5082 cmd = megasas_get_cmd(instance);
5083
5084 if (!cmd) {
5085 dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n");
5086 return;
5087 }
5088
5089 dcmd = &cmd->frame->dcmd;
5090
5091 memset(ci, 0, sizeof(*ci));
5092 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5093
5094 dcmd->cmd = MFI_CMD_DCMD;
5095 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5096 dcmd->sge_count = 1;
5097 dcmd->flags = MFI_FRAME_DIR_READ;
5098 dcmd->timeout = 0;
5099 dcmd->pad_0 = 0;
5100 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES));
5101 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES);
5102
5103 megasas_set_dma_settings(instance, dcmd, ci_h,
5104 sizeof(struct MR_SNAPDUMP_PROPERTIES));
5105
5106 if (!instance->mask_interrupts) {
5107 ret = megasas_issue_blocked_cmd(instance, cmd,
5108 MFI_IO_TIMEOUT_SECS);
5109 } else {
5110 ret = megasas_issue_polled(instance, cmd);
5111 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5112 }
5113
5114 switch (ret) {
5115 case DCMD_SUCCESS:
5116 instance->snapdump_wait_time =
5117 min_t(u8, ci->trigger_min_num_sec_before_ocr,
5118 MEGASAS_MAX_SNAP_DUMP_WAIT_TIME);
5119 break;
5120
5121 case DCMD_TIMEOUT:
5122 switch (dcmd_timeout_ocr_possible(instance)) {
5123 case INITIATE_OCR:
5124 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5125 mutex_unlock(&instance->reset_mutex);
5126 megasas_reset_fusion(instance->host,
5127 MFI_IO_TIMEOUT_OCR);
5128 mutex_lock(&instance->reset_mutex);
5129 break;
5130 case KILL_ADAPTER:
5131 megaraid_sas_kill_hba(instance);
5132 break;
5133 case IGNORE_TIMEOUT:
5134 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5135 __func__, __LINE__);
5136 break;
5137 }
5138 }
5139
5140 if (ret != DCMD_TIMEOUT)
5141 megasas_return_cmd(instance, cmd);
5142 }
5143
5144 /**
5145 * megasas_get_ctrl_info - Returns FW's controller structure
5146 * @instance: Adapter soft state
5147 *
5148 * Issues an internal command (DCMD) to get the FW's controller structure.
5149 * This information is mainly used to find out the maximum IO transfer per
5150 * command supported by the FW.
5151 */
5152 int
5153 megasas_get_ctrl_info(struct megasas_instance *instance)
5154 {
5155 int ret = 0;
5156 struct megasas_cmd *cmd;
5157 struct megasas_dcmd_frame *dcmd;
5158 struct megasas_ctrl_info *ci;
5159 dma_addr_t ci_h = 0;
5160
5161 ci = instance->ctrl_info_buf;
5162 ci_h = instance->ctrl_info_buf_h;
5163
5164 cmd = megasas_get_cmd(instance);
5165
5166 if (!cmd) {
5167 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
5168 return -ENOMEM;
5169 }
5170
5171 dcmd = &cmd->frame->dcmd;
5172
5173 memset(ci, 0, sizeof(*ci));
5174 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5175
5176 dcmd->cmd = MFI_CMD_DCMD;
5177 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5178 dcmd->sge_count = 1;
5179 dcmd->flags = MFI_FRAME_DIR_READ;
5180 dcmd->timeout = 0;
5181 dcmd->pad_0 = 0;
5182 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
5183 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
5184 dcmd->mbox.b[0] = 1;
5185
5186 megasas_set_dma_settings(instance, dcmd, ci_h,
5187 sizeof(struct megasas_ctrl_info));
5188
5189 if ((instance->adapter_type != MFI_SERIES) &&
5190 !instance->mask_interrupts) {
5191 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5192 } else {
5193 ret = megasas_issue_polled(instance, cmd);
5194 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5195 }
5196
5197 switch (ret) {
5198 case DCMD_SUCCESS:
5199 /* Save required controller information in
5200 * CPU endianness format.
5201 */
5202 le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
5203 le16_to_cpus((u16 *)&ci->properties.on_off_properties2);
5204 le32_to_cpus((u32 *)&ci->adapterOperations2);
5205 le32_to_cpus((u32 *)&ci->adapterOperations3);
5206 le16_to_cpus((u16 *)&ci->adapter_operations4);
5207 le32_to_cpus((u32 *)&ci->adapter_operations5);
5208
5209 /* Update the latest Ext VD info.
5210 * From Init path, store current firmware details.
5211 * From OCR path, detect any firmware properties changes.
5212 * in case of Firmware upgrade without system reboot.
5213 */
5214 megasas_update_ext_vd_details(instance);
5215 instance->support_seqnum_jbod_fp =
5216 ci->adapterOperations3.useSeqNumJbodFP;
5217 instance->support_morethan256jbod =
5218 ci->adapter_operations4.support_pd_map_target_id;
5219 instance->support_nvme_passthru =
5220 ci->adapter_operations4.support_nvme_passthru;
5221 instance->support_pci_lane_margining =
5222 ci->adapter_operations5.support_pci_lane_margining;
5223 instance->task_abort_tmo = ci->TaskAbortTO;
5224 instance->max_reset_tmo = ci->MaxResetTO;
5225
5226 /*Check whether controller is iMR or MR */
5227 instance->is_imr = (ci->memory_size ? 0 : 1);
5228
5229 instance->snapdump_wait_time =
5230 (ci->properties.on_off_properties2.enable_snap_dump ?
5231 MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0);
5232
5233 instance->enable_fw_dev_list =
5234 ci->properties.on_off_properties2.enable_fw_dev_list;
5235
5236 dev_info(&instance->pdev->dev,
5237 "controller type\t: %s(%dMB)\n",
5238 instance->is_imr ? "iMR" : "MR",
5239 le16_to_cpu(ci->memory_size));
5240
5241 instance->disableOnlineCtrlReset =
5242 ci->properties.OnOffProperties.disableOnlineCtrlReset;
5243 instance->secure_jbod_support =
5244 ci->adapterOperations3.supportSecurityonJBOD;
5245 dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
5246 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
5247 dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
5248 instance->secure_jbod_support ? "Yes" : "No");
5249 dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
5250 instance->support_nvme_passthru ? "Yes" : "No");
5251 dev_info(&instance->pdev->dev,
5252 "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n",
5253 instance->task_abort_tmo, instance->max_reset_tmo);
5254 dev_info(&instance->pdev->dev, "JBOD sequence map support\t: %s\n",
5255 instance->support_seqnum_jbod_fp ? "Yes" : "No");
5256 dev_info(&instance->pdev->dev, "PCI Lane Margining support\t: %s\n",
5257 instance->support_pci_lane_margining ? "Yes" : "No");
5258
5259 break;
5260
5261 case DCMD_TIMEOUT:
5262 switch (dcmd_timeout_ocr_possible(instance)) {
5263 case INITIATE_OCR:
5264 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
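/*
 * Release reset_mutex before triggering OCR (the reset path takes the
 * mutex itself), then re-acquire it to restore the caller's locking
 * context.
 */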
5265 mutex_unlock(&instance->reset_mutex);
5266 megasas_reset_fusion(instance->host,
5267 MFI_IO_TIMEOUT_OCR);
5268 mutex_lock(&instance->reset_mutex);
5269 break;
5270 case KILL_ADAPTER:
5271 megaraid_sas_kill_hba(instance);
5272 break;
5273 case IGNORE_TIMEOUT:
5274 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5275 __func__, __LINE__);
5276 break;
5277 }
5278 break;
5279 case DCMD_FAILED:
5280 megaraid_sas_kill_hba(instance);
5281 break;
5282
5283 }
5284
5285 if (ret != DCMD_TIMEOUT)
5286 megasas_return_cmd(instance, cmd);
5287
5288 return ret;
5289 }
5290
5291 /*
5292 * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer
5293 * to firmware
5294 *
5295 * @instance: Adapter soft state
5296 * @crash_buf_state - tell FW to turn ON/OFF crash dump feature
5297 * MR_CRASH_BUF_TURN_OFF = 0
5298 * MR_CRASH_BUF_TURN_ON = 1
5299 * @return 0 on success non-zero on failure.
5300 * Issues an internal command (DCMD) to set parameters for crash dump feature.
5301 * Driver will send address of crash dump DMA buffer and set mbox to tell FW
5302 * that driver supports crash dump feature. This DCMD will be sent only if
5303 * crash dump feature is supported by the FW.
5304 *
5305 */
5306 int megasas_set_crash_dump_params(struct megasas_instance *instance,
5307 u8 crash_buf_state)
5308 {
5309 int ret = 0;
5310 struct megasas_cmd *cmd;
5311 struct megasas_dcmd_frame *dcmd;
5312
5313 cmd = megasas_get_cmd(instance);
5314
5315 if (!cmd) {
5316 dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
5317 return -ENOMEM;
5318 }
5319
5320
5321 dcmd = &cmd->frame->dcmd;
5322
5323 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5324 dcmd->mbox.b[0] = crash_buf_state;
5325 dcmd->cmd = MFI_CMD_DCMD;
5326 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5327 dcmd->sge_count = 1;
5328 dcmd->flags = MFI_FRAME_DIR_NONE;
5329 dcmd->timeout = 0;
5330 dcmd->pad_0 = 0;
5331 dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
5332 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
5333
5334 megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h,
5335 CRASH_DMA_BUF_SIZE);
5336
5337 if ((instance->adapter_type != MFI_SERIES) &&
5338 !instance->mask_interrupts)
5339 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5340 else
5341 ret = megasas_issue_polled(instance, cmd);
5342
5343 if (ret == DCMD_TIMEOUT) {
5344 switch (dcmd_timeout_ocr_possible(instance)) {
5345 case INITIATE_OCR:
5346 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5347 megasas_reset_fusion(instance->host,
5348 MFI_IO_TIMEOUT_OCR);
5349 break;
5350 case KILL_ADAPTER:
5351 megaraid_sas_kill_hba(instance);
5352 break;
5353 case IGNORE_TIMEOUT:
5354 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5355 __func__, __LINE__);
5356 break;
5357 }
5358 } else
5359 megasas_return_cmd(instance, cmd);
5360
5361 return ret;
5362 }
5363
5364 /**
5365 * megasas_issue_init_mfi - Initializes the FW
5366 * @instance: Adapter soft state
5367 *
5368 * Issues the INIT MFI cmd
5369 */
5370 static int
5371 megasas_issue_init_mfi(struct megasas_instance *instance)
5372 {
5373 __le32 context;
5374 struct megasas_cmd *cmd;
5375 struct megasas_init_frame *init_frame;
5376 struct megasas_init_queue_info *initq_info;
5377 dma_addr_t init_frame_h;
5378 dma_addr_t initq_info_h;
5379
5380 /*
5381 * Prepare an init frame. Note the init frame points to queue info
5382 * structure. Each frame has SGL allocated after first 64 bytes. For
5383 * this frame - since we don't need any SGL - we use SGL's space as
5384 * queue info structure
5385 *
5386 * We will not get a NULL command below. We just created the pool.
5387 */
5388 cmd = megasas_get_cmd(instance);
5389
5390 init_frame = (struct megasas_init_frame *)cmd->frame;
5391 initq_info = (struct megasas_init_queue_info *)
5392 ((unsigned long)init_frame + 64);
5393
5394 init_frame_h = cmd->frame_phys_addr;
5395 initq_info_h = init_frame_h + 64;
5396
5397 context = init_frame->context;
5398 memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
5399 memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
5400 init_frame->context = context;
5401
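/*
 * Only the low 32 bits of the reply queue, producer and consumer index
 * addresses are programmed below; the high halves stay zero from the
 * memset, which assumes these buffers sit in 32-bit addressable DMA memory.
 */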
5402 initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
5403 initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
5404
5405 initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
5406 initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
5407
5408 init_frame->cmd = MFI_CMD_INIT;
5409 init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
5410 init_frame->queue_info_new_phys_addr_lo =
5411 cpu_to_le32(lower_32_bits(initq_info_h));
5412 init_frame->queue_info_new_phys_addr_hi =
5413 cpu_to_le32(upper_32_bits(initq_info_h));
5414
5415 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
5416
5417 /*
5418 * disable the intr before firing the init frame to FW
5419 */
5420 instance->instancet->disable_intr(instance);
5421
5422 /*
5423 * Issue the init frame in polled mode
5424 */
5425
5426 if (megasas_issue_polled(instance, cmd)) {
5427 dev_err(&instance->pdev->dev, "Failed to init firmware\n");
5428 megasas_return_cmd(instance, cmd);
5429 goto fail_fw_init;
5430 }
5431
5432 megasas_return_cmd(instance, cmd);
5433
5434 return 0;
5435
5436 fail_fw_init:
5437 return -EINVAL;
5438 }
5439
5440 static u32
5441 megasas_init_adapter_mfi(struct megasas_instance *instance)
5442 {
5443 u32 context_sz;
5444 u32 reply_q_sz;
5445
5446 /*
5447 * Get various operational parameters from status register
5448 */
5449 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
5450 /*
5451 * Reduce the max supported cmds by 1. This is to ensure that the
5452 * reply_q_sz (1 more than the max cmd that driver may send)
5453 * does not exceed max cmds that the FW can support
5454 */
5455 instance->max_fw_cmds = instance->max_fw_cmds-1;
5456 instance->max_mfi_cmds = instance->max_fw_cmds;
5457 instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >>
5458 0x10;
5459 /*
5460 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
5461 * are reserved for IOCTL + driver's internal DCMDs.
5462 */
5463 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
5464 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
5465 instance->max_scsi_cmds = (instance->max_fw_cmds -
5466 MEGASAS_SKINNY_INT_CMDS);
5467 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
5468 } else {
5469 instance->max_scsi_cmds = (instance->max_fw_cmds -
5470 MEGASAS_INT_CMDS);
5471 sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
5472 }
5473
5474 instance->cur_can_queue = instance->max_scsi_cmds;
5475 /*
5476 * Create a pool of commands
5477 */
5478 if (megasas_alloc_cmds(instance))
5479 goto fail_alloc_cmds;
5480
5481 /*
5482 * Allocate memory for reply queue. Length of reply queue should
5483 * be _one_ more than the maximum commands handled by the firmware.
5484 *
5485 * Note: When FW completes commands, it places corresponding context
5486 * values in this circular reply queue. This circular queue is a fairly
5487 * typical producer-consumer queue. FW is the producer (of completed
5488 * commands) and the driver is the consumer.
5489 */
5490 context_sz = sizeof(u32);
5491 reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
5492
5493 instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev,
5494 reply_q_sz, &instance->reply_queue_h, GFP_KERNEL);
5495
5496 if (!instance->reply_queue) {
5497 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
5498 goto fail_reply_queue;
5499 }
5500
5501 if (megasas_issue_init_mfi(instance))
5502 goto fail_fw_init;
5503
5504 if (megasas_get_ctrl_info(instance)) {
5505 dev_err(&instance->pdev->dev, "(%d): Could get controller info "
5506 "Fail from %s %d\n", instance->unique_id,
5507 __func__, __LINE__);
5508 goto fail_fw_init;
5509 }
5510
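/* Bit 26 of the outbound status register indicates IEEE SGL support;
 * it is used to set flag_ieee below.
 */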
5511 instance->fw_support_ieee = 0;
5512 instance->fw_support_ieee =
5513 (instance->instancet->read_fw_status_reg(instance) &
5514 0x04000000);
5515
5516 dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
5517 instance->fw_support_ieee);
5518
5519 if (instance->fw_support_ieee)
5520 instance->flag_ieee = 1;
5521
5522 return 0;
5523
5524 fail_fw_init:
5525
5526 dma_free_coherent(&instance->pdev->dev, reply_q_sz,
5527 instance->reply_queue, instance->reply_queue_h);
5528 fail_reply_queue:
5529 megasas_free_cmds(instance);
5530
5531 fail_alloc_cmds:
5532 return 1;
5533 }
5534
5535 static
5536 void megasas_setup_irq_poll(struct megasas_instance *instance)
5537 {
5538 struct megasas_irq_context *irq_ctx;
5539 u32 count, i;
5540
5541 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
5542
5543 /* Initialize IRQ poll */
5544 for (i = 0; i < count; i++) {
5545 irq_ctx = &instance->irq_context[i];
5546 irq_ctx->os_irq = pci_irq_vector(instance->pdev, i);
5547 irq_ctx->irq_poll_scheduled = false;
5548 irq_poll_init(&irq_ctx->irqpoll,
5549 instance->threshold_reply_count,
5550 megasas_irqpoll);
5551 }
5552 }
5553
5554 /*
5555 * megasas_setup_irqs_ioapic - register legacy interrupts.
5556 * @instance: Adapter soft state
5557 *
5558 * Do not enable interrupts, only set up ISRs.
5559 *
5560 * Return 0 on success.
5561 */
5562 static int
5563 megasas_setup_irqs_ioapic(struct megasas_instance *instance)
5564 {
5565 struct pci_dev *pdev;
5566
5567 pdev = instance->pdev;
5568 instance->irq_context[0].instance = instance;
5569 instance->irq_context[0].MSIxIndex = 0;
5570 snprintf(instance->irq_context->name, MEGASAS_MSIX_NAME_LEN, "%s%u",
5571 "megasas", instance->host->host_no);
5572 if (request_irq(pci_irq_vector(pdev, 0),
5573 instance->instancet->service_isr, IRQF_SHARED,
5574 instance->irq_context->name, &instance->irq_context[0])) {
5575 dev_err(&instance->pdev->dev,
5576 "Failed to register IRQ from %s %d\n",
5577 __func__, __LINE__);
5578 return -1;
5579 }
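/* A single IO-APIC/legacy vector leaves no room for high IOPS queues,
 * so run in latency perf mode with all replies on index 0.
 */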
5580 instance->perf_mode = MR_LATENCY_PERF_MODE;
5581 instance->low_latency_index_start = 0;
5582 return 0;
5583 }
5584
5585 /**
5586 * megasas_setup_irqs_msix - register MSI-x interrupts.
5587 * @instance: Adapter soft state
5588 * @is_probe: Driver probe check
5589 *
5590 * Do not enable interrupts, only set up ISRs.
5591 *
5592 * Return 0 on success.
5593 */
5594 static int
5595 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
5596 {
5597 int i, j;
5598 struct pci_dev *pdev;
5599
5600 pdev = instance->pdev;
5601
5602 /* Try MSI-x */
5603 for (i = 0; i < instance->msix_vectors; i++) {
5604 instance->irq_context[i].instance = instance;
5605 instance->irq_context[i].MSIxIndex = i;
5606 snprintf(instance->irq_context[i].name, MEGASAS_MSIX_NAME_LEN, "%s%u-msix%u",
5607 "megasas", instance->host->host_no, i);
5608 if (request_irq(pci_irq_vector(pdev, i),
5609 instance->instancet->service_isr, 0, instance->irq_context[i].name,
5610 &instance->irq_context[i])) {
5611 dev_err(&instance->pdev->dev,
5612 "Failed to register IRQ for vector %d.\n", i);
5613 for (j = 0; j < i; j++) {
5614 if (j < instance->low_latency_index_start)
5615 irq_set_affinity_hint(
5616 pci_irq_vector(pdev, j), NULL);
5617 free_irq(pci_irq_vector(pdev, j),
5618 &instance->irq_context[j]);
5619 }
5620 /* Retry irq register for IO_APIC*/
5621 instance->msix_vectors = 0;
5622 instance->msix_load_balance = false;
5623 if (is_probe) {
5624 pci_free_irq_vectors(instance->pdev);
5625 return megasas_setup_irqs_ioapic(instance);
5626 } else {
5627 return -1;
5628 }
5629 }
5630 }
5631
5632 return 0;
5633 }
5634
5635 /*
5636 * megasas_destroy_irqs- unregister interrupts.
5637 * @instance: Adapter soft state
5638 * return: void
5639 */
5640 static void
5641 megasas_destroy_irqs(struct megasas_instance *instance) {
5642
5643 int i;
5644 int count;
5645 struct megasas_irq_context *irq_ctx;
5646
5647 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
5648 if (instance->adapter_type != MFI_SERIES) {
5649 for (i = 0; i < count; i++) {
5650 irq_ctx = &instance->irq_context[i];
5651 irq_poll_disable(&irq_ctx->irqpoll);
5652 }
5653 }
5654
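/* Affinity hints are only set on the high IOPS vectors (indices below
 * low_latency_index_start), so only those need to be cleared here.
 */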
5655 if (instance->msix_vectors)
5656 for (i = 0; i < instance->msix_vectors; i++) {
5657 if (i < instance->low_latency_index_start)
5658 irq_set_affinity_hint(
5659 pci_irq_vector(instance->pdev, i), NULL);
5660 free_irq(pci_irq_vector(instance->pdev, i),
5661 &instance->irq_context[i]);
5662 }
5663 else
5664 free_irq(pci_irq_vector(instance->pdev, 0),
5665 &instance->irq_context[0]);
5666 }
5667
5668 /**
5669 * megasas_setup_jbod_map - setup jbod map for FP seq_number.
5670 * @instance: Adapter soft state
5671 *
5672 * Return 0 on success.
5673 */
5674 void
5675 megasas_setup_jbod_map(struct megasas_instance *instance)
5676 {
5677 int i;
5678 struct fusion_context *fusion = instance->ctrl_context;
5679 u32 pd_seq_map_sz;
5680
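/* MR_PD_CFG_SEQ_NUM_SYNC already embeds one MR_PD_CFG_SEQ entry,
 * hence the (MAX_PHYSICAL_DEVICES - 1) in the size below.
 */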
5681 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
5682 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
5683
5684 instance->use_seqnum_jbod_fp =
5685 instance->support_seqnum_jbod_fp;
5686 if (reset_devices || !fusion ||
5687 !instance->support_seqnum_jbod_fp) {
5688 dev_info(&instance->pdev->dev,
5689 "JBOD sequence map is disabled %s %d\n",
5690 __func__, __LINE__);
5691 instance->use_seqnum_jbod_fp = false;
5692 return;
5693 }
5694
5695 if (fusion->pd_seq_sync[0])
5696 goto skip_alloc;
5697
5698 for (i = 0; i < JBOD_MAPS_COUNT; i++) {
5699 fusion->pd_seq_sync[i] = dma_alloc_coherent
5700 (&instance->pdev->dev, pd_seq_map_sz,
5701 &fusion->pd_seq_phys[i], GFP_KERNEL);
5702 if (!fusion->pd_seq_sync[i]) {
5703 dev_err(&instance->pdev->dev,
5704 "Failed to allocate memory from %s %d\n",
5705 __func__, __LINE__);
5706 if (i == 1) {
5707 dma_free_coherent(&instance->pdev->dev,
5708 pd_seq_map_sz, fusion->pd_seq_sync[0],
5709 fusion->pd_seq_phys[0]);
5710 fusion->pd_seq_sync[0] = NULL;
5711 }
5712 instance->use_seqnum_jbod_fp = false;
5713 return;
5714 }
5715 }
5716
5717 skip_alloc:
5718 if (!megasas_sync_pd_seq_num(instance, false) &&
5719 !megasas_sync_pd_seq_num(instance, true))
5720 instance->use_seqnum_jbod_fp = true;
5721 else
5722 instance->use_seqnum_jbod_fp = false;
5723 }
5724
5725 static void megasas_setup_reply_map(struct megasas_instance *instance)
5726 {
5727 const struct cpumask *mask;
5728 unsigned int queue, cpu, low_latency_index_start;
5729
5730 low_latency_index_start = instance->low_latency_index_start;
5731
5732 for (queue = low_latency_index_start; queue < instance->msix_vectors; queue++) {
5733 mask = pci_irq_get_affinity(instance->pdev, queue);
5734 if (!mask)
5735 goto fallback;
5736
5737 for_each_cpu(cpu, mask)
5738 instance->reply_map[cpu] = queue;
5739 }
5740 return;
5741
5742 fallback:
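/*
 * No affinity information available: assign CPUs to reply queues
 * round-robin, skipping the high IOPS queues below low_latency_index_start.
 */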
5743 queue = low_latency_index_start;
5744 for_each_possible_cpu(cpu) {
5745 instance->reply_map[cpu] = queue;
5746 if (queue == (instance->msix_vectors - 1))
5747 queue = low_latency_index_start;
5748 else
5749 queue++;
5750 }
5751 }
5752
5753 /**
5754 * megasas_get_device_list - Get the PD and LD device list from FW.
5755 * @instance: Adapter soft state
5756 * @return: Success or failure
5757 *
5758 * Issue DCMDs to Firmware to get the PD and LD list.
5759 * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
5760 * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
5761 */
5762 static
5763 int megasas_get_device_list(struct megasas_instance *instance)
5764 {
5765 memset(instance->pd_list, 0,
5766 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
5767 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5768
5769 if (instance->enable_fw_dev_list) {
5770 if (megasas_host_device_list_query(instance, true))
5771 return FAILED;
5772 } else {
5773 if (megasas_get_pd_list(instance) < 0) {
5774 dev_err(&instance->pdev->dev, "failed to get PD list\n");
5775 return FAILED;
5776 }
5777
5778 if (megasas_ld_list_query(instance,
5779 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) {
5780 dev_err(&instance->pdev->dev, "failed to get LD list\n");
5781 return FAILED;
5782 }
5783 }
5784
5785 return SUCCESS;
5786 }
5787
5788 /**
5789 * megasas_set_high_iops_queue_affinity_hint - Set affinity hint for high IOPS queues
5790 * @instance: Adapter soft state
5791 * return: void
5792 */
5793 static inline void
5794 megasas_set_high_iops_queue_affinity_hint(struct megasas_instance *instance)
5795 {
5796 int i;
5797 int local_numa_node;
5798
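/* In balanced mode, pin the high IOPS (interrupt coalesced) vectors to
 * the controller's local NUMA node.
 */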
5799 if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
5800 local_numa_node = dev_to_node(&instance->pdev->dev);
5801
5802 for (i = 0; i < instance->low_latency_index_start; i++)
5803 irq_set_affinity_hint(pci_irq_vector(instance->pdev, i),
5804 cpumask_of_node(local_numa_node));
5805 }
5806 }
5807
5808 static int
5809 __megasas_alloc_irq_vectors(struct megasas_instance *instance)
5810 {
5811 int i, irq_flags;
5812 struct irq_affinity desc = { .pre_vectors = instance->low_latency_index_start };
5813 struct irq_affinity *descp = &desc;
5814
5815 irq_flags = PCI_IRQ_MSIX;
5816
5817 if (instance->smp_affinity_enable)
5818 irq_flags |= PCI_IRQ_AFFINITY;
5819 else
5820 descp = NULL;
5821
5822 i = pci_alloc_irq_vectors_affinity(instance->pdev,
5823 instance->low_latency_index_start,
5824 instance->msix_vectors, irq_flags, descp);
5825
5826 return i;
5827 }
5828
5829 /**
5830 * megasas_alloc_irq_vectors - Allocate IRQ vectors/enable MSI-x vectors
5831 * @instance: Adapter soft state
5832 * return: void
5833 */
5834 static void
5835 megasas_alloc_irq_vectors(struct megasas_instance *instance)
5836 {
5837 int i;
5838 unsigned int num_msix_req;
5839
5840 i = __megasas_alloc_irq_vectors(instance);
5841
5842 if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
5843 (i != instance->msix_vectors)) {
5844 if (instance->msix_vectors)
5845 pci_free_irq_vectors(instance->pdev);
5846 /* Disable Balanced IOPS mode and try realloc vectors */
5847 instance->perf_mode = MR_LATENCY_PERF_MODE;
5848 instance->low_latency_index_start = 1;
5849 num_msix_req = num_online_cpus() + instance->low_latency_index_start;
5850
5851 instance->msix_vectors = min(num_msix_req,
5852 instance->msix_vectors);
5853
5854 i = __megasas_alloc_irq_vectors(instance);
5855
5856 }
5857
5858 dev_info(&instance->pdev->dev,
5859 "requested/available msix %d/%d\n", instance->msix_vectors, i);
5860
5861 if (i > 0)
5862 instance->msix_vectors = i;
5863 else
5864 instance->msix_vectors = 0;
5865
5866 if (instance->smp_affinity_enable)
5867 megasas_set_high_iops_queue_affinity_hint(instance);
5868 }
5869
5870 /**
5871 * megasas_init_fw - Initializes the FW
5872 * @instance: Adapter soft state
5873 *
5874 * This is the main function for initializing firmware
5875 */
5876
5877 static int megasas_init_fw(struct megasas_instance *instance)
5878 {
5879 u32 max_sectors_1;
5880 u32 max_sectors_2, tmp_sectors, msix_enable;
5881 u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg;
5882 resource_size_t base_addr;
5883 void *base_addr_phys;
5884 struct megasas_ctrl_info *ctrl_info = NULL;
5885 unsigned long bar_list;
5886 int i, j, loop;
5887 struct IOV_111 *iovPtr;
5888 struct fusion_context *fusion;
5889 bool intr_coalescing;
5890 unsigned int num_msix_req;
5891 u16 lnksta, speed;
5892
5893 fusion = instance->ctrl_context;
5894
5895 /* Find first memory bar */
5896 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
5897 instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
5898 if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
5899 "megasas: LSI")) {
5900 dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
5901 return -EBUSY;
5902 }
5903
5904 base_addr = pci_resource_start(instance->pdev, instance->bar);
5905 instance->reg_set = ioremap(base_addr, 8192);
5906
5907 if (!instance->reg_set) {
5908 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
5909 goto fail_ioremap;
5910 }
5911
5912 base_addr_phys = &base_addr;
5913 dev_printk(KERN_DEBUG, &instance->pdev->dev,
5914 "BAR:0x%lx BAR's base_addr(phys):%pa mapped virt_addr:0x%p\n",
5915 instance->bar, base_addr_phys, instance->reg_set);
5916
5917 if (instance->adapter_type != MFI_SERIES)
5918 instance->instancet = &megasas_instance_template_fusion;
5919 else {
5920 switch (instance->pdev->device) {
5921 case PCI_DEVICE_ID_LSI_SAS1078R:
5922 case PCI_DEVICE_ID_LSI_SAS1078DE:
5923 instance->instancet = &megasas_instance_template_ppc;
5924 break;
5925 case PCI_DEVICE_ID_LSI_SAS1078GEN2:
5926 case PCI_DEVICE_ID_LSI_SAS0079GEN2:
5927 instance->instancet = &megasas_instance_template_gen2;
5928 break;
5929 case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
5930 case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
5931 instance->instancet = &megasas_instance_template_skinny;
5932 break;
5933 case PCI_DEVICE_ID_LSI_SAS1064R:
5934 case PCI_DEVICE_ID_DELL_PERC5:
5935 default:
5936 instance->instancet = &megasas_instance_template_xscale;
5937 instance->pd_list_not_supported = 1;
5938 break;
5939 }
5940 }
5941
5942 if (megasas_transition_to_ready(instance, 0)) {
5943 dev_info(&instance->pdev->dev,
5944 "Failed to transition controller to ready from %s!\n",
5945 __func__);
5946 if (instance->adapter_type != MFI_SERIES) {
5947 status_reg = instance->instancet->read_fw_status_reg(
5948 instance);
5949 if (status_reg & MFI_RESET_ADAPTER) {
5950 if (megasas_adp_reset_wait_for_ready
5951 (instance, true, 0) == FAILED)
5952 goto fail_ready_state;
5953 } else {
5954 goto fail_ready_state;
5955 }
5956 } else {
5957 atomic_set(&instance->fw_reset_no_pci_access, 1);
5958 instance->instancet->adp_reset
5959 (instance, instance->reg_set);
5960 atomic_set(&instance->fw_reset_no_pci_access, 0);
5961
5962 /* wait for about 30 seconds before retrying */
5963 ssleep(30);
5964
5965 if (megasas_transition_to_ready(instance, 0))
5966 goto fail_ready_state;
5967 }
5968
5969 dev_info(&instance->pdev->dev,
5970 "FW restarted successfully from %s!\n",
5971 __func__);
5972 }
5973
5974 megasas_init_ctrl_params(instance);
5975
5976 if (megasas_set_dma_mask(instance))
5977 goto fail_ready_state;
5978
5979 if (megasas_alloc_ctrl_mem(instance))
5980 goto fail_alloc_dma_buf;
5981
5982 if (megasas_alloc_ctrl_dma_buffers(instance))
5983 goto fail_alloc_dma_buf;
5984
5985 fusion = instance->ctrl_context;
5986
5987 if (instance->adapter_type >= VENTURA_SERIES) {
5988 scratch_pad_2 =
5989 megasas_readl(instance,
5990 &instance->reg_set->outbound_scratch_pad_2);
5991 instance->max_raid_mapsize = ((scratch_pad_2 >>
5992 MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
5993 MR_MAX_RAID_MAP_SIZE_MASK);
5994 }
5995
5996 instance->enable_sdev_max_qd = enable_sdev_max_qd;
5997
5998 switch (instance->adapter_type) {
5999 case VENTURA_SERIES:
6000 fusion->pcie_bw_limitation = true;
6001 break;
6002 case AERO_SERIES:
6003 fusion->r56_div_offload = true;
6004 break;
6005 default:
6006 break;
6007 }
6008
6009 /* Check if MSI-X is supported while in ready state */
6010 msix_enable = (instance->instancet->read_fw_status_reg(instance) &
6011 0x4000000) >> 0x1a;
6012 if (msix_enable && !msix_disable) {
6013
6014 scratch_pad_1 = megasas_readl
6015 (instance, &instance->reg_set->outbound_scratch_pad_1);
6016 /* Check max MSI-X vectors */
6017 if (fusion) {
6018 if (instance->adapter_type == THUNDERBOLT_SERIES) {
6019 /* Thunderbolt Series*/
6020 instance->msix_vectors = (scratch_pad_1
6021 & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
6022 } else {
6023 instance->msix_vectors = ((scratch_pad_1
6024 & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
6025 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
6026
6027 /*
6028 * For Invader series, > 8 MSI-x vectors
6029 * supported by FW/HW implies combined
6030 * reply queue mode is enabled.
6031 * For Ventura series, > 16 MSI-x vectors
6032 * supported by FW/HW implies combined
6033 * reply queue mode is enabled.
6034 */
6035 switch (instance->adapter_type) {
6036 case INVADER_SERIES:
6037 if (instance->msix_vectors > 8)
6038 instance->msix_combined = true;
6039 break;
6040 case AERO_SERIES:
6041 case VENTURA_SERIES:
6042 if (instance->msix_vectors > 16)
6043 instance->msix_combined = true;
6044 break;
6045 }
6046
6047 if (rdpq_enable)
6048 instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ?
6049 1 : 0;
6050
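/* Without combined reply queues on Invader and later parts, spread IOs
 * across all reply queues (msix_load_balance) instead of relying on CPU
 * affinity.
 */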
6051 if (instance->adapter_type >= INVADER_SERIES &&
6052 !instance->msix_combined) {
6053 instance->msix_load_balance = true;
6054 instance->smp_affinity_enable = false;
6055 }
6056
6057 /* Save 1-15 reply post index address to local memory
6058 * Index 0 is already saved from reg offset
6059 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
6060 */
6061 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
6062 instance->reply_post_host_index_addr[loop] =
6063 (u32 __iomem *)
6064 ((u8 __iomem *)instance->reg_set +
6065 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
6066 + (loop * 0x10));
6067 }
6068 }
6069
6070 dev_info(&instance->pdev->dev,
6071 "firmware supports msix\t: (%d)",
6072 instance->msix_vectors);
6073 if (msix_vectors)
6074 instance->msix_vectors = min(msix_vectors,
6075 instance->msix_vectors);
6076 } else /* MFI adapters */
6077 instance->msix_vectors = 1;
6078
6079
6080 /*
6081 * For Aero (if some conditions are met), driver will configure a
6082 * few additional reply queues with interrupt coalescing enabled.
6083 * These queues with interrupt coalescing enabled are called
6084 * High IOPS queues and rest of reply queues (based on number of
6085 * logical CPUs) are termed as Low latency queues.
6086 *
6087 * Total Number of reply queues = High IOPS queues + low latency queues
6088 *
6089 * For rest of fusion adapters, 1 additional reply queue will be
6090 * reserved for management commands, rest of reply queues
6091 * (based on number of logical CPUs) will be used for IOs and
6092 * referenced as IO queues.
6093 * Total Number of reply queues = 1 + IO queues
6094 *
6095 * MFI adapters support a single MSI-x vector, so a single reply queue
6096 * will be used for IO and management commands.
6097 */
6098
6099 intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
6100 true : false;
6101 if (intr_coalescing &&
6102 (num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) &&
6103 (instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES))
6104 instance->perf_mode = MR_BALANCED_PERF_MODE;
6105 else
6106 instance->perf_mode = MR_LATENCY_PERF_MODE;
6107
6108
6109 if (instance->adapter_type == AERO_SERIES) {
6110 pcie_capability_read_word(instance->pdev, PCI_EXP_LNKSTA, &lnksta);
6111 speed = lnksta & PCI_EXP_LNKSTA_CLS;
6112
6113 /*
6114 * For Aero, if PCIe link speed is <16 GT/s, then driver should operate
6115 * in latency perf mode and enable R1 PCI bandwidth algorithm
6116 */
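/* PCI_EXP_LNKSTA_CLS encoding: 1=2.5, 2=5, 3=8, 4=16 GT/s,
 * so a value below 0x4 means the link runs slower than 16 GT/s.
 */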
6117 if (speed < 0x4) {
6118 instance->perf_mode = MR_LATENCY_PERF_MODE;
6119 fusion->pcie_bw_limitation = true;
6120 }
6121
6122 /*
6123 * Performance mode settings provided through the module parameter perf_mode will
6124 * take effect only for:
6125 * 1. Aero family of adapters.
6126 * 2. When user sets module parameter- perf_mode in range of 0-2.
6127 */
6128 if ((perf_mode >= MR_BALANCED_PERF_MODE) &&
6129 (perf_mode <= MR_LATENCY_PERF_MODE))
6130 instance->perf_mode = perf_mode;
6131 /*
6132 * If intr coalescing is not supported by controller FW, then IOPS
6133 * and Balanced modes are not feasible.
6134 */
6135 if (!intr_coalescing)
6136 instance->perf_mode = MR_LATENCY_PERF_MODE;
6137
6138 }
6139
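/*
 * In balanced mode the first MR_HIGH_IOPS_QUEUE_COUNT vectors are the
 * interrupt-coalesced high IOPS queues; otherwise only vector 0 is set
 * aside (for non-IO/management replies).
 */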
6140 if (instance->perf_mode == MR_BALANCED_PERF_MODE)
6141 instance->low_latency_index_start =
6142 MR_HIGH_IOPS_QUEUE_COUNT;
6143 else
6144 instance->low_latency_index_start = 1;
6145
6146 num_msix_req = num_online_cpus() + instance->low_latency_index_start;
6147
6148 instance->msix_vectors = min(num_msix_req,
6149 instance->msix_vectors);
6150
6151 megasas_alloc_irq_vectors(instance);
6152 if (!instance->msix_vectors)
6153 instance->msix_load_balance = false;
6154 }
6155 /*
6156 * MSI-X host index 0 is common for all adapters.
6157 * It is used for all MPT based Adapters.
6158 */
6159 if (instance->msix_combined) {
6160 instance->reply_post_host_index_addr[0] =
6161 (u32 *)((u8 *)instance->reg_set +
6162 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
6163 } else {
6164 instance->reply_post_host_index_addr[0] =
6165 (u32 *)((u8 *)instance->reg_set +
6166 MPI2_REPLY_POST_HOST_INDEX_OFFSET);
6167 }
6168
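/* MSI-X unavailable or disabled: fall back to a single legacy vector. */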
6169 if (!instance->msix_vectors) {
6170 i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
6171 if (i < 0)
6172 goto fail_init_adapter;
6173 }
6174
6175 megasas_setup_reply_map(instance);
6176
6177 dev_info(&instance->pdev->dev,
6178 "current msix/online cpus\t: (%d/%d)\n",
6179 instance->msix_vectors, (unsigned int)num_online_cpus());
6180 dev_info(&instance->pdev->dev,
6181 "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
6182
6183 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
6184 (unsigned long)instance);
6185
6186 /*
6187 * Below are the default values for legacy firmware
6188 * (non-fusion based controllers).
6189 */
6190 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
6191 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
6192 /* Get operational params, sge flags, send init cmd to controller */
6193 if (instance->instancet->init_adapter(instance))
6194 goto fail_init_adapter;
6195
6196 if (instance->adapter_type >= VENTURA_SERIES) {
6197 scratch_pad_3 =
6198 megasas_readl(instance,
6199 &instance->reg_set->outbound_scratch_pad_3);
6200 if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >=
6201 MR_DEFAULT_NVME_PAGE_SHIFT)
6202 instance->nvme_page_size =
6203 (1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK));
6204
6205 dev_info(&instance->pdev->dev,
6206 "NVME page size\t: (%d)\n", instance->nvme_page_size);
6207 }
6208
6209 if (instance->msix_vectors ?
6210 megasas_setup_irqs_msix(instance, 1) :
6211 megasas_setup_irqs_ioapic(instance))
6212 goto fail_init_adapter;
6213
6214 if (instance->adapter_type != MFI_SERIES)
6215 megasas_setup_irq_poll(instance);
6216
6217 instance->instancet->enable_intr(instance);
6218
6219 dev_info(&instance->pdev->dev, "INIT adapter done\n");
6220
6221 megasas_setup_jbod_map(instance);
6222
6223 if (megasas_get_device_list(instance) != SUCCESS) {
6224 dev_err(&instance->pdev->dev,
6225 "%s: megasas_get_device_list failed\n",
6226 __func__);
6227 goto fail_get_ld_pd_list;
6228 }
6229
6230 /* stream detection initialization */
6231 if (instance->adapter_type >= VENTURA_SERIES) {
6232 fusion->stream_detect_by_ld =
6233 kcalloc(MAX_LOGICAL_DRIVES_EXT,
6234 sizeof(struct LD_STREAM_DETECT *),
6235 GFP_KERNEL);
6236 if (!fusion->stream_detect_by_ld) {
6237 dev_err(&instance->pdev->dev,
6238 "unable to allocate stream detection for pool of LDs\n");
6239 goto fail_get_ld_pd_list;
6240 }
6241 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
6242 fusion->stream_detect_by_ld[i] =
6243 kzalloc(sizeof(struct LD_STREAM_DETECT),
6244 GFP_KERNEL);
6245 if (!fusion->stream_detect_by_ld[i]) {
6246 dev_err(&instance->pdev->dev,
6247 "unable to allocate stream detect by LD\n ");
6248 for (j = 0; j < i; ++j)
6249 kfree(fusion->stream_detect_by_ld[j]);
6250 kfree(fusion->stream_detect_by_ld);
6251 fusion->stream_detect_by_ld = NULL;
6252 goto fail_get_ld_pd_list;
6253 }
6254 fusion->stream_detect_by_ld[i]->mru_bit_map
6255 = MR_STREAM_BITMAP;
6256 }
6257 }
6258
6259 /*
6260 * Compute the max allowed sectors per IO: The controller info has two
6261 * limits on max sectors. Driver should use the minimum of these two.
6262 *
6263 * 1 << stripe_sz_ops.min = max sectors per strip
6264 *
6265 * Note that older firmwares ( < FW ver 30) didn't report information
6266 * to calculate max_sectors_1. So the number ended up as zero always.
6267 */
6268 tmp_sectors = 0;
6269 ctrl_info = instance->ctrl_info_buf;
6270
6271 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
6272 le16_to_cpu(ctrl_info->max_strips_per_io);
6273 max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
6274
6275 tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
6276
6277 instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
6278 instance->passive = ctrl_info->cluster.passive;
6279 memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
6280 instance->UnevenSpanSupport =
6281 ctrl_info->adapterOperations2.supportUnevenSpans;
6282 if (instance->UnevenSpanSupport) {
6283 struct fusion_context *fusion = instance->ctrl_context;
6284 if (MR_ValidateMapInfo(instance, instance->map_id))
6285 fusion->fast_path_io = 1;
6286 else
6287 fusion->fast_path_io = 0;
6288
6289 }
6290 if (ctrl_info->host_interface.SRIOV) {
6291 instance->requestorId = ctrl_info->iov.requestorId;
6292 if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
6293 if (!ctrl_info->adapterOperations2.activePassive)
6294 instance->PlasmaFW111 = 1;
6295
6296 dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
6297 instance->PlasmaFW111 ? "1.11" : "new");
6298
6299 if (instance->PlasmaFW111) {
6300 iovPtr = (struct IOV_111 *)
6301 ((unsigned char *)ctrl_info + IOV_111_OFFSET);
6302 instance->requestorId = iovPtr->requestorId;
6303 }
6304 }
6305 dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
6306 instance->requestorId);
6307 }
6308
6309 instance->crash_dump_fw_support =
6310 ctrl_info->adapterOperations3.supportCrashDump;
6311 instance->crash_dump_drv_support =
6312 (instance->crash_dump_fw_support &&
6313 instance->crash_dump_buf);
6314 if (instance->crash_dump_drv_support)
6315 megasas_set_crash_dump_params(instance,
6316 MR_CRASH_BUF_TURN_OFF);
6317
6318 else {
6319 if (instance->crash_dump_buf)
6320 dma_free_coherent(&instance->pdev->dev,
6321 CRASH_DMA_BUF_SIZE,
6322 instance->crash_dump_buf,
6323 instance->crash_dump_h);
6324 instance->crash_dump_buf = NULL;
6325 }
6326
6327 if (instance->snapdump_wait_time) {
6328 megasas_get_snapdump_properties(instance);
6329 dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n",
6330 instance->snapdump_wait_time);
6331 }
6332
6333 dev_info(&instance->pdev->dev,
6334 "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
6335 le16_to_cpu(ctrl_info->pci.vendor_id),
6336 le16_to_cpu(ctrl_info->pci.device_id),
6337 le16_to_cpu(ctrl_info->pci.sub_vendor_id),
6338 le16_to_cpu(ctrl_info->pci.sub_device_id));
6339 dev_info(&instance->pdev->dev, "unevenspan support : %s\n",
6340 instance->UnevenSpanSupport ? "yes" : "no");
6341 dev_info(&instance->pdev->dev, "firmware crash dump : %s\n",
6342 instance->crash_dump_drv_support ? "yes" : "no");
6343 dev_info(&instance->pdev->dev, "JBOD sequence map : %s\n",
6344 instance->use_seqnum_jbod_fp ? "enabled" : "disabled");
6345
6346 instance->max_sectors_per_req = instance->max_num_sge *
6347 SGE_BUFFER_SIZE / 512;
6348 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
6349 instance->max_sectors_per_req = tmp_sectors;
6350
6351 /* Check for valid throttlequeuedepth module parameter */
6352 if (throttlequeuedepth &&
6353 throttlequeuedepth <= instance->max_scsi_cmds)
6354 instance->throttlequeuedepth = throttlequeuedepth;
6355 else
6356 instance->throttlequeuedepth =
6357 MEGASAS_THROTTLE_QUEUE_DEPTH;
6358
6359 if ((resetwaittime < 1) ||
6360 (resetwaittime > MEGASAS_RESET_WAIT_TIME))
6361 resetwaittime = MEGASAS_RESET_WAIT_TIME;
6362
6363 if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
6364 scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
6365
6366 /* Launch SR-IOV heartbeat timer */
6367 if (instance->requestorId) {
6368 if (!megasas_sriov_start_heartbeat(instance, 1)) {
6369 megasas_start_timer(instance);
6370 } else {
6371 instance->skip_heartbeat_timer_del = 1;
6372 goto fail_get_ld_pd_list;
6373 }
6374 }
6375
6376 /*
6377 * Create and start watchdog thread which will monitor
6378 * controller state every 1 sec and trigger OCR when
6379 * it enters fault state
6380 */
6381 if (instance->adapter_type != MFI_SERIES)
6382 if (megasas_fusion_start_watchdog(instance) != SUCCESS)
6383 goto fail_start_watchdog;
6384
6385 return 0;
6386
6387 fail_start_watchdog:
6388 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6389 del_timer_sync(&instance->sriov_heartbeat_timer);
6390 fail_get_ld_pd_list:
6391 instance->instancet->disable_intr(instance);
6392 megasas_destroy_irqs(instance);
6393 fail_init_adapter:
6394 if (instance->msix_vectors)
6395 pci_free_irq_vectors(instance->pdev);
6396 instance->msix_vectors = 0;
6397 fail_alloc_dma_buf:
6398 megasas_free_ctrl_dma_buffers(instance);
6399 megasas_free_ctrl_mem(instance);
6400 fail_ready_state:
6401 iounmap(instance->reg_set);
6402
6403 fail_ioremap:
6404 pci_release_selected_regions(instance->pdev, 1<<instance->bar);
6405
6406 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
6407 __func__, __LINE__);
6408 return -EINVAL;
6409 }
6410
6411 /**
6412 * megasas_release_mfi - Reverses the FW initialization
6413 * @instance: Adapter soft state
6414 */
6415 static void megasas_release_mfi(struct megasas_instance *instance)
6416 {
6417 u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
6418
6419 if (instance->reply_queue)
6420 dma_free_coherent(&instance->pdev->dev, reply_q_sz,
6421 instance->reply_queue, instance->reply_queue_h);
6422
6423 megasas_free_cmds(instance);
6424
6425 iounmap(instance->reg_set);
6426
6427 pci_release_selected_regions(instance->pdev, 1<<instance->bar);
6428 }
6429
6430 /**
6431 * megasas_get_seq_num - Gets latest event sequence numbers
6432 * @instance: Adapter soft state
6433 * @eli: FW event log sequence numbers information
6434 *
6435 * FW maintains a log of all events in a non-volatile area. Upper layers would
6436 * usually find out the latest sequence number of the events, the seq number at
6437 * the boot etc. They would "read" all the events below the latest seq number
6438 * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq
6439 * number), they would subscribe to AEN (asynchronous event notification) and
6440 * wait for the events to happen.
6441 */
6442 static int
6443 megasas_get_seq_num(struct megasas_instance *instance,
6444 struct megasas_evt_log_info *eli)
6445 {
6446 struct megasas_cmd *cmd;
6447 struct megasas_dcmd_frame *dcmd;
6448 struct megasas_evt_log_info *el_info;
6449 dma_addr_t el_info_h = 0;
6450 int ret;
6451
6452 cmd = megasas_get_cmd(instance);
6453
6454 if (!cmd) {
6455 return -ENOMEM;
6456 }
6457
6458 dcmd = &cmd->frame->dcmd;
6459 el_info = dma_alloc_coherent(&instance->pdev->dev,
6460 sizeof(struct megasas_evt_log_info),
6461 &el_info_h, GFP_KERNEL);
6462 if (!el_info) {
6463 megasas_return_cmd(instance, cmd);
6464 return -ENOMEM;
6465 }
6466
6467 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6468
6469 dcmd->cmd = MFI_CMD_DCMD;
6470 dcmd->cmd_status = 0x0;
6471 dcmd->sge_count = 1;
6472 dcmd->flags = MFI_FRAME_DIR_READ;
6473 dcmd->timeout = 0;
6474 dcmd->pad_0 = 0;
6475 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
6476 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
6477
6478 megasas_set_dma_settings(instance, dcmd, el_info_h,
6479 sizeof(struct megasas_evt_log_info));
6480
6481 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
6482 if (ret != DCMD_SUCCESS) {
6483 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
6484 __func__, __LINE__);
6485 goto dcmd_failed;
6486 }
6487
6488 /*
6489 * Copy the data back into callers buffer
6490 */
6491 eli->newest_seq_num = el_info->newest_seq_num;
6492 eli->oldest_seq_num = el_info->oldest_seq_num;
6493 eli->clear_seq_num = el_info->clear_seq_num;
6494 eli->shutdown_seq_num = el_info->shutdown_seq_num;
6495 eli->boot_seq_num = el_info->boot_seq_num;
6496
6497 dcmd_failed:
6498 dma_free_coherent(&instance->pdev->dev,
6499 sizeof(struct megasas_evt_log_info),
6500 el_info, el_info_h);
6501
6502 megasas_return_cmd(instance, cmd);
6503
6504 return ret;
6505 }
6506
6507 /**
6508 * megasas_register_aen - Registers for asynchronous event notification
6509 * @instance: Adapter soft state
6510 * @seq_num: The starting sequence number
6511 * @class_locale_word: Class of the event
6512 *
6513 * This function subscribes for AEN for events beyond the @seq_num. It requests
6514 * to be notified if and only if the event is of type @class_locale
6515 */
6516 static int
6517 megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
6518 u32 class_locale_word)
6519 {
6520 int ret_val;
6521 struct megasas_cmd *cmd;
6522 struct megasas_dcmd_frame *dcmd;
6523 union megasas_evt_class_locale curr_aen;
6524 union megasas_evt_class_locale prev_aen;
6525
6526 /*
6527 * If there an AEN pending already (aen_cmd), check if the
6528 * class_locale of that pending AEN is inclusive of the new
6529 * AEN request we currently have. If it is, then we don't have
6530 * to do anything. In other words, whichever events the current
6531 * AEN request is subscribing to, have already been subscribed
6532 * to.
6533 *
6534 * If the old_cmd is _not_ inclusive, then we have to abort
6535 * that command, form a class_locale that is superset of both
6536 * old and current and re-issue to the FW
6537 */
6538
6539 curr_aen.word = class_locale_word;
6540
6541 if (instance->aen_cmd) {
6542
6543 prev_aen.word =
6544 le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
6545
6546 if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
6547 (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
6548 dev_info(&instance->pdev->dev,
6549 "%s %d out of range class %d send by application\n",
6550 __func__, __LINE__, curr_aen.members.class);
6551 return 0;
6552 }
6553
6554 /*
6555 * A class whose enum value is smaller is inclusive of all
6556 * higher values. If a PROGRESS (= -1) was previously
6557 * registered, then a new registration requests for higher
6558 * classes need not be sent to FW. They are automatically
6559 * included.
6560 *
6561 * Locale numbers don't have such hierarchy. They are bitmap
6562 * values
6563 */
6564 if ((prev_aen.members.class <= curr_aen.members.class) &&
6565 !((prev_aen.members.locale & curr_aen.members.locale) ^
6566 curr_aen.members.locale)) {
6567 /*
6568 * Previously issued event registration includes
6569 * current request. Nothing to do.
6570 */
6571 return 0;
6572 } else {
6573 curr_aen.members.locale |= prev_aen.members.locale;
6574
6575 if (prev_aen.members.class < curr_aen.members.class)
6576 curr_aen.members.class = prev_aen.members.class;
6577
6578 instance->aen_cmd->abort_aen = 1;
6579 ret_val = megasas_issue_blocked_abort_cmd(instance,
6580 instance->
6581 aen_cmd, 30);
6582
6583 if (ret_val) {
6584 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
6585 "previous AEN command\n");
6586 return ret_val;
6587 }
6588 }
6589 }
6590
6591 cmd = megasas_get_cmd(instance);
6592
6593 if (!cmd)
6594 return -ENOMEM;
6595
6596 dcmd = &cmd->frame->dcmd;
6597
6598 memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
6599
6600 /*
6601 * Prepare DCMD for aen registration
6602 */
6603 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6604
6605 dcmd->cmd = MFI_CMD_DCMD;
6606 dcmd->cmd_status = 0x0;
6607 dcmd->sge_count = 1;
6608 dcmd->flags = MFI_FRAME_DIR_READ;
6609 dcmd->timeout = 0;
6610 dcmd->pad_0 = 0;
6611 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
6612 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
6613 dcmd->mbox.w[0] = cpu_to_le32(seq_num);
6614 instance->last_seq_num = seq_num;
6615 dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
6616
6617 megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h,
6618 sizeof(struct megasas_evt_detail));
6619
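/*
 * If an AEN command is already pending at this point (e.g. another
 * registration raced in while this frame was being prepared), drop the
 * newly built frame; the pending command already covers event delivery.
 */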
6620 if (instance->aen_cmd != NULL) {
6621 megasas_return_cmd(instance, cmd);
6622 return 0;
6623 }
6624
6625 /*
6626 * Store reference to the cmd used to register for AEN. When an
6627 * application wants us to register for AEN, we have to abort this
6628 * cmd and re-register with a new EVENT LOCALE supplied by that app
6629 */
6630 instance->aen_cmd = cmd;
6631
6632 /*
6633 * Issue the aen registration frame
6634 */
6635 instance->instancet->issue_dcmd(instance, cmd);
6636
6637 return 0;
6638 }
6639
6640 /* megasas_get_target_prop - Send DCMD with below details to firmware.
6641 *
6642 * This DCMD will fetch few properties of LD/system PD defined
6643 * in MR_TARGET_DEV_PROPERTIES. eg. Queue Depth, MDTS value.
6644 *
6645 * This DCMD is sent by the driver whenever a new target is added to the OS.
6646 *
6647 * dcmd.opcode - MR_DCMD_DEV_GET_TARGET_PROP
6648 * dcmd.mbox.b[0] - DCMD is to be fired for LD or system PD.
6649 * 0 = system PD, 1 = LD.
6650 * dcmd.mbox.s[1] - TargetID for LD/system PD.
6651 * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES.
6652 *
6653 * @instance: Adapter soft state
6654 * @sdev: OS provided scsi device
6655 *
6656 * Returns 0 on success non-zero on failure.
6657 */
6658 int
6659 megasas_get_target_prop(struct megasas_instance *instance,
6660 struct scsi_device *sdev)
6661 {
6662 int ret;
6663 struct megasas_cmd *cmd;
6664 struct megasas_dcmd_frame *dcmd;
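/* Channels 0-1 carry system PDs and channels 2-3 carry LDs; (channel % 2)
 * folds the channel pair back into a per-type target index of 0-255.
 */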
6665 u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
6666 sdev->id;
6667
6668 cmd = megasas_get_cmd(instance);
6669
6670 if (!cmd) {
6671 dev_err(&instance->pdev->dev,
6672 "Failed to get cmd %s\n", __func__);
6673 return -ENOMEM;
6674 }
6675
6676 dcmd = &cmd->frame->dcmd;
6677
6678 memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
6679 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6680 dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
6681
6682 dcmd->mbox.s[1] = cpu_to_le16(targetId);
6683 dcmd->cmd = MFI_CMD_DCMD;
6684 dcmd->cmd_status = 0xFF;
6685 dcmd->sge_count = 1;
6686 dcmd->flags = MFI_FRAME_DIR_READ;
6687 dcmd->timeout = 0;
6688 dcmd->pad_0 = 0;
6689 dcmd->data_xfer_len =
6690 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
6691 dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
6692
6693 megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h,
6694 sizeof(struct MR_TARGET_PROPERTIES));
6695
6696 if ((instance->adapter_type != MFI_SERIES) &&
6697 !instance->mask_interrupts)
6698 ret = megasas_issue_blocked_cmd(instance,
6699 cmd, MFI_IO_TIMEOUT_SECS);
6700 else
6701 ret = megasas_issue_polled(instance, cmd);
6702
6703 switch (ret) {
6704 case DCMD_TIMEOUT:
6705 switch (dcmd_timeout_ocr_possible(instance)) {
6706 case INITIATE_OCR:
6707 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
6708 mutex_unlock(&instance->reset_mutex);
6709 megasas_reset_fusion(instance->host,
6710 MFI_IO_TIMEOUT_OCR);
6711 mutex_lock(&instance->reset_mutex);
6712 break;
6713 case KILL_ADAPTER:
6714 megaraid_sas_kill_hba(instance);
6715 break;
6716 case IGNORE_TIMEOUT:
6717 dev_info(&instance->pdev->dev,
6718 "Ignore DCMD timeout: %s %d\n",
6719 __func__, __LINE__);
6720 break;
6721 }
6722 break;
6723
6724 default:
6725 megasas_return_cmd(instance, cmd);
6726 }
6727 if (ret != DCMD_SUCCESS)
6728 dev_err(&instance->pdev->dev,
6729 "return from %s %d return value %d\n",
6730 __func__, __LINE__, ret);
6731
6732 return ret;
6733 }
6734
6735 /**
6736 * megasas_start_aen - Subscribes to AEN during driver load time
6737 * @instance: Adapter soft state
6738 */
6739 static int megasas_start_aen(struct megasas_instance *instance)
6740 {
6741 struct megasas_evt_log_info eli;
6742 union megasas_evt_class_locale class_locale;
6743
6744 /*
6745 * Get the latest sequence number from FW
6746 */
6747 memset(&eli, 0, sizeof(eli));
6748
6749 if (megasas_get_seq_num(instance, &eli))
6750 return -1;
6751
6752 /*
6753 * Register AEN with FW for latest sequence number plus 1
6754 */
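/*
 * MR_EVT_CLASS_DEBUG is the lowest class value, so this registration is
 * inclusive of every higher event class; locale ALL covers all sources.
 */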
6755 class_locale.members.reserved = 0;
6756 class_locale.members.locale = MR_EVT_LOCALE_ALL;
6757 class_locale.members.class = MR_EVT_CLASS_DEBUG;
6758
6759 return megasas_register_aen(instance,
6760 le32_to_cpu(eli.newest_seq_num) + 1,
6761 class_locale.word);
6762 }
6763
6764 /**
6765 * megasas_io_attach - Attaches this driver to SCSI mid-layer
6766 * @instance: Adapter soft state
6767 */
6768 static int megasas_io_attach(struct megasas_instance *instance)
6769 {
6770 struct Scsi_Host *host = instance->host;
6771
6772 /*
6773 * Export parameters required by SCSI mid-layer
6774 */
6775 host->unique_id = instance->unique_id;
6776 host->can_queue = instance->max_scsi_cmds;
6777 host->this_id = instance->init_id;
6778 host->sg_tablesize = instance->max_num_sge;
6779
6780 if (instance->fw_support_ieee)
6781 instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
6782
6783 /*
6784 * Check if the module parameter value for max_sectors can be used
6785 */
6786 if (max_sectors && max_sectors < instance->max_sectors_per_req)
6787 instance->max_sectors_per_req = max_sectors;
6788 else {
6789 if (max_sectors) {
6790 if (((instance->pdev->device ==
6791 PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
6792 (instance->pdev->device ==
6793 PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
6794 (max_sectors <= MEGASAS_MAX_SECTORS)) {
6795 instance->max_sectors_per_req = max_sectors;
6796 } else {
6797 dev_info(&instance->pdev->dev, "max_sectors should be > 0"
6798 "and <= %d (or < 1MB for GEN2 controller)\n",
6799 instance->max_sectors_per_req);
6800 }
6801 }
6802 }
6803
6804 host->max_sectors = instance->max_sectors_per_req;
6805 host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
6806 host->max_channel = MEGASAS_MAX_CHANNELS - 1;
6807 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
6808 host->max_lun = MEGASAS_MAX_LUN;
6809 host->max_cmd_len = 16;
6810
6811 /*
6812 * Notify the mid-layer about the new controller
6813 */
6814 if (scsi_add_host(host, &instance->pdev->dev)) {
6815 dev_err(&instance->pdev->dev,
6816 "Failed to add host from %s %d\n",
6817 __func__, __LINE__);
6818 return -ENODEV;
6819 }
6820
6821 return 0;
6822 }
6823
6824 /**
6825 * megasas_set_dma_mask - Set DMA mask for supported controllers
6826 *
6827 * @instance: Adapter soft state
6828 * Description:
6829 *
6830 * For Ventura, driver/FW will operate in 63bit DMA addresses.
6831 *
6832 * For invader-
6833 * By default, driver/FW will operate in 32bit DMA addresses
6834 * for consistent DMA mapping but if 32 bit consistent
6835 * DMA mask fails, driver will try with 63 bit consistent
6836 * mask provided FW is true 63bit DMA capable
6837 *
6838 * For older controllers(Thunderbolt and MFI based adapters)-
6839 * driver/FW will operate in 32 bit consistent DMA addresses.
6840 */
6841 static int
6842 megasas_set_dma_mask(struct megasas_instance *instance)
6843 {
6844 u64 consistent_mask;
6845 struct pci_dev *pdev;
6846 u32 scratch_pad_1;
6847
6848 pdev = instance->pdev;
6849 consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
6850 DMA_BIT_MASK(63) : DMA_BIT_MASK(32);
6851
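/* Note the 63-bit (not 64-bit) masks used throughout this function; the
 * top address bit appears to be left for firmware use on newer controllers
 * (an assumption inferred from the masks, not stated in this file).
 */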
6852 if (IS_DMA64) {
6853 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) &&
6854 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6855 goto fail_set_dma_mask;
6856
6857 if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) &&
6858 (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
6859 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
6860 /*
6861 * If 32 bit DMA mask fails, then try for 64 bit mask
6862 * for FW capable of handling 64 bit DMA.
6863 */
6864 scratch_pad_1 = megasas_readl
6865 (instance, &instance->reg_set->outbound_scratch_pad_1);
6866
6867 if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
6868 goto fail_set_dma_mask;
6869 else if (dma_set_mask_and_coherent(&pdev->dev,
6870 DMA_BIT_MASK(63)))
6871 goto fail_set_dma_mask;
6872 }
6873 } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6874 goto fail_set_dma_mask;
6875
6876 if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32))
6877 instance->consistent_mask_64bit = false;
6878 else
6879 instance->consistent_mask_64bit = true;
6880
6881 dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
6882 ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"),
6883 (instance->consistent_mask_64bit ? "63" : "32"));
6884
6885 return 0;
6886
6887 fail_set_dma_mask:
6888 dev_err(&pdev->dev, "Failed to set DMA mask\n");
6889 return -1;
6890
6891 }
6892
6893 /*
6894 * megasas_set_adapter_type - Set adapter type.
6895 * Supported controllers can be divided in
6896 * different categories-
6897 * enum MR_ADAPTER_TYPE {
6898 * MFI_SERIES = 1,
6899 * THUNDERBOLT_SERIES = 2,
6900 * INVADER_SERIES = 3,
6901 * VENTURA_SERIES = 4,
6902 * AERO_SERIES = 5,
6903 * };
6904 * @instance: Adapter soft state
6905 * return: void
6906 */
6907 static inline void megasas_set_adapter_type(struct megasas_instance *instance)
6908 {
6909 if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) &&
6910 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) {
6911 instance->adapter_type = MFI_SERIES;
6912 } else {
6913 switch (instance->pdev->device) {
6914 case PCI_DEVICE_ID_LSI_AERO_10E1:
6915 case PCI_DEVICE_ID_LSI_AERO_10E2:
6916 case PCI_DEVICE_ID_LSI_AERO_10E5:
6917 case PCI_DEVICE_ID_LSI_AERO_10E6:
6918 instance->adapter_type = AERO_SERIES;
6919 break;
6920 case PCI_DEVICE_ID_LSI_VENTURA:
6921 case PCI_DEVICE_ID_LSI_CRUSADER:
6922 case PCI_DEVICE_ID_LSI_HARPOON:
6923 case PCI_DEVICE_ID_LSI_TOMCAT:
6924 case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
6925 case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
6926 instance->adapter_type = VENTURA_SERIES;
6927 break;
6928 case PCI_DEVICE_ID_LSI_FUSION:
6929 case PCI_DEVICE_ID_LSI_PLASMA:
6930 instance->adapter_type = THUNDERBOLT_SERIES;
6931 break;
6932 case PCI_DEVICE_ID_LSI_INVADER:
6933 case PCI_DEVICE_ID_LSI_INTRUDER:
6934 case PCI_DEVICE_ID_LSI_INTRUDER_24:
6935 case PCI_DEVICE_ID_LSI_CUTLASS_52:
6936 case PCI_DEVICE_ID_LSI_CUTLASS_53:
6937 case PCI_DEVICE_ID_LSI_FURY:
6938 instance->adapter_type = INVADER_SERIES;
6939 break;
6940 default: /* For all other supported controllers */
6941 instance->adapter_type = MFI_SERIES;
6942 break;
6943 }
6944 }
6945 }
6946
6947 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
6948 {
6949 instance->producer = dma_alloc_coherent(&instance->pdev->dev,
6950 sizeof(u32), &instance->producer_h, GFP_KERNEL);
6951 instance->consumer = dma_alloc_coherent(&instance->pdev->dev,
6952 sizeof(u32), &instance->consumer_h, GFP_KERNEL);
6953
6954 if (!instance->producer || !instance->consumer) {
6955 dev_err(&instance->pdev->dev,
6956 "Failed to allocate memory for producer, consumer\n");
6957 return -1;
6958 }
6959
6960 *instance->producer = 0;
6961 *instance->consumer = 0;
6962 return 0;
6963 }
6964
6965 /**
6966 * megasas_alloc_ctrl_mem - Allocate per controller memory for core data
6967 * structures which are not common across MFI
6968 * adapters and fusion adapters.
6969 * For MFI based adapters, allocate producer and
6970 * consumer buffers. For fusion adapters, allocate
6971 * memory for fusion context.
6972 * @instance: Adapter soft state
6973 * return: 0 for SUCCESS
6974 */
6975 static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
6976 {
6977 instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int),
6978 GFP_KERNEL);
6979 if (!instance->reply_map)
6980 return -ENOMEM;
6981
6982 switch (instance->adapter_type) {
6983 case MFI_SERIES:
6984 if (megasas_alloc_mfi_ctrl_mem(instance))
6985 goto fail;
6986 break;
6987 case AERO_SERIES:
6988 case VENTURA_SERIES:
6989 case THUNDERBOLT_SERIES:
6990 case INVADER_SERIES:
6991 if (megasas_alloc_fusion_context(instance))
6992 goto fail;
6993 break;
6994 }
6995
6996 return 0;
6997 fail:
6998 kfree(instance->reply_map);
6999 instance->reply_map = NULL;
7000 return -ENOMEM;
7001 }
7002
7003 /*
7004 * megasas_free_ctrl_mem - Free fusion context for fusion adapters and
7005 * producer, consumer buffers for MFI adapters
7006 *
7007 * @instance - Adapter soft instance
7008 *
7009 */
7010 static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
7011 {
7012 kfree(instance->reply_map);
7013 if (instance->adapter_type == MFI_SERIES) {
7014 if (instance->producer)
7015 dma_free_coherent(&instance->pdev->dev, sizeof(u32),
7016 instance->producer,
7017 instance->producer_h);
7018 if (instance->consumer)
7019 dma_free_coherent(&instance->pdev->dev, sizeof(u32),
7020 instance->consumer,
7021 instance->consumer_h);
7022 } else {
7023 megasas_free_fusion_context(instance);
7024 }
7025 }
7026
7027 /**
7028 * megasas_alloc_ctrl_dma_buffers - Allocate consistent DMA buffers during
7029 * driver load time
7030 *
7031 * @instance: Adapter soft instance
7032 *
7033 * @return: 0 for SUCCESS
7034 */
7035 static inline
7036 int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
7037 {
7038 struct pci_dev *pdev = instance->pdev;
7039 struct fusion_context *fusion = instance->ctrl_context;
7040
7041 instance->evt_detail = dma_alloc_coherent(&pdev->dev,
7042 sizeof(struct megasas_evt_detail),
7043 &instance->evt_detail_h, GFP_KERNEL);
7044
7045 if (!instance->evt_detail) {
7046 dev_err(&instance->pdev->dev,
7047 "Failed to allocate event detail buffer\n");
7048 return -ENOMEM;
7049 }
7050
7051 if (fusion) {
7052 fusion->ioc_init_request =
7053 dma_alloc_coherent(&pdev->dev,
7054 sizeof(struct MPI2_IOC_INIT_REQUEST),
7055 &fusion->ioc_init_request_phys,
7056 GFP_KERNEL);
7057
7058 if (!fusion->ioc_init_request) {
7059 dev_err(&pdev->dev,
7060 "Failed to allocate PD list buffer\n");
7061 return -ENOMEM;
7062 }
7063
7064 instance->snapdump_prop = dma_alloc_coherent(&pdev->dev,
7065 sizeof(struct MR_SNAPDUMP_PROPERTIES),
7066 &instance->snapdump_prop_h, GFP_KERNEL);
7067
7068 if (!instance->snapdump_prop)
7069 dev_err(&pdev->dev,
7070 "Failed to allocate snapdump properties buffer\n");
7071
7072 instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev,
7073 HOST_DEVICE_LIST_SZ,
7074 &instance->host_device_list_buf_h,
7075 GFP_KERNEL);
7076
7077 if (!instance->host_device_list_buf) {
7078 dev_err(&pdev->dev,
7079 "Failed to allocate targetid list buffer\n");
7080 return -ENOMEM;
7081 }
7082
7083 }
7084
7085 instance->pd_list_buf =
7086 dma_alloc_coherent(&pdev->dev,
7087 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
7088 &instance->pd_list_buf_h, GFP_KERNEL);
7089
7090 if (!instance->pd_list_buf) {
7091 dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
7092 return -ENOMEM;
7093 }
7094
7095 instance->ctrl_info_buf =
7096 dma_alloc_coherent(&pdev->dev,
7097 sizeof(struct megasas_ctrl_info),
7098 &instance->ctrl_info_buf_h, GFP_KERNEL);
7099
7100 if (!instance->ctrl_info_buf) {
7101 dev_err(&pdev->dev,
7102 "Failed to allocate controller info buffer\n");
7103 return -ENOMEM;
7104 }
7105
7106 instance->ld_list_buf =
7107 dma_alloc_coherent(&pdev->dev,
7108 sizeof(struct MR_LD_LIST),
7109 &instance->ld_list_buf_h, GFP_KERNEL);
7110
7111 if (!instance->ld_list_buf) {
7112 dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
7113 return -ENOMEM;
7114 }
7115
7116 instance->ld_targetid_list_buf =
7117 dma_alloc_coherent(&pdev->dev,
7118 sizeof(struct MR_LD_TARGETID_LIST),
7119 &instance->ld_targetid_list_buf_h, GFP_KERNEL);
7120
7121 if (!instance->ld_targetid_list_buf) {
7122 dev_err(&pdev->dev,
7123 "Failed to allocate LD targetid list buffer\n");
7124 return -ENOMEM;
7125 }
7126
7127 if (!reset_devices) {
7128 instance->system_info_buf =
7129 dma_alloc_coherent(&pdev->dev,
7130 sizeof(struct MR_DRV_SYSTEM_INFO),
7131 &instance->system_info_h, GFP_KERNEL);
7132 instance->pd_info =
7133 dma_alloc_coherent(&pdev->dev,
7134 sizeof(struct MR_PD_INFO),
7135 &instance->pd_info_h, GFP_KERNEL);
7136 instance->tgt_prop =
7137 dma_alloc_coherent(&pdev->dev,
7138 sizeof(struct MR_TARGET_PROPERTIES),
7139 &instance->tgt_prop_h, GFP_KERNEL);
7140 instance->crash_dump_buf =
7141 dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
7142 &instance->crash_dump_h, GFP_KERNEL);
7143
7144 if (!instance->system_info_buf)
7145 dev_err(&instance->pdev->dev,
7146 "Failed to allocate system info buffer\n");
7147
7148 if (!instance->pd_info)
7149 dev_err(&instance->pdev->dev,
7150 "Failed to allocate pd_info buffer\n");
7151
7152 if (!instance->tgt_prop)
7153 dev_err(&instance->pdev->dev,
7154 "Failed to allocate tgt_prop buffer\n");
7155
7156 if (!instance->crash_dump_buf)
7157 dev_err(&instance->pdev->dev,
7158 "Failed to allocate crash dump buffer\n");
7159 }
7160
7161 return 0;
7162 }
7163
7164 /*
7165 * megasas_free_ctrl_dma_buffers - Free consistent DMA buffers allocated
7166 * during driver load time
7167 *
7168 * @instance- Adapter soft instance
7169 *
7170 */
7171 static inline
7172 void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
7173 {
7174 struct pci_dev *pdev = instance->pdev;
7175 struct fusion_context *fusion = instance->ctrl_context;
7176
7177 if (instance->evt_detail)
7178 dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail),
7179 instance->evt_detail,
7180 instance->evt_detail_h);
7181
7182 if (fusion && fusion->ioc_init_request)
7183 dma_free_coherent(&pdev->dev,
7184 sizeof(struct MPI2_IOC_INIT_REQUEST),
7185 fusion->ioc_init_request,
7186 fusion->ioc_init_request_phys);
7187
7188 if (instance->pd_list_buf)
7189 dma_free_coherent(&pdev->dev,
7190 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
7191 instance->pd_list_buf,
7192 instance->pd_list_buf_h);
7193
7194 if (instance->ld_list_buf)
7195 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST),
7196 instance->ld_list_buf,
7197 instance->ld_list_buf_h);
7198
7199 if (instance->ld_targetid_list_buf)
7200 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST),
7201 instance->ld_targetid_list_buf,
7202 instance->ld_targetid_list_buf_h);
7203
7204 if (instance->ctrl_info_buf)
7205 dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info),
7206 instance->ctrl_info_buf,
7207 instance->ctrl_info_buf_h);
7208
7209 if (instance->system_info_buf)
7210 dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO),
7211 instance->system_info_buf,
7212 instance->system_info_h);
7213
7214 if (instance->pd_info)
7215 dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO),
7216 instance->pd_info, instance->pd_info_h);
7217
7218 if (instance->tgt_prop)
7219 dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES),
7220 instance->tgt_prop, instance->tgt_prop_h);
7221
7222 if (instance->crash_dump_buf)
7223 dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
7224 instance->crash_dump_buf,
7225 instance->crash_dump_h);
7226
7227 if (instance->snapdump_prop)
7228 dma_free_coherent(&pdev->dev,
7229 sizeof(struct MR_SNAPDUMP_PROPERTIES),
7230 instance->snapdump_prop,
7231 instance->snapdump_prop_h);
7232
7233 if (instance->host_device_list_buf)
7234 dma_free_coherent(&pdev->dev,
7235 HOST_DEVICE_LIST_SZ,
7236 instance->host_device_list_buf,
7237 instance->host_device_list_buf_h);
7238
7239 }
7240
7241 /*
7242 * megasas_init_ctrl_params - Initialize controller's instance
7243 * parameters before FW init
7244 * @instance - Adapter soft instance
7245 * @return - void
7246 */
7247 static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
7248 {
7249 instance->fw_crash_state = UNAVAILABLE;
7250
7251 megasas_poll_wait_aen = 0;
7252 instance->issuepend_done = 1;
7253 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
7254
7255 /*
7256 * Initialize locks and queues
7257 */
7258 INIT_LIST_HEAD(&instance->cmd_pool);
7259 INIT_LIST_HEAD(&instance->internal_reset_pending_q);
7260
7261 atomic_set(&instance->fw_outstanding, 0);
7262 atomic64_set(&instance->total_io_count, 0);
7263
7264 init_waitqueue_head(&instance->int_cmd_wait_q);
7265 init_waitqueue_head(&instance->abort_cmd_wait_q);
7266
7267 spin_lock_init(&instance->crashdump_lock);
7268 spin_lock_init(&instance->mfi_pool_lock);
7269 spin_lock_init(&instance->hba_lock);
7270 spin_lock_init(&instance->stream_lock);
7271 spin_lock_init(&instance->completion_lock);
7272
7273 mutex_init(&instance->reset_mutex);
7274
7275 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
7276 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
7277 instance->flag_ieee = 1;
7278
7279 megasas_dbg_lvl = 0;
7280 instance->flag = 0;
7281 instance->unload = 1;
7282 instance->last_time = 0;
7283 instance->disableOnlineCtrlReset = 1;
7284 instance->UnevenSpanSupport = 0;
7285 instance->smp_affinity_enable = smp_affinity_enable ? true : false;
7286 instance->msix_load_balance = false;
7287
7288 if (instance->adapter_type != MFI_SERIES)
7289 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
7290 else
7291 INIT_WORK(&instance->work_init, process_fw_state_change_wq);
7292 }
7293
7294 /**
7295 * megasas_probe_one - PCI hotplug entry point
7296 * @pdev: PCI device structure
7297 * @id: PCI ids of supported hotplugged adapter
7298 */
7299 static int megasas_probe_one(struct pci_dev *pdev,
7300 const struct pci_device_id *id)
7301 {
7302 int rval, pos;
7303 struct Scsi_Host *host;
7304 struct megasas_instance *instance;
7305 u16 control = 0;
7306
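/*
 * The Aero 10E0/10E3/10E4/10E7 device IDs identify controllers running
 * in non-secure mode; the driver does not claim them.
 */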
7307 switch (pdev->device) {
7308 case PCI_DEVICE_ID_LSI_AERO_10E0:
7309 case PCI_DEVICE_ID_LSI_AERO_10E3:
7310 case PCI_DEVICE_ID_LSI_AERO_10E4:
7311 case PCI_DEVICE_ID_LSI_AERO_10E7:
7312 dev_err(&pdev->dev, "Adapter is in non secure mode\n");
7313 return 1;
7314 case PCI_DEVICE_ID_LSI_AERO_10E1:
7315 case PCI_DEVICE_ID_LSI_AERO_10E5:
7316 dev_info(&pdev->dev, "Adapter is in configurable secure mode\n");
7317 break;
7318 }
7319
7320 /* Reset MSI-X in the kdump kernel */
7321 if (reset_devices) {
7322 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
7323 if (pos) {
7324 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
7325 &control);
7326 if (control & PCI_MSIX_FLAGS_ENABLE) {
7327 dev_info(&pdev->dev, "resetting MSI-X\n");
7328 pci_write_config_word(pdev,
7329 pos + PCI_MSIX_FLAGS,
7330 control &
7331 ~PCI_MSIX_FLAGS_ENABLE);
7332 }
7333 }
7334 }
7335
7336 /*
7337 * PCI prepping: enable device, set bus mastering and dma mask
7338 */
7339 rval = pci_enable_device_mem(pdev);
7340
7341 if (rval) {
7342 return rval;
7343 }
7344
7345 pci_set_master(pdev);
7346
7347 host = scsi_host_alloc(&megasas_template,
7348 sizeof(struct megasas_instance));
7349
7350 if (!host) {
7351 dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
7352 goto fail_alloc_instance;
7353 }
7354
7355 instance = (struct megasas_instance *)host->hostdata;
7356 memset(instance, 0, sizeof(*instance));
7357 atomic_set(&instance->fw_reset_no_pci_access, 0);
7358
7359 /*
7360 * Initialize PCI related and misc parameters
7361 */
7362 instance->pdev = pdev;
7363 instance->host = host;
7364 instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
7365 instance->init_id = MEGASAS_DEFAULT_INIT_ID;
7366
7367 megasas_set_adapter_type(instance);
7368
7369 /*
7370 * Initialize MFI Firmware
7371 */
7372 if (megasas_init_fw(instance))
7373 goto fail_init_mfi;
7374
7375 if (instance->requestorId) {
7376 if (instance->PlasmaFW111) {
7377 instance->vf_affiliation_111 =
7378 dma_alloc_coherent(&pdev->dev,
7379 sizeof(struct MR_LD_VF_AFFILIATION_111),
7380 &instance->vf_affiliation_111_h,
7381 GFP_KERNEL);
7382 if (!instance->vf_affiliation_111)
7383 dev_warn(&pdev->dev, "Can't allocate "
7384 "memory for VF affiliation buffer\n");
7385 } else {
7386 instance->vf_affiliation =
7387 dma_alloc_coherent(&pdev->dev,
7388 (MAX_LOGICAL_DRIVES + 1) *
7389 sizeof(struct MR_LD_VF_AFFILIATION),
7390 &instance->vf_affiliation_h,
7391 GFP_KERNEL);
7392 if (!instance->vf_affiliation)
7393 dev_warn(&pdev->dev, "Can't allocate "
7394 "memory for VF affiliation buffer\n");
7395 }
7396 }
7397
7398 /*
7399 * Store instance in PCI softstate
7400 */
7401 pci_set_drvdata(pdev, instance);
7402
7403 /*
7404 * Add this controller to megasas_mgmt_info structure so that it
7405 * can be exported to management applications
7406 */
7407 megasas_mgmt_info.count++;
7408 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
7409 megasas_mgmt_info.max_index++;
7410
7411 /*
7412 * Register with SCSI mid-layer
7413 */
7414 if (megasas_io_attach(instance))
7415 goto fail_io_attach;
7416
7417 instance->unload = 0;
7418 /*
7419 * Trigger SCSI to scan our drives
7420 */
7421 if (!instance->enable_fw_dev_list ||
7422 (instance->host_device_list_buf->count > 0))
7423 scsi_scan_host(host);
7424
7425 /*
7426 * Initiate AEN (Asynchronous Event Notification)
7427 */
7428 if (megasas_start_aen(instance)) {
7429 dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
7430 goto fail_start_aen;
7431 }
7432
7433 megasas_setup_debugfs(instance);
7434
7435 /* Get current SR-IOV LD/VF affiliation */
7436 if (instance->requestorId)
7437 megasas_get_ld_vf_affiliation(instance, 1);
7438
7439 return 0;
7440
7441 fail_start_aen:
7442 fail_io_attach:
7443 megasas_mgmt_info.count--;
7444 megasas_mgmt_info.max_index--;
7445 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
7446
7447 instance->instancet->disable_intr(instance);
7448 megasas_destroy_irqs(instance);
7449
7450 if (instance->adapter_type != MFI_SERIES)
7451 megasas_release_fusion(instance);
7452 else
7453 megasas_release_mfi(instance);
7454 if (instance->msix_vectors)
7455 pci_free_irq_vectors(instance->pdev);
7456 fail_init_mfi:
7457 scsi_host_put(host);
7458 fail_alloc_instance:
7459 pci_disable_device(pdev);
7460
7461 return -ENODEV;
7462 }
7463
7464 /**
7465 * megasas_flush_cache - Requests FW to flush all its caches
7466 * @instance: Adapter soft state
7467 */
7468 static void megasas_flush_cache(struct megasas_instance *instance)
7469 {
7470 struct megasas_cmd *cmd;
7471 struct megasas_dcmd_frame *dcmd;
7472
7473 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
7474 return;
7475
7476 cmd = megasas_get_cmd(instance);
7477
7478 if (!cmd)
7479 return;
7480
7481 dcmd = &cmd->frame->dcmd;
7482
7483 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
7484
7485 dcmd->cmd = MFI_CMD_DCMD;
7486 dcmd->cmd_status = 0x0;
7487 dcmd->sge_count = 0;
7488 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
7489 dcmd->timeout = 0;
7490 dcmd->pad_0 = 0;
7491 dcmd->data_xfer_len = 0;
7492 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
7493 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
7494
7495 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
7496 != DCMD_SUCCESS) {
7497 dev_err(&instance->pdev->dev,
7498 "return from %s %d\n", __func__, __LINE__);
7499 return;
7500 }
7501
7502 megasas_return_cmd(instance, cmd);
7503 }
7504
7505 /**
7506 * megasas_shutdown_controller - Instructs FW to shutdown the controller
7507 * @instance: Adapter soft state
7508 * @opcode: Shutdown/Hibernate
7509 */
7510 static void megasas_shutdown_controller(struct megasas_instance *instance,
7511 u32 opcode)
7512 {
7513 struct megasas_cmd *cmd;
7514 struct megasas_dcmd_frame *dcmd;
7515
7516 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
7517 return;
7518
7519 cmd = megasas_get_cmd(instance);
7520
7521 if (!cmd)
7522 return;
7523
7524 if (instance->aen_cmd)
7525 megasas_issue_blocked_abort_cmd(instance,
7526 instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
7527 if (instance->map_update_cmd)
7528 megasas_issue_blocked_abort_cmd(instance,
7529 instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
7530 if (instance->jbod_seq_cmd)
7531 megasas_issue_blocked_abort_cmd(instance,
7532 instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
7533
7534 dcmd = &cmd->frame->dcmd;
7535
7536 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
7537
7538 dcmd->cmd = MFI_CMD_DCMD;
7539 dcmd->cmd_status = 0x0;
7540 dcmd->sge_count = 0;
7541 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
7542 dcmd->timeout = 0;
7543 dcmd->pad_0 = 0;
7544 dcmd->data_xfer_len = 0;
7545 dcmd->opcode = cpu_to_le32(opcode);
7546
7547 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
7548 != DCMD_SUCCESS) {
7549 dev_err(&instance->pdev->dev,
7550 "return from %s %d\n", __func__, __LINE__);
7551 return;
7552 }
7553
7554 megasas_return_cmd(instance, cmd);
7555 }
7556
7557 #ifdef CONFIG_PM
7558 /**
7559 * megasas_suspend - driver suspend entry point
7560 * @pdev: PCI device structure
7561 * @state: PCI power state to suspend routine
7562 */
7563 static int
7564 megasas_suspend(struct pci_dev *pdev, pm_message_t state)
7565 {
7566 struct megasas_instance *instance;
7567
7568 instance = pci_get_drvdata(pdev);
7569
7570 if (!instance)
7571 return 0;
7572
7573 instance->unload = 1;
7574
7575 dev_info(&pdev->dev, "%s is called\n", __func__);
7576
7577 /* Shutdown SR-IOV heartbeat timer */
7578 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7579 del_timer_sync(&instance->sriov_heartbeat_timer);
7580
7581 /* Stop the FW fault detection watchdog */
7582 if (instance->adapter_type != MFI_SERIES)
7583 megasas_fusion_stop_watchdog(instance);
7584
7585 megasas_flush_cache(instance);
7586 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
7587
7588 /* cancel the delayed work if it is still queued */
7589 if (instance->ev != NULL) {
7590 struct megasas_aen_event *ev = instance->ev;
7591 cancel_delayed_work_sync(&ev->hotplug_work);
7592 instance->ev = NULL;
7593 }
7594
7595 tasklet_kill(&instance->isr_tasklet);
7596
7597 pci_set_drvdata(instance->pdev, instance);
7598 instance->instancet->disable_intr(instance);
7599
7600 megasas_destroy_irqs(instance);
7601
7602 if (instance->msix_vectors)
7603 pci_free_irq_vectors(instance->pdev);
7604
7605 pci_save_state(pdev);
7606 pci_disable_device(pdev);
7607
7608 pci_set_power_state(pdev, pci_choose_state(pdev, state));
7609
7610 return 0;
7611 }
7612
7613 /**
7614 * megasas_resume- driver resume entry point
7615 * @pdev: PCI device structure
7616 */
7617 static int
7618 megasas_resume(struct pci_dev *pdev)
7619 {
7620 int rval;
7621 struct Scsi_Host *host;
7622 struct megasas_instance *instance;
7623 u32 status_reg;
7624
7625 instance = pci_get_drvdata(pdev);
7626
7627 if (!instance)
7628 return 0;
7629
7630 host = instance->host;
7631 pci_set_power_state(pdev, PCI_D0);
7632 pci_enable_wake(pdev, PCI_D0, 0);
7633 pci_restore_state(pdev);
7634
7635 dev_info(&pdev->dev, "%s is called\n", __func__);
7636 /*
7637 * PCI prepping: enable device, set bus mastering and dma mask
7638 */
7639 rval = pci_enable_device_mem(pdev);
7640
7641 if (rval) {
7642 dev_err(&pdev->dev, "Enable device failed\n");
7643 return rval;
7644 }
7645
7646 pci_set_master(pdev);
7647
7648 /*
7649 * We expect the FW state to be READY
7650 */
7651
7652 if (megasas_transition_to_ready(instance, 0)) {
7653 dev_info(&instance->pdev->dev,
7654 "Failed to transition controller to ready from %s!\n",
7655 __func__);
7656 if (instance->adapter_type != MFI_SERIES) {
7657 status_reg =
7658 instance->instancet->read_fw_status_reg(instance);
7659 if (!(status_reg & MFI_RESET_ADAPTER) ||
7660 ((megasas_adp_reset_wait_for_ready
7661 (instance, true, 0)) == FAILED))
7662 goto fail_ready_state;
7663 } else {
7664 atomic_set(&instance->fw_reset_no_pci_access, 1);
7665 instance->instancet->adp_reset
7666 (instance, instance->reg_set);
7667 atomic_set(&instance->fw_reset_no_pci_access, 0);
7668
7669 /* waiting for about 30 seconds before retry */
7670 ssleep(30);
7671
7672 if (megasas_transition_to_ready(instance, 0))
7673 goto fail_ready_state;
7674 }
7675
7676 dev_info(&instance->pdev->dev,
7677 "FW restarted successfully from %s!\n",
7678 __func__);
7679 }
7680 if (megasas_set_dma_mask(instance))
7681 goto fail_set_dma_mask;
7682
7683 /*
7684 * Initialize MFI Firmware
7685 */
7686
7687 atomic_set(&instance->fw_outstanding, 0);
7688 atomic_set(&instance->ldio_outstanding, 0);
7689
7690 /* Now re-enable MSI-X */
7691 if (instance->msix_vectors)
7692 megasas_alloc_irq_vectors(instance);
7693
7694 if (!instance->msix_vectors) {
7695 rval = pci_alloc_irq_vectors(instance->pdev, 1, 1,
7696 PCI_IRQ_LEGACY);
7697 if (rval < 0)
7698 goto fail_reenable_msix;
7699 }
7700
7701 megasas_setup_reply_map(instance);
7702
7703 if (instance->adapter_type != MFI_SERIES) {
7704 megasas_reset_reply_desc(instance);
7705 if (megasas_ioc_init_fusion(instance)) {
7706 megasas_free_cmds(instance);
7707 megasas_free_cmds_fusion(instance);
7708 goto fail_init_mfi;
7709 }
7710 if (!megasas_get_map_info(instance))
7711 megasas_sync_map_info(instance);
7712 } else {
7713 *instance->producer = 0;
7714 *instance->consumer = 0;
7715 if (megasas_issue_init_mfi(instance))
7716 goto fail_init_mfi;
7717 }
7718
7719 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS)
7720 goto fail_init_mfi;
7721
7722 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
7723 (unsigned long)instance);
7724
7725 if (instance->msix_vectors ?
7726 megasas_setup_irqs_msix(instance, 0) :
7727 megasas_setup_irqs_ioapic(instance))
7728 goto fail_init_mfi;
7729
7730 if (instance->adapter_type != MFI_SERIES)
7731 megasas_setup_irq_poll(instance);
7732
7733 /* Re-launch SR-IOV heartbeat timer */
7734 if (instance->requestorId) {
7735 if (!megasas_sriov_start_heartbeat(instance, 0))
7736 megasas_start_timer(instance);
7737 else {
7738 instance->skip_heartbeat_timer_del = 1;
7739 goto fail_init_mfi;
7740 }
7741 }
7742
7743 instance->instancet->enable_intr(instance);
7744 megasas_setup_jbod_map(instance);
7745 instance->unload = 0;
7746
7747 /*
7748 * Initiate AEN (Asynchronous Event Notification)
7749 */
7750 if (megasas_start_aen(instance))
7751 dev_err(&instance->pdev->dev, "Start AEN failed\n");
7752
7753 /* Re-launch FW fault watchdog */
7754 if (instance->adapter_type != MFI_SERIES)
7755 if (megasas_fusion_start_watchdog(instance) != SUCCESS)
7756 goto fail_start_watchdog;
7757
7758 return 0;
7759
7760 fail_start_watchdog:
7761 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7762 del_timer_sync(&instance->sriov_heartbeat_timer);
7763 fail_init_mfi:
7764 megasas_free_ctrl_dma_buffers(instance);
7765 megasas_free_ctrl_mem(instance);
7766 scsi_host_put(host);
7767
7768 fail_reenable_msix:
7769 fail_set_dma_mask:
7770 fail_ready_state:
7771
7772 pci_disable_device(pdev);
7773
7774 return -ENODEV;
7775 }
7776 #else
7777 #define megasas_suspend NULL
7778 #define megasas_resume NULL
7779 #endif
7780
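/**
 * megasas_wait_for_adapter_operational -	Wait for any in-progress
 *						controller reset to finish
 * @instance:				Adapter soft state
 *
 * Polls instance->adprecovery once per second, for at most twice
 * MEGASAS_RESET_WAIT_TIME seconds, until the HBA is operational again or
 * has hit a critical hardware error.
 *
 * return: 0 if the HBA is operational, 1 otherwise
 */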
7781 static inline int
7782 megasas_wait_for_adapter_operational(struct megasas_instance *instance)
7783 {
7784 int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
7785 int i;
7786 u8 adp_state;
7787
7788 for (i = 0; i < wait_time; i++) {
7789 adp_state = atomic_read(&instance->adprecovery);
7790 if ((adp_state == MEGASAS_HBA_OPERATIONAL) ||
7791 (adp_state == MEGASAS_HW_CRITICAL_ERROR))
7792 break;
7793
7794 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
7795 dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
7796
7797 msleep(1000);
7798 }
7799
7800 if (adp_state != MEGASAS_HBA_OPERATIONAL) {
7801 dev_info(&instance->pdev->dev,
7802 "%s HBA failed to become operational, adp_state %d\n",
7803 __func__, adp_state);
7804 return 1;
7805 }
7806
7807 return 0;
7808 }
7809
7810 /**
7811 * megasas_detach_one - PCI hot"un"plug entry point
7812 * @pdev: PCI device structure
7813 */
7814 static void megasas_detach_one(struct pci_dev *pdev)
7815 {
7816 int i;
7817 struct Scsi_Host *host;
7818 struct megasas_instance *instance;
7819 struct fusion_context *fusion;
7820 u32 pd_seq_map_sz;
7821
7822 instance = pci_get_drvdata(pdev);
7823
7824 if (!instance)
7825 return;
7826
7827 host = instance->host;
7828 fusion = instance->ctrl_context;
7829
7830 /* Shutdown SR-IOV heartbeat timer */
7831 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7832 del_timer_sync(&instance->sriov_heartbeat_timer);
7833
7834 /* Stop the FW fault detection watchdog */
7835 if (instance->adapter_type != MFI_SERIES)
7836 megasas_fusion_stop_watchdog(instance);
7837
7838 if (instance->fw_crash_state != UNAVAILABLE)
7839 megasas_free_host_crash_buffer(instance);
7840 scsi_remove_host(instance->host);
7841 instance->unload = 1;
7842
7843 if (megasas_wait_for_adapter_operational(instance))
7844 goto skip_firing_dcmds;
7845
7846 megasas_flush_cache(instance);
7847 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
7848
7849 skip_firing_dcmds:
7850 /* cancel the delayed work if it is still queued */
7851 if (instance->ev != NULL) {
7852 struct megasas_aen_event *ev = instance->ev;
7853 cancel_delayed_work_sync(&ev->hotplug_work);
7854 instance->ev = NULL;
7855 }
7856
7857 /* cancel all wait events */
7858 wake_up_all(&instance->int_cmd_wait_q);
7859
7860 tasklet_kill(&instance->isr_tasklet);
7861
7862 /*
7863 * Take the instance off the instance array. Note that we will not
7864 * decrement the max_index. We let this array be a sparse array.
7865 */
7866 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
7867 if (megasas_mgmt_info.instance[i] == instance) {
7868 megasas_mgmt_info.count--;
7869 megasas_mgmt_info.instance[i] = NULL;
7870
7871 break;
7872 }
7873 }
7874
7875 instance->instancet->disable_intr(instance);
7876
7877 megasas_destroy_irqs(instance);
7878
7879 if (instance->msix_vectors)
7880 pci_free_irq_vectors(instance->pdev);
7881
7882 if (instance->adapter_type >= VENTURA_SERIES) {
7883 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
7884 kfree(fusion->stream_detect_by_ld[i]);
7885 kfree(fusion->stream_detect_by_ld);
7886 fusion->stream_detect_by_ld = NULL;
7887 }
7888
7889
7890 if (instance->adapter_type != MFI_SERIES) {
7891 megasas_release_fusion(instance);
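/*
 * Size of one JBOD sequence number map: the structure already contains
 * one MR_PD_CFG_SEQ entry, so only (MAX_PHYSICAL_DEVICES - 1) more are
 * added here.
 */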
7892 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
7893 (sizeof(struct MR_PD_CFG_SEQ) *
7894 (MAX_PHYSICAL_DEVICES - 1));
7895 for (i = 0; i < 2 ; i++) {
7896 if (fusion->ld_map[i])
7897 dma_free_coherent(&instance->pdev->dev,
7898 fusion->max_map_sz,
7899 fusion->ld_map[i],
7900 fusion->ld_map_phys[i]);
7901 if (fusion->ld_drv_map[i]) {
7902 if (is_vmalloc_addr(fusion->ld_drv_map[i]))
7903 vfree(fusion->ld_drv_map[i]);
7904 else
7905 free_pages((ulong)fusion->ld_drv_map[i],
7906 fusion->drv_map_pages);
7907 }
7908
7909 if (fusion->pd_seq_sync[i])
7910 dma_free_coherent(&instance->pdev->dev,
7911 pd_seq_map_sz,
7912 fusion->pd_seq_sync[i],
7913 fusion->pd_seq_phys[i]);
7914 }
7915 } else {
7916 megasas_release_mfi(instance);
7917 }
7918
7919 if (instance->vf_affiliation)
7920 dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) *
7921 sizeof(struct MR_LD_VF_AFFILIATION),
7922 instance->vf_affiliation,
7923 instance->vf_affiliation_h);
7924
7925 if (instance->vf_affiliation_111)
7926 dma_free_coherent(&pdev->dev,
7927 sizeof(struct MR_LD_VF_AFFILIATION_111),
7928 instance->vf_affiliation_111,
7929 instance->vf_affiliation_111_h);
7930
7931 if (instance->hb_host_mem)
7932 dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM),
7933 instance->hb_host_mem,
7934 instance->hb_host_mem_h);
7935
7936 megasas_free_ctrl_dma_buffers(instance);
7937
7938 megasas_free_ctrl_mem(instance);
7939
7940 megasas_destroy_debugfs(instance);
7941
7942 scsi_host_put(host);
7943
7944 pci_disable_device(pdev);
7945 }
7946
7947 /**
7948 * megasas_shutdown - Shutdown entry point
7949 * @pdev: Generic device structure
7950 */
7951 static void megasas_shutdown(struct pci_dev *pdev)
7952 {
7953 struct megasas_instance *instance = pci_get_drvdata(pdev);
7954
7955 if (!instance)
7956 return;
7957
7958 instance->unload = 1;
7959
7960 if (megasas_wait_for_adapter_operational(instance))
7961 goto skip_firing_dcmds;
7962
7963 megasas_flush_cache(instance);
7964 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
7965
7966 skip_firing_dcmds:
7967 instance->instancet->disable_intr(instance);
7968 megasas_destroy_irqs(instance);
7969
7970 if (instance->msix_vectors)
7971 pci_free_irq_vectors(instance->pdev);
7972 }
7973
7974 /*
7975 * megasas_mgmt_open - char node "open" entry point
7976 * @inode: char node inode
7977 * @filep: char node file
7978 */
7979 static int megasas_mgmt_open(struct inode *inode, struct file *filep)
7980 {
7981 /*
7982 * Allow only those users with admin rights
7983 */
7984 if (!capable(CAP_SYS_ADMIN))
7985 return -EACCES;
7986
7987 return 0;
7988 }
7989
7990 /*
7991 * megasas_mgmt_fasync - Async notifier registration from applications
7992 * @fd: char node file descriptor number
7993 * @filep: char node file
7994 * @mode: notifier on/off
7995 *
7996 * This function adds the calling process to a driver global queue. When an
7997 * event occurs, SIGIO will be sent to all processes in this queue.
7998 */
7999 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
8000 {
8001 int rc;
8002
8003 mutex_lock(&megasas_async_queue_mutex);
8004
8005 rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
8006
8007 mutex_unlock(&megasas_async_queue_mutex);
8008
8009 if (rc >= 0) {
8010 /* For sanity check when we get ioctl */
8011 filep->private_data = filep;
8012 return 0;
8013 }
8014
8015 printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
8016
8017 return rc;
8018 }
8019
8020 /*
8021 * megasas_mgmt_poll - char node "poll" entry point
8022 * @filep: char node file
8023 * @wait: Events to poll for
8024 */
8025 static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
8026 {
8027 __poll_t mask;
8028 unsigned long flags;
8029
8030 poll_wait(file, &megasas_poll_wait, wait);
8031 spin_lock_irqsave(&poll_aen_lock, flags);
8032 if (megasas_poll_wait_aen)
8033 mask = (EPOLLIN | EPOLLRDNORM);
8034 else
8035 mask = 0;
8036 megasas_poll_wait_aen = 0;
8037 spin_unlock_irqrestore(&poll_aen_lock, flags);
8038 return mask;
8039 }
8040
8041 /*
8042 * megasas_set_crash_dump_params_ioctl:
8043 * Send CRASH_DUMP_MODE DCMD to all controllers
8044 * @cmd: MFI command frame
8045 */
8046
8047 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
8048 {
8049 struct megasas_instance *local_instance;
8050 int i, error = 0;
8051 int crash_support;
8052
8053 crash_support = cmd->frame->dcmd.mbox.w[0];
8054
8055 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
8056 local_instance = megasas_mgmt_info.instance[i];
8057 if (local_instance && local_instance->crash_dump_drv_support) {
8058 if ((atomic_read(&local_instance->adprecovery) ==
8059 MEGASAS_HBA_OPERATIONAL) &&
8060 !megasas_set_crash_dump_params(local_instance,
8061 crash_support)) {
8062 local_instance->crash_dump_app_support =
8063 crash_support;
8064 dev_info(&local_instance->pdev->dev,
8065 "Application firmware crash "
8066 "dump mode set success\n");
8067 error = 0;
8068 } else {
8069 dev_info(&local_instance->pdev->dev,
8070 "Application firmware crash "
8071 "dump mode set failed\n");
8072 error = -1;
8073 }
8074 }
8075 }
8076 return error;
8077 }
8078
8079 /**
8080 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW
8081 * @instance: Adapter soft state
8082 * @user_ioc: User's ioctl packet
8083 * @ioc: ioctl packet
8084 */
8085 static int
8086 megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
8087 struct megasas_iocpacket __user * user_ioc,
8088 struct megasas_iocpacket *ioc)
8089 {
8090 struct megasas_sge64 *kern_sge64 = NULL;
8091 struct megasas_sge32 *kern_sge32 = NULL;
8092 struct megasas_cmd *cmd;
8093 void *kbuff_arr[MAX_IOCTL_SGE];
8094 dma_addr_t buf_handle = 0;
8095 int error = 0, i;
8096 void *sense = NULL;
8097 dma_addr_t sense_handle;
8098 unsigned long *sense_ptr;
8099 u32 opcode = 0;
8100 int ret = DCMD_SUCCESS;
8101
8102 memset(kbuff_arr, 0, sizeof(kbuff_arr));
8103
8104 if (ioc->sge_count > MAX_IOCTL_SGE) {
8105 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n",
8106 ioc->sge_count, MAX_IOCTL_SGE);
8107 return -EINVAL;
8108 }
8109
8110 if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
8111 ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
8112 !instance->support_nvme_passthru) ||
8113 ((ioc->frame.hdr.cmd == MFI_CMD_TOOLBOX) &&
8114 !instance->support_pci_lane_margining)) {
8115 dev_err(&instance->pdev->dev,
8116 "Received invalid ioctl command 0x%x\n",
8117 ioc->frame.hdr.cmd);
8118 return -ENOTSUPP;
8119 }
8120
8121 cmd = megasas_get_cmd(instance);
8122 if (!cmd) {
8123 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
8124 return -ENOMEM;
8125 }
8126
8127 /*
8128 * User's IOCTL packet has 2 frames (maximum). Copy those two
8129 * frames into our cmd's frames. cmd->frame's context will get
8130 * overwritten when we copy from user's frames. So set that value
8131 * alone separately
8132 */
8133 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
8134 cmd->frame->hdr.context = cpu_to_le32(cmd->index);
8135 cmd->frame->hdr.pad_0 = 0;
8136
8137 cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE);
8138
8139 if (instance->consistent_mask_64bit)
8140 cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 |
8141 MFI_FRAME_SENSE64));
8142 else
8143 cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 |
8144 MFI_FRAME_SENSE64));
8145
8146 if (cmd->frame->hdr.cmd == MFI_CMD_DCMD)
8147 opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
8148
8149 if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
8150 mutex_lock(&instance->reset_mutex);
8151 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
8152 megasas_return_cmd(instance, cmd);
8153 mutex_unlock(&instance->reset_mutex);
8154 return -1;
8155 }
8156 mutex_unlock(&instance->reset_mutex);
8157 }
8158
8159 if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
8160 error = megasas_set_crash_dump_params_ioctl(cmd);
8161 megasas_return_cmd(instance, cmd);
8162 return error;
8163 }
8164
8165 /*
8166 * The management interface between applications and the fw uses
8167 * MFI frames. E.g., RAID configuration changes, LD property changes
8168 * etc. are accomplished through different kinds of MFI frames. The
8169 * driver needs to care only about substituting user buffers with
8170 * kernel buffers in SGLs. The location of SGL is embedded in the
8171 * struct iocpacket itself.
8172 */
8173 if (instance->consistent_mask_64bit)
8174 kern_sge64 = (struct megasas_sge64 *)
8175 ((unsigned long)cmd->frame + ioc->sgl_off);
8176 else
8177 kern_sge32 = (struct megasas_sge32 *)
8178 ((unsigned long)cmd->frame + ioc->sgl_off);
8179
8180 /*
8181 * For each user buffer, create a mirror buffer and copy in
8182 */
8183 for (i = 0; i < ioc->sge_count; i++) {
8184 if (!ioc->sgl[i].iov_len)
8185 continue;
8186
8187 kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
8188 ioc->sgl[i].iov_len,
8189 &buf_handle, GFP_KERNEL);
8190 if (!kbuff_arr[i]) {
8191 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
8192 "kernel SGL buffer for IOCTL\n");
8193 error = -ENOMEM;
8194 goto out;
8195 }
8196
8197 /*
8198 * We don't change the dma_coherent_mask, so
8199 * dma_alloc_coherent only returns 32bit addresses
8200 */
8201 if (instance->consistent_mask_64bit) {
8202 kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
8203 kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
8204 } else {
8205 kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
8206 kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
8207 }
8208
8209 /*
8210 * We created a kernel buffer corresponding to the
8211 * user buffer. Now copy in from the user buffer
8212 */
8213 if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
8214 (u32) (ioc->sgl[i].iov_len))) {
8215 error = -EFAULT;
8216 goto out;
8217 }
8218 }
8219
8220 if (ioc->sense_len) {
8221 sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
8222 &sense_handle, GFP_KERNEL);
8223 if (!sense) {
8224 error = -ENOMEM;
8225 goto out;
8226 }
8227
8228 sense_ptr =
8229 (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
8230 if (instance->consistent_mask_64bit)
8231 *sense_ptr = cpu_to_le64(sense_handle);
8232 else
8233 *sense_ptr = cpu_to_le32(sense_handle);
8234 }
8235
8236 /*
8237 * Set the sync_cmd flag so that the ISR knows not to complete this
8238 * cmd to the SCSI mid-layer
8239 */
8240 cmd->sync_cmd = 1;
8241
8242 ret = megasas_issue_blocked_cmd(instance, cmd, 0);
8243 switch (ret) {
8244 case DCMD_INIT:
8245 case DCMD_BUSY:
8246 cmd->sync_cmd = 0;
8247 dev_err(&instance->pdev->dev,
8248 "return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
8249 __func__, __LINE__, cmd->frame->hdr.cmd, opcode,
8250 cmd->cmd_status_drv);
8251 error = -EBUSY;
8252 goto out;
8253 }
8254
8255 cmd->sync_cmd = 0;
8256
8257 if (instance->unload == 1) {
8258 dev_info(&instance->pdev->dev, "Driver unload is in progress "
8259 "don't submit data to application\n");
8260 goto out;
8261 }
8262 /*
8263 * copy out the kernel buffers to user buffers
8264 */
8265 for (i = 0; i < ioc->sge_count; i++) {
8266 if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
8267 ioc->sgl[i].iov_len)) {
8268 error = -EFAULT;
8269 goto out;
8270 }
8271 }
8272
8273 /*
8274 * copy out the sense
8275 */
8276 if (ioc->sense_len) {
8277 /*
8278 * sense_ptr points to the location that has the user
8279 * sense buffer address
8280 */
8281 sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
8282 ioc->sense_off);
8283
8284 if (copy_to_user((void __user *)((unsigned long)
8285 get_unaligned((unsigned long *)sense_ptr)),
8286 sense, ioc->sense_len)) {
8287 dev_err(&instance->pdev->dev, "Failed to copy out to user "
8288 "sense data\n");
8289 error = -EFAULT;
8290 goto out;
8291 }
8292 }
8293
8294 /*
8295 * copy the status codes returned by the fw
8296 */
8297 if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
8298 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
8299 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
8300 error = -EFAULT;
8301 }
8302
8303 out:
8304 if (sense) {
8305 dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
8306 sense, sense_handle);
8307 }
8308
8309 for (i = 0; i < ioc->sge_count; i++) {
8310 if (kbuff_arr[i]) {
8311 if (instance->consistent_mask_64bit)
8312 dma_free_coherent(&instance->pdev->dev,
8313 le32_to_cpu(kern_sge64[i].length),
8314 kbuff_arr[i],
8315 le64_to_cpu(kern_sge64[i].phys_addr));
8316 else
8317 dma_free_coherent(&instance->pdev->dev,
8318 le32_to_cpu(kern_sge32[i].length),
8319 kbuff_arr[i],
8320 le32_to_cpu(kern_sge32[i].phys_addr));
8321 kbuff_arr[i] = NULL;
8322 }
8323 }
8324
8325 megasas_return_cmd(instance, cmd);
8326 return error;
8327 }
8328
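/*
 * megasas_mgmt_ioctl_fw - char node MEGASAS_IOC_FIRMWARE handler
 * @file:	char node file
 * @arg:	user space pointer to a struct megasas_iocpacket
 *
 * Copies the ioctl packet from user space, looks up the addressed adapter,
 * serializes against other ioctls with ioctl_sem and hands the packet to
 * megasas_mgmt_fw_ioctl() once the adapter is operational.
 */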
8329 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
8330 {
8331 struct megasas_iocpacket __user *user_ioc =
8332 (struct megasas_iocpacket __user *)arg;
8333 struct megasas_iocpacket *ioc;
8334 struct megasas_instance *instance;
8335 int error;
8336
8337 ioc = memdup_user(user_ioc, sizeof(*ioc));
8338 if (IS_ERR(ioc))
8339 return PTR_ERR(ioc);
8340
8341 instance = megasas_lookup_instance(ioc->host_no);
8342 if (!instance) {
8343 error = -ENODEV;
8344 goto out_kfree_ioc;
8345 }
8346
8347 /* Block ioctls in VF mode */
8348 if (instance->requestorId && !allow_vf_ioctls) {
8349 error = -ENODEV;
8350 goto out_kfree_ioc;
8351 }
8352
8353 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
8354 dev_err(&instance->pdev->dev, "Controller in crit error\n");
8355 error = -ENODEV;
8356 goto out_kfree_ioc;
8357 }
8358
8359 if (instance->unload == 1) {
8360 error = -ENODEV;
8361 goto out_kfree_ioc;
8362 }
8363
8364 if (down_interruptible(&instance->ioctl_sem)) {
8365 error = -ERESTARTSYS;
8366 goto out_kfree_ioc;
8367 }
8368
8369 if (megasas_wait_for_adapter_operational(instance)) {
8370 error = -ENODEV;
8371 goto out_up;
8372 }
8373
8374 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
8375 out_up:
8376 up(&instance->ioctl_sem);
8377
8378 out_kfree_ioc:
8379 kfree(ioc);
8380 return error;
8381 }
8382
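/*
 * megasas_mgmt_ioctl_aen - char node MEGASAS_IOC_GET_AEN handler
 * @file:	char node file
 * @arg:	user space pointer to a struct megasas_aen
 *
 * Registers an AEN with firmware using the sequence number and
 * class/locale word supplied by the application.
 */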
8383 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
8384 {
8385 struct megasas_instance *instance;
8386 struct megasas_aen aen;
8387 int error;
8388
8389 if (file->private_data != file) {
8390 printk(KERN_DEBUG "megasas: fasync_helper was not "
8391 "called first\n");
8392 return -EINVAL;
8393 }
8394
8395 if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
8396 return -EFAULT;
8397
8398 instance = megasas_lookup_instance(aen.host_no);
8399
8400 if (!instance)
8401 return -ENODEV;
8402
8403 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
8404 return -ENODEV;
8405 }
8406
8407 if (instance->unload == 1) {
8408 return -ENODEV;
8409 }
8410
8411 if (megasas_wait_for_adapter_operational(instance))
8412 return -ENODEV;
8413
8414 mutex_lock(&instance->reset_mutex);
8415 error = megasas_register_aen(instance, aen.seq_num,
8416 aen.class_locale_word);
8417 mutex_unlock(&instance->reset_mutex);
8418 return error;
8419 }
8420
8421 /**
8422 * megasas_mgmt_ioctl - char node ioctl entry point
8423 * @file: char device file pointer
8424 * @cmd: ioctl command
8425 * @arg: ioctl command arguments address
8426 */
8427 static long
8428 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8429 {
8430 switch (cmd) {
8431 case MEGASAS_IOC_FIRMWARE:
8432 return megasas_mgmt_ioctl_fw(file, arg);
8433
8434 case MEGASAS_IOC_GET_AEN:
8435 return megasas_mgmt_ioctl_aen(file, arg);
8436 }
8437
8438 return -ENOTTY;
8439 }
8440
8441 #ifdef CONFIG_COMPAT
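/*
 * megasas_mgmt_compat_ioctl_fw - 32-bit compat handler for
 *				  MEGASAS_IOC_FIRMWARE
 * @file:	char node file
 * @arg:	user space pointer to a struct compat_megasas_iocpacket
 *
 * Rewrites the 32-bit packet (compat pointers in the sense and SGL
 * fields) into a native struct megasas_iocpacket in user space and then
 * reuses megasas_mgmt_ioctl_fw() to issue the command.
 */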
8442 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
8443 {
8444 struct compat_megasas_iocpacket __user *cioc =
8445 (struct compat_megasas_iocpacket __user *)arg;
8446 struct megasas_iocpacket __user *ioc =
8447 compat_alloc_user_space(sizeof(struct megasas_iocpacket));
8448 int i;
8449 int error = 0;
8450 compat_uptr_t ptr;
8451 u32 local_sense_off;
8452 u32 local_sense_len;
8453 u32 user_sense_off;
8454
8455 if (clear_user(ioc, sizeof(*ioc)))
8456 return -EFAULT;
8457
8458 if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
8459 copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
8460 copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
8461 copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
8462 copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
8463 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
8464 return -EFAULT;
8465
8466 /*
8467 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
8468 * sense_len is not null, so prepare the 64bit value under
8469 * the same condition.
8470 */
8471 if (get_user(local_sense_off, &ioc->sense_off) ||
8472 get_user(local_sense_len, &ioc->sense_len) ||
8473 get_user(user_sense_off, &cioc->sense_off))
8474 return -EFAULT;
8475
8476 if (local_sense_off != user_sense_off)
8477 return -EINVAL;
8478
8479 if (local_sense_len) {
8480 void __user **sense_ioc_ptr =
8481 (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
8482 compat_uptr_t *sense_cioc_ptr =
8483 (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
8484 if (get_user(ptr, sense_cioc_ptr) ||
8485 put_user(compat_ptr(ptr), sense_ioc_ptr))
8486 return -EFAULT;
8487 }
8488
8489 for (i = 0; i < MAX_IOCTL_SGE; i++) {
8490 if (get_user(ptr, &cioc->sgl[i].iov_base) ||
8491 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
8492 copy_in_user(&ioc->sgl[i].iov_len,
8493 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
8494 return -EFAULT;
8495 }
8496
8497 error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
8498
8499 if (copy_in_user(&cioc->frame.hdr.cmd_status,
8500 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
8501 printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
8502 return -EFAULT;
8503 }
8504 return error;
8505 }
8506
8507 static long
8508 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
8509 unsigned long arg)
8510 {
8511 switch (cmd) {
8512 case MEGASAS_IOC_FIRMWARE32:
8513 return megasas_mgmt_compat_ioctl_fw(file, arg);
8514 case MEGASAS_IOC_GET_AEN:
8515 return megasas_mgmt_ioctl_aen(file, arg);
8516 }
8517
8518 return -ENOTTY;
8519 }
8520 #endif
8521
8522 /*
8523 * File operations structure for management interface
8524 */
8525 static const struct file_operations megasas_mgmt_fops = {
8526 .owner = THIS_MODULE,
8527 .open = megasas_mgmt_open,
8528 .fasync = megasas_mgmt_fasync,
8529 .unlocked_ioctl = megasas_mgmt_ioctl,
8530 .poll = megasas_mgmt_poll,
8531 #ifdef CONFIG_COMPAT
8532 .compat_ioctl = megasas_mgmt_compat_ioctl,
8533 #endif
8534 .llseek = noop_llseek,
8535 };
8536
8537 /*
8538 * PCI hotplug support registration structure
8539 */
8540 static struct pci_driver megasas_pci_driver = {
8541
8542 .name = "megaraid_sas",
8543 .id_table = megasas_pci_table,
8544 .probe = megasas_probe_one,
8545 .remove = megasas_detach_one,
8546 .suspend = megasas_suspend,
8547 .resume = megasas_resume,
8548 .shutdown = megasas_shutdown,
8549 };
8550
8551 /*
8552 * Sysfs driver attributes
8553 */
8554 static ssize_t version_show(struct device_driver *dd, char *buf)
8555 {
8556 return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
8557 MEGASAS_VERSION);
8558 }
8559 static DRIVER_ATTR_RO(version);
8560
8561 static ssize_t release_date_show(struct device_driver *dd, char *buf)
8562 {
8563 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
8564 MEGASAS_RELDATE);
8565 }
8566 static DRIVER_ATTR_RO(release_date);
8567
8568 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
8569 {
8570 return sprintf(buf, "%u\n", support_poll_for_event);
8571 }
8572 static DRIVER_ATTR_RO(support_poll_for_event);
8573
8574 static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
8575 {
8576 return sprintf(buf, "%u\n", support_device_change);
8577 }
8578 static DRIVER_ATTR_RO(support_device_change);
8579
8580 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
8581 {
8582 return sprintf(buf, "%u\n", megasas_dbg_lvl);
8583 }
8584
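/* Sysfs write handler: update the driver-wide debug level (megasas_dbg_lvl) */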
8585 static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
8586 size_t count)
8587 {
8588 int retval = count;
8589
8590 if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
8591 printk(KERN_ERR "megasas: could not set dbg_lvl\n");
8592 retval = -EINVAL;
8593 }
8594 return retval;
8595 }
8596 static DRIVER_ATTR_RW(dbg_lvl);
8597
8598 static ssize_t
8599 support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
8600 {
8601 return sprintf(buf, "%u\n", support_nvme_encapsulation);
8602 }
8603
8604 static DRIVER_ATTR_RO(support_nvme_encapsulation);
8605
8606 static ssize_t
8607 support_pci_lane_margining_show(struct device_driver *dd, char *buf)
8608 {
8609 return sprintf(buf, "%u\n", support_pci_lane_margining);
8610 }
8611
8612 static DRIVER_ATTR_RO(support_pci_lane_margining);
8613
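/*
 * megasas_remove_scsi_device -	Remove an sdev and drop the reference
 *				taken by scsi_device_lookup()
 */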
8614 static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
8615 {
8616 sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
8617 scsi_remove_device(sdev);
8618 scsi_device_put(sdev);
8619 }
8620
8621 /**
8622 * megasas_update_device_list - Update the PD and LD device list from FW
8623 * after an AEN event notification
8624 * @instance: Adapter soft state
8625 * @event_type: Indicates type of event (PD or LD event)
8626 *
8627 * @return: Success or failure
8628 *
8629 * Issue DCMDs to Firmware to update the internal device list in driver.
8630 * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
8631 * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
8632 */
8633 static
8634 int megasas_update_device_list(struct megasas_instance *instance,
8635 int event_type)
8636 {
8637 int dcmd_ret = DCMD_SUCCESS;
8638
8639 if (instance->enable_fw_dev_list) {
8640 dcmd_ret = megasas_host_device_list_query(instance, false);
8641 if (dcmd_ret != DCMD_SUCCESS)
8642 goto out;
8643 } else {
8644 if (event_type & SCAN_PD_CHANNEL) {
8645 dcmd_ret = megasas_get_pd_list(instance);
8646
8647 if (dcmd_ret != DCMD_SUCCESS)
8648 goto out;
8649 }
8650
8651 if (event_type & SCAN_VD_CHANNEL) {
8652 if (!instance->requestorId ||
8653 (instance->requestorId &&
8654 megasas_get_ld_vf_affiliation(instance, 0))) {
8655 dcmd_ret = megasas_ld_list_query(instance,
8656 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
8657 if (dcmd_ret != DCMD_SUCCESS)
8658 goto out;
8659 }
8660 }
8661 }
8662
8663 out:
8664 return dcmd_ret;
8665 }
8666
8667 /**
8668 * megasas_add_remove_devices - Add/remove devices to SCSI mid-layer
8669 * after an AEN event notification
8670 * @instance: Adapter soft state
8671 * @scan_type: Indicates type of devices (PD/LD) to add
8672 * @return void
8673 */
8674 static
8675 void megasas_add_remove_devices(struct megasas_instance *instance,
8676 int scan_type)
8677 {
8678 int i, j;
8679 u16 pd_index = 0;
8680 u16 ld_index = 0;
8681 u16 channel = 0, id = 0;
8682 struct Scsi_Host *host;
8683 struct scsi_device *sdev1;
8684 struct MR_HOST_DEVICE_LIST *targetid_list = NULL;
8685 struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL;
8686
8687 host = instance->host;
8688
8689 if (instance->enable_fw_dev_list) {
8690 targetid_list = instance->host_device_list_buf;
8691 for (i = 0; i < targetid_list->count; i++) {
8692 targetid_entry = &targetid_list->host_device_list[i];
8693 if (targetid_entry->flags.u.bits.is_sys_pd) {
8694 channel = le16_to_cpu(targetid_entry->target_id) /
8695 MEGASAS_MAX_DEV_PER_CHANNEL;
8696 id = le16_to_cpu(targetid_entry->target_id) %
8697 MEGASAS_MAX_DEV_PER_CHANNEL;
8698 } else {
8699 channel = MEGASAS_MAX_PD_CHANNELS +
8700 (le16_to_cpu(targetid_entry->target_id) /
8701 MEGASAS_MAX_DEV_PER_CHANNEL);
8702 id = le16_to_cpu(targetid_entry->target_id) %
8703 MEGASAS_MAX_DEV_PER_CHANNEL;
8704 }
8705 sdev1 = scsi_device_lookup(host, channel, id, 0);
8706 if (!sdev1) {
8707 scsi_add_device(host, channel, id, 0);
8708 } else {
8709 scsi_device_put(sdev1);
8710 }
8711 }
8712 }
8713
8714 if (scan_type & SCAN_PD_CHANNEL) {
8715 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
8716 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
8717 pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j;
8718 sdev1 = scsi_device_lookup(host, i, j, 0);
8719 if (instance->pd_list[pd_index].driveState ==
8720 MR_PD_STATE_SYSTEM) {
8721 if (!sdev1)
8722 scsi_add_device(host, i, j, 0);
8723 else
8724 scsi_device_put(sdev1);
8725 } else {
8726 if (sdev1)
8727 megasas_remove_scsi_device(sdev1);
8728 }
8729 }
8730 }
8731 }
8732
8733 if (scan_type & SCAN_VD_CHANNEL) {
8734 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
8735 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
8736 ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
8737 sdev1 = scsi_device_lookup(host,
8738 MEGASAS_MAX_PD_CHANNELS + i, j, 0);
8739 if (instance->ld_ids[ld_index] != 0xff) {
8740 if (!sdev1)
8741 scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
8742 else
8743 scsi_device_put(sdev1);
8744 } else {
8745 if (sdev1)
8746 megasas_remove_scsi_device(sdev1);
8747 }
8748 }
8749 }
8750 }
8751
8752 }
8753
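/**
 * megasas_aen_polling -	Deferred hotplug work triggered by an AEN
 * @work:	work_struct embedded in the megasas_aen_event being handled
 *
 * Decodes the latest event, refreshes the PD/LD device lists when the
 * event affects them, adds or removes SCSI devices accordingly and
 * re-registers an AEN with firmware for the next sequence number.
 */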
8754 static void
8755 megasas_aen_polling(struct work_struct *work)
8756 {
8757 struct megasas_aen_event *ev =
8758 container_of(work, struct megasas_aen_event, hotplug_work.work);
8759 struct megasas_instance *instance = ev->instance;
8760 union megasas_evt_class_locale class_locale;
8761 int event_type = 0;
8762 u32 seq_num;
8763 int error;
8764 u8 dcmd_ret = DCMD_SUCCESS;
8765
8766 if (!instance) {
8767 printk(KERN_ERR "invalid instance!\n");
8768 kfree(ev);
8769 return;
8770 }
8771
8772 /* Don't run the event workqueue thread if OCR is running */
8773 mutex_lock(&instance->reset_mutex);
8774
8775 instance->ev = NULL;
8776 if (instance->evt_detail) {
8777 megasas_decode_evt(instance);
8778
8779 switch (le32_to_cpu(instance->evt_detail->code)) {
8780
8781 case MR_EVT_PD_INSERTED:
8782 case MR_EVT_PD_REMOVED:
8783 event_type = SCAN_PD_CHANNEL;
8784 break;
8785
8786 case MR_EVT_LD_OFFLINE:
8787 case MR_EVT_CFG_CLEARED:
8788 case MR_EVT_LD_DELETED:
8789 case MR_EVT_LD_CREATED:
8790 event_type = SCAN_VD_CHANNEL;
8791 break;
8792
8793 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
8794 case MR_EVT_FOREIGN_CFG_IMPORTED:
8795 case MR_EVT_LD_STATE_CHANGE:
8796 event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL;
8797 dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
8798 instance->host->host_no);
8799 break;
8800
8801 case MR_EVT_CTRL_PROP_CHANGED:
8802 dcmd_ret = megasas_get_ctrl_info(instance);
8803 if (dcmd_ret == DCMD_SUCCESS &&
8804 instance->snapdump_wait_time) {
8805 megasas_get_snapdump_properties(instance);
8806 dev_info(&instance->pdev->dev,
8807 "Snap dump wait time\t: %d\n",
8808 instance->snapdump_wait_time);
8809 }
8810 break;
8811 default:
8812 event_type = 0;
8813 break;
8814 }
8815 } else {
8816 dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
8817 mutex_unlock(&instance->reset_mutex);
8818 kfree(ev);
8819 return;
8820 }
8821
8822 if (event_type)
8823 dcmd_ret = megasas_update_device_list(instance, event_type);
8824
8825 mutex_unlock(&instance->reset_mutex);
8826
8827 if (event_type && dcmd_ret == DCMD_SUCCESS)
8828 megasas_add_remove_devices(instance, event_type);
8829
8830 if (dcmd_ret == DCMD_SUCCESS)
8831 seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
8832 else
8833 seq_num = instance->last_seq_num;
8834
8835 /* Register AEN with FW for latest sequence number plus 1 */
8836 class_locale.members.reserved = 0;
8837 class_locale.members.locale = MR_EVT_LOCALE_ALL;
8838 class_locale.members.class = MR_EVT_CLASS_DEBUG;
8839
8840 if (instance->aen_cmd != NULL) {
8841 kfree(ev);
8842 return;
8843 }
8844
8845 mutex_lock(&instance->reset_mutex);
8846 error = megasas_register_aen(instance, seq_num,
8847 class_locale.word);
8848 if (error)
8849 dev_err(&instance->pdev->dev,
8850 "register aen failed error %x\n", error);
8851
8852 mutex_unlock(&instance->reset_mutex);
8853 kfree(ev);
8854 }
8855
8856 /**
8857 * megasas_init - Driver load entry point
8858 */
8859 static int __init megasas_init(void)
8860 {
8861 int rval;
8862
8863 /*
8864 * Booted in kdump kernel, minimize memory footprints by
8865 * disabling a few features
8866 */
8867 if (reset_devices) {
8868 msix_vectors = 1;
8869 rdpq_enable = 0;
8870 dual_qdepth_disable = 1;
8871 }
8872
	/*
	 * Announce driver version and other information
	 */
	pr_info("megasas: %s\n", MEGASAS_VERSION);

	spin_lock_init(&poll_aen_lock);

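	/* Capability flags; they back the support_* driver attributes registered below. */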
	support_poll_for_event = 2;
	support_device_change = 1;
	support_nvme_encapsulation = true;
	support_pci_lane_margining = true;

	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));

	/*
	 * Register character device node
	 */
	rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);

	if (rval < 0) {
		printk(KERN_DEBUG "megasas: failed to open device node\n");
		return rval;
	}

	megasas_mgmt_majorno = rval;

	megasas_init_debugfs();

	/*
	 * Register ourselves as PCI hotplug module
	 */
	rval = pci_register_driver(&megasas_pci_driver);

	if (rval) {
		printk(KERN_DEBUG "megasas: PCI hotplug registration failed \n");
		goto err_pcidrv;
	}

	if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
	    (event_log_level > MFI_EVT_CLASS_DEAD)) {
		pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
		event_log_level = MFI_EVT_CLASS_CRITICAL;
	}

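	/*
	 * Create driver-level sysfs attributes: version, release date,
	 * event-poll support, debug level and the supported-feature flags.
	 */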
	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_version);
	if (rval)
		goto err_dcf_attr_ver;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_release_date);
	if (rval)
		goto err_dcf_rel_date;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_poll_for_event);
	if (rval)
		goto err_dcf_support_poll_for_event;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_dbg_lvl);
	if (rval)
		goto err_dcf_dbg_lvl;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_device_change);
	if (rval)
		goto err_dcf_support_device_change;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_nvme_encapsulation);
	if (rval)
		goto err_dcf_support_nvme_encapsulation;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_pci_lane_margining);
	if (rval)
		goto err_dcf_support_pci_lane_margining;

	return rval;

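	/*
	 * Error unwind: remove the attributes and undo the registrations
	 * in the reverse order of their creation above.
	 */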
err_dcf_support_pci_lane_margining:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_nvme_encapsulation);

err_dcf_support_nvme_encapsulation:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_device_change);

err_dcf_support_device_change:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_dbg_lvl);
err_dcf_dbg_lvl:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_poll_for_event);
err_dcf_support_poll_for_event:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_release_date);
err_dcf_rel_date:
	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
err_dcf_attr_ver:
	pci_unregister_driver(&megasas_pci_driver);
err_pcidrv:
	megasas_exit_debugfs();
	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
	return rval;
}

/**
 * megasas_exit - Driver unload entry point
 */
static void __exit megasas_exit(void)
{
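	/*
	 * Tear down the driver sysfs attributes created in megasas_init(),
	 * then unregister the PCI driver, debugfs and the ioctl chrdev.
	 */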
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_dbg_lvl);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_poll_for_event);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_device_change);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_release_date);
	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_nvme_encapsulation);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_pci_lane_margining);

	pci_unregister_driver(&megasas_pci_driver);
	megasas_exit_debugfs();
	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
}

module_init(megasas_init);
module_exit(megasas_exit);