/*
   3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.

   Written By: Adam Radford <aradford@gmail.com>
   Modifications By: Tom Couch

   Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
   Copyright (C) 2010 LSI Corporation.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   NO WARRANTY
   THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
   CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
   LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
   solely responsible for determining the appropriateness of using and
   distributing the Program and assumes all risks associated with its
   exercise of rights under this Agreement, including but not limited to
   the risks and costs of program errors, damage to or loss of data,
   programs or equipment, and unavailability or interruption of operations.

   DISCLAIMER OF LIABILITY
   NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
   ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
   TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
   USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
   HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

   Bugs/Comments/Suggestions should be mailed to:
   aradford@gmail.com

   Note: This version of the driver does not contain a bundled firmware
         image.

   History
   -------
   2.26.02.000 - Driver cleanup for kernel submission.
   2.26.02.001 - Replace schedule_timeout() calls with msleep().
   2.26.02.002 - Add support for PAE mode.
                 Add lun support.
                 Fix twa_remove() to free irq handler/unregister_chrdev()
                 before shutting down card.
                 Change to new 'change_queue_depth' api.
                 Fix 'handled=1' ISR usage, remove bogus IRQ check.
                 Remove un-needed eh_abort handler.
                 Add support for embedded firmware error strings.
   2.26.02.003 - Correctly handle single sgl's with use_sg=1.
   2.26.02.004 - Add support for 9550SX controllers.
   2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
   2.26.02.006 - Fix 9550SX pchip reset timeout.
                 Add big endian support.
   2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
   2.26.02.008 - Free irq handler in __twa_shutdown().
                 Serialize reset code.
                 Add support for 9650SE controllers.
   2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
   2.26.02.010 - Add support for 9690SA controllers.
   2.26.02.011 - Increase max AENs drained to 256.
                 Add MSI support and "use_msi" module parameter.
                 Fix bug in twa_get_param() on 4GB+.
                 Use pci_resource_len() for ioremap().
   2.26.02.012 - Add power management support.
   2.26.02.013 - Fix bug in twa_load_sgl().
   2.26.02.014 - Force 60 second timeout default.
*/

#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_cmnd.h>
#include "3w-9xxx.h"

/* Globals */
#define TW_DRIVER_VERSION "2.26.02.014"
static DEFINE_MUTEX(twa_chrdev_mutex);
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
static int twa_major = -1;
extern struct timezone sys_tz;

/* Module parameters */
MODULE_AUTHOR ("LSI");
MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(TW_DRIVER_VERSION);

static int use_msi = 0;
module_param(use_msi, int, S_IRUGO);
MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");

/* Function prototypes */
static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
static char *twa_aen_severity_lookup(unsigned char severity_code);
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static int twa_chrdev_open(struct inode *inode, struct file *file);
static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
			      u32 set_features, unsigned short current_fw_srl,
			      unsigned short current_fw_arch_id,
			      unsigned short current_fw_branch,
			      unsigned short current_fw_build,
			      unsigned short *fw_on_ctlr_srl,
			      unsigned short *fw_on_ctlr_arch_id,
			      unsigned short *fw_on_ctlr_branch,
			      unsigned short *fw_on_ctlr_build,
			      u32 *init_connect_result);
static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
				   unsigned char *cdb, int use_sg,
				   TW_SG_Entry *sglistarg);
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);

/* Functions */

/* Show some statistics about the card */
static ssize_t twa_show_stats(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
	unsigned long flags = 0;
	ssize_t len;

	spin_lock_irqsave(tw_dev->host->host_lock, flags);
	len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
		       "Current commands posted: %4d\n"
		       "Max commands posted: %4d\n"
		       "Current pending commands: %4d\n"
		       "Max pending commands: %4d\n"
		       "Last sgl length: %4d\n"
		       "Max sgl length: %4d\n"
		       "Last sector count: %4d\n"
		       "Max sector count: %4d\n"
		       "SCSI Host Resets: %4d\n"
		       "AEN's: %4d\n",
		       TW_DRIVER_VERSION,
		       tw_dev->posted_request_count,
		       tw_dev->max_posted_request_count,
		       tw_dev->pending_request_count,
		       tw_dev->max_pending_request_count,
		       tw_dev->sgl_entries,
		       tw_dev->max_sgl_entries,
		       tw_dev->sector_count,
		       tw_dev->max_sector_count,
		       tw_dev->num_resets,
		       tw_dev->aen_count);
	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
	return len;
} /* End twa_show_stats() */

/* Create sysfs 'stats' entry */
static struct device_attribute twa_host_stats_attr = {
	.attr = {
		.name = "stats",
		.mode = S_IRUGO,
	},
	.show = twa_show_stats
};

/* Host attributes initializer */
static struct device_attribute *twa_host_attrs[] = {
	&twa_host_stats_attr,
	NULL,
};

/* File operations struct for character device */
static const struct file_operations twa_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = twa_chrdev_ioctl,
	.open = twa_chrdev_open,
	.release = NULL,
	.llseek = noop_llseek,
};

/*
 * The controllers use an inline buffer instead of a mapped SGL for small,
 * single entry buffers. Note that we treat a zero-length transfer like
 * a mapped SGL.
 */
static bool twa_command_mapped(struct scsi_cmnd *cmd)
{
	return scsi_sg_count(cmd) != 1 ||
		scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
}

/* This function will complete an aen request from the isr */
static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
{
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Command_Apache_Header *header;
	unsigned short aen;
	int retval = 1;

	header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
	tw_dev->posted_request_count--;
	aen = le16_to_cpu(header->status_block.error);
	full_command_packet = tw_dev->command_packet_virt[request_id];
	command_packet = &full_command_packet->command.oldcommand;

	/* First check for internal completion of set param for time sync */
	if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
		/* Keep reading the queue in case there are more aen's */
		if (twa_aen_read_queue(tw_dev, request_id))
			goto out2;
		else {
			retval = 0;
			goto out;
		}
	}

	switch (aen) {
	case TW_AEN_QUEUE_EMPTY:
		/* Quit reading the queue if this is the last one */
		break;
	case TW_AEN_SYNC_TIME_WITH_HOST:
		twa_aen_sync_time(tw_dev, request_id);
		retval = 0;
		goto out;
	default:
		twa_aen_queue_event(tw_dev, header);

		/* If there are more aen's, keep reading the queue */
		if (twa_aen_read_queue(tw_dev, request_id))
			goto out2;
		else {
			retval = 0;
			goto out;
		}
	}
	retval = 0;
out2:
	tw_dev->state[request_id] = TW_S_COMPLETED;
	twa_free_request_id(tw_dev, request_id);
	clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
out:
	return retval;
} /* End twa_aen_complete() */

/* This function will drain aen queue */
static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
{
	int request_id = 0;
	unsigned char cdb[TW_MAX_CDB_LEN];
	TW_SG_Entry sglist[1];
	int finished = 0, count = 0;
	TW_Command_Full *full_command_packet;
	TW_Command_Apache_Header *header;
	unsigned short aen;
	int first_reset = 0, queue = 0, retval = 1;

	if (no_check_reset)
		first_reset = 0;
	else
		first_reset = 1;

	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));

	/* Initialize cdb */
	memset(&cdb, 0, TW_MAX_CDB_LEN);
	cdb[0] = REQUEST_SENSE; /* opcode */
	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */

	/* Initialize sglist */
	memset(&sglist, 0, sizeof(TW_SG_Entry));
	sglist[0].length = cpu_to_le32(TW_SECTOR_SIZE);
	sglist[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);

	if (tw_dev->generic_buffer_phys[request_id] & TW_ALIGNMENT_9000_SGL) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
		goto out;
	}

	/* Mark internal command */
	tw_dev->srb[request_id] = NULL;

	do {
		/* Send command to the board */
		if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
			goto out;
		}

		/* Now poll for completion */
		if (twa_poll_response(tw_dev, request_id, 30)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
			tw_dev->posted_request_count--;
			goto out;
		}

		tw_dev->posted_request_count--;
		header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
		aen = le16_to_cpu(header->status_block.error);
		queue = 0;
		count++;

		switch (aen) {
		case TW_AEN_QUEUE_EMPTY:
			if (first_reset != 1)
				goto out;
			else
				finished = 1;
			break;
		case TW_AEN_SOFT_RESET:
			if (first_reset == 0)
				first_reset = 1;
			else
				queue = 1;
			break;
		case TW_AEN_SYNC_TIME_WITH_HOST:
			break;
		default:
			queue = 1;
		}

		/* Now queue an event info */
		if (queue)
			twa_aen_queue_event(tw_dev, header);
	} while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));

	if (count == TW_MAX_AEN_DRAIN)
		goto out;

	retval = 0;
out:
	tw_dev->state[request_id] = TW_S_INITIAL;
	return retval;
} /* End twa_aen_drain_queue() */

/* This function will queue an event */
static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
{
	u32 local_time;
	TW_Event *event;
	unsigned short aen;
	char host[16];
	char *error_str;

	tw_dev->aen_count++;

	/* Fill out event info */
	event = tw_dev->event_queue[tw_dev->error_index];

	/* Check for clobber: if the slot being reused still holds an
	   unretrieved event, events are being overwritten */
	host[0] = '\0';
	if (tw_dev->host) {
		sprintf(host, " scsi%d:", tw_dev->host->host_no);
		if (event->retrieved == TW_AEN_NOT_RETRIEVED)
			tw_dev->aen_clobber = 1;
	}

	aen = le16_to_cpu(header->status_block.error);
	memset(event, 0, sizeof(TW_Event));

	event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
	/* event->time_stamp_sec overflows in y2106 */
	local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
	event->time_stamp_sec = local_time;
	event->aen_code = aen;
	event->retrieved = TW_AEN_NOT_RETRIEVED;
	event->sequence_id = tw_dev->error_sequence_id;
	tw_dev->error_sequence_id++;

	/* Check for embedded error string */
	error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);

	header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
	event->parameter_len = strlen(header->err_specific_desc);
	memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
	if (event->severity != TW_AEN_SEVERITY_DEBUG)
		printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
		       host,
		       twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
		       TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
		       error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
		       header->err_specific_desc);
	else
		tw_dev->aen_count--;

	if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
		tw_dev->event_queue_wrapped = 1;
	tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
} /* End twa_aen_queue_event() */

/* This function will read the aen queue from the isr */
static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
{
	unsigned char cdb[TW_MAX_CDB_LEN];
	TW_SG_Entry sglist[1];
	TW_Command_Full *full_command_packet;
	int retval = 1;

	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));

	/* Initialize cdb */
	memset(&cdb, 0, TW_MAX_CDB_LEN);
	cdb[0] = REQUEST_SENSE; /* opcode */
	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */

	/* Initialize sglist */
	memset(&sglist, 0, sizeof(TW_SG_Entry));
	sglist[0].length = cpu_to_le32(TW_SECTOR_SIZE);
	sglist[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);

	/* Mark internal command */
	tw_dev->srb[request_id] = NULL;

	/* Now post the command packet */
	if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
		goto out;
	}
	retval = 0;
out:
	return retval;
} /* End twa_aen_read_queue() */

/* This function will look up an AEN severity string */
static char *twa_aen_severity_lookup(unsigned char severity_code)
{
	char *retval = NULL;

	if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
	    (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
		goto out;

	retval = twa_aen_severity_table[severity_code];
out:
	return retval;
} /* End twa_aen_severity_lookup() */

/* This function will sync firmware time with the host time */
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
{
	u32 schedulertime;
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Param_Apache *param;
	time64_t local_time;

	/* Fill out the command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	command_packet = &full_command_packet->command.oldcommand;
	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
	command_packet->request_id = request_id;
	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
	command_packet->size = TW_COMMAND_SIZE;
	command_packet->byte6_offset.parameter_count = cpu_to_le16(1);

	/* Setup the param */
	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
	memset(param, 0, TW_SECTOR_SIZE);
	param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
	param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
	param->parameter_size_bytes = cpu_to_le16(4);

	/* Convert system time in UTC to local time seconds since last
	   Sunday 12:00AM */
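	/* (604800 = seconds per week; the Unix epoch, 1970-01-01, was a
	   Thursday, so subtracting 3 days aligns the modulo to Sunday 00:00) */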
	local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
	div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);

	memcpy(param->data, &(__le32){cpu_to_le32(schedulertime)}, sizeof(__le32));

	/* Mark internal command */
	tw_dev->srb[request_id] = NULL;

	/* Now post the command */
	twa_post_command_packet(tw_dev, request_id, 1);
} /* End twa_aen_sync_time() */

/* This function will allocate memory and check if it is correctly aligned;
   'which' selects the pool: 0 = command packets, 1 = generic buffers */
static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
{
	int i;
	dma_addr_t dma_handle;
	unsigned long *cpu_addr;
	int retval = 1;

	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
				      size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
	if (!cpu_addr) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
		goto out;
	}

	if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
		dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
				  cpu_addr, dma_handle);
		goto out;
	}

	memset(cpu_addr, 0, size*TW_Q_LENGTH);

	for (i = 0; i < TW_Q_LENGTH; i++) {
		switch(which) {
		case 0:
			tw_dev->command_packet_phys[i] = dma_handle+(i*size);
			tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
			break;
		case 1:
			tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
			tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
			break;
		}
	}
	retval = 0;
out:
	return retval;
} /* End twa_allocate_memory() */

/* This function will check the status register for unexpected bits */
static int twa_check_bits(u32 status_reg_value)
{
	int retval = 1;

	if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
		goto out;
	if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
		goto out;

	retval = 0;
out:
	return retval;
} /* End twa_check_bits() */

/* This function will check the srl and decide if we are compatible */
static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
{
	int retval = 1;
	unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
	unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
	u32 init_connect_result = 0;

	if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
			       TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
			       TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
			       TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
			       &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
			       &fw_on_ctlr_build, &init_connect_result)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
		goto out;
	}

	tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
	tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
	tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;

	/* Try base mode compatibility */
	if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
		if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
				       TW_EXTENDED_INIT_CONNECT,
				       TW_BASE_FW_SRL, TW_9000_ARCH_ID,
				       TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
				       &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
				       &fw_on_ctlr_branch, &fw_on_ctlr_build,
				       &init_connect_result)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
			goto out;
		}
		if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
			if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
			} else {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
			}
			goto out;
		}
		tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
		tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
		tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
	}

	/* Load rest of compatibility struct */
	strlcpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION,
		sizeof(tw_dev->tw_compat_info.driver_version));
	tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
	tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
	tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
	tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
	tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
	tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
	tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
	tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
	tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;

	retval = 0;
out:
	return retval;
} /* End twa_check_srl() */

/* This function handles ioctl for the character device */
static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(file);
	long timeout;
	unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
	dma_addr_t dma_handle;
	int request_id = 0;
	unsigned int sequence_id = 0;
	unsigned char event_index, start_index;
	TW_Ioctl_Driver_Command driver_command;
	TW_Ioctl_Buf_Apache *tw_ioctl;
	TW_Lock *tw_lock;
	TW_Command_Full *full_command_packet;
	TW_Compatibility_Info *tw_compat_info;
	TW_Event *event;
	ktime_t current_time;
	TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
	int retval = TW_IOCTL_ERROR_OS_EFAULT;
	void __user *argp = (void __user *)arg;

	mutex_lock(&twa_chrdev_mutex);

	/* Only let one of these through at a time */
	if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
		retval = TW_IOCTL_ERROR_OS_EINTR;
		goto out;
	}

	/* First copy down the driver command */
	if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
		goto out2;

	/* Check data buffer size */
	if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
		retval = TW_IOCTL_ERROR_OS_EINVAL;
		goto out2;
	}

	/* Hardware can only do multiple of 512 byte transfers */
	data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;

	/* Now allocate ioctl buf memory */
	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
				      sizeof(TW_Ioctl_Buf_Apache) + data_buffer_length_adjusted,
				      &dma_handle, GFP_KERNEL);
	if (!cpu_addr) {
		retval = TW_IOCTL_ERROR_OS_ENOMEM;
		goto out2;
	}

	tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;

	/* Now copy down the entire ioctl */
	if (copy_from_user(tw_ioctl, argp, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length))
		goto out3;

	/* See which ioctl we are doing */
	switch (cmd) {
	case TW_IOCTL_FIRMWARE_PASS_THROUGH:
		spin_lock_irqsave(tw_dev->host->host_lock, flags);
		twa_get_request_id(tw_dev, &request_id);

		/* Flag internal command */
		tw_dev->srb[request_id] = NULL;

		/* Flag chrdev ioctl */
		tw_dev->chrdev_request_id = request_id;

		full_command_packet = &tw_ioctl->firmware_command;

		/* Load request id and sglist for both command types */
		twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);

		memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));

		/* Now post the command packet to the controller */
		twa_post_command_packet(tw_dev, request_id, 1);
		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);

		timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;

		/* Now wait for command to complete */
		timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);

		/* We timed out, and didn't get an interrupt */
		if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
			/* Now we need to reset the board */
			printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
			       tw_dev->host->host_no, TW_DRIVER, 0x37,
			       cmd);
			retval = TW_IOCTL_ERROR_OS_EIO;
			twa_reset_device_extension(tw_dev);
			goto out3;
		}

		/* Now copy in the command packet response */
		memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));

		/* Now complete the io */
		spin_lock_irqsave(tw_dev->host->host_lock, flags);
		tw_dev->posted_request_count--;
		tw_dev->state[request_id] = TW_S_COMPLETED;
		twa_free_request_id(tw_dev, request_id);
		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
		break;
	case TW_IOCTL_GET_COMPATIBILITY_INFO:
		tw_ioctl->driver_command.status = 0;
		/* Copy compatibility struct into ioctl data buffer */
		tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
		memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
		break;
	case TW_IOCTL_GET_LAST_EVENT:
		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			} else
				tw_ioctl->driver_command.status = 0;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			tw_ioctl->driver_command.status = 0;
		}
		event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_FIRST_EVENT:
		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			} else
				tw_ioctl->driver_command.status = 0;
			event_index = tw_dev->error_index;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			tw_ioctl->driver_command.status = 0;
			event_index = 0;
		}
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_NEXT_EVENT:
		event = (TW_Event *)tw_ioctl->data_buffer;
		sequence_id = event->sequence_id;
		tw_ioctl->driver_command.status = 0;

		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			}
			start_index = tw_dev->error_index;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			start_index = 0;
		}
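		/* Slot expected to hold the event just after 'sequence_id'
		   (events are stored contiguously starting at start_index) */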
		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;

		if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
				tw_dev->aen_clobber = 1;
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
			break;
		}
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_PREVIOUS_EVENT:
		event = (TW_Event *)tw_ioctl->data_buffer;
		sequence_id = event->sequence_id;
		tw_ioctl->driver_command.status = 0;

		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			}
			start_index = tw_dev->error_index;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			start_index = 0;
		}
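		/* Slot expected to hold the event just before 'sequence_id'
		   (events are stored contiguously starting at start_index) */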
		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;

		if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
				tw_dev->aen_clobber = 1;
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
			break;
		}
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_LOCK:
		tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
		current_time = ktime_get();

		if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) ||
		    ktime_after(current_time, tw_dev->ioctl_time)) {
			tw_dev->ioctl_sem_lock = 1;
			tw_dev->ioctl_time = ktime_add_ms(current_time, tw_lock->timeout_msec);
			tw_ioctl->driver_command.status = 0;
			tw_lock->time_remaining_msec = tw_lock->timeout_msec;
		} else {
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
			tw_lock->time_remaining_msec = ktime_ms_delta(tw_dev->ioctl_time, current_time);
		}
		break;
	case TW_IOCTL_RELEASE_LOCK:
		if (tw_dev->ioctl_sem_lock == 1) {
			tw_dev->ioctl_sem_lock = 0;
			tw_ioctl->driver_command.status = 0;
		} else {
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
		}
		break;
	default:
		retval = TW_IOCTL_ERROR_OS_ENOTTY;
		goto out3;
	}

	/* Now copy the entire response to userspace */
	if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length) == 0)
		retval = 0;
out3:
	/* Now free ioctl buf memory */
	dma_free_coherent(&tw_dev->tw_pci_dev->dev,
			  sizeof(TW_Ioctl_Buf_Apache) + data_buffer_length_adjusted,
			  cpu_addr, dma_handle);
out2:
	mutex_unlock(&tw_dev->ioctl_lock);
out:
	mutex_unlock(&twa_chrdev_mutex);
	return retval;
} /* End twa_chrdev_ioctl() */

/* This function handles open for the character device */
/* NOTE that this function will race with remove. */
static int twa_chrdev_open(struct inode *inode, struct file *file)
{
	unsigned int minor_number;
	int retval = TW_IOCTL_ERROR_OS_ENODEV;

	if (!capable(CAP_SYS_ADMIN)) {
		retval = -EACCES;
		goto out;
	}

	minor_number = iminor(inode);
	if (minor_number >= twa_device_extension_count)
		goto out;
	retval = 0;
out:
	return retval;
} /* End twa_chrdev_open() */

/* This function will print readable messages from status register errors */
static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
{
	int retval = 1;

	/* Check for various error conditions and handle them appropriately */
	if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
		writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
	}

	if (status_reg_value & TW_STATUS_PCI_ABORT) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
		writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
		pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
	}

	if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
		if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
		     (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
		    (!test_bit(TW_IN_RESET, &tw_dev->flags)))
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
		writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
	}

	if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
		if (tw_dev->reset_print == 0) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
			tw_dev->reset_print = 1;
		}
		goto out;
	}
	retval = 0;
out:
	return retval;
} /* End twa_decode_bits() */

/* This function will empty the response queue */
static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
{
	u32 status_reg_value;
	int count = 0, retval = 1;

	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));

	while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
		readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
		count++;
	}
	if (count == TW_MAX_RESPONSE_DRAIN)
		goto out;

	retval = 0;
out:
	return retval;
} /* End twa_empty_response_queue() */

/* This function will clear the pchip/response queue on 9550SX */
static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
{
	u32 response_que_value = 0;
	unsigned long before;
	int retval = 1;

	if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
		before = jiffies;
		while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
			response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
			msleep(1);
			if (time_after(jiffies, before + HZ * 30))
				goto out;
		}
		/* P-chip settle time */
		msleep(500);
		retval = 0;
	} else
		retval = 0;
out:
	return retval;
} /* End twa_empty_response_queue_large() */

/* This function passes sense keys from firmware to scsi layer */
static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
{
	TW_Command_Full *full_command_packet;
	unsigned short error;
	int retval = 1;
	char *error_str;

	full_command_packet = tw_dev->command_packet_virt[request_id];

	/* Check for embedded error string */
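	/* (the firmware may place a second, human readable string right after
	   the NUL terminator of err_specific_desc) */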
	error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);

	/* Don't print error for Logical unit not supported during rollcall */
	error = le16_to_cpu(full_command_packet->header.status_block.error);
	if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
		if (print_host)
			printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
			       tw_dev->host->host_no,
			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR, error,
			       error_str[0] ? error_str : twa_string_lookup(twa_error_table, error),
			       full_command_packet->header.err_specific_desc);
		else
			printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR, error,
			       error_str[0] ? error_str : twa_string_lookup(twa_error_table, error),
			       full_command_packet->header.err_specific_desc);
	}

	if (copy_sense) {
		memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
		tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
		retval = TW_ISR_DONT_RESULT;
		goto out;
	}
	retval = 0;
out:
	return retval;
} /* End twa_fill_sense() */

/* This function will free up device extension resources */
static void twa_free_device_extension(TW_Device_Extension *tw_dev)
{
	if (tw_dev->command_packet_virt[0])
		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
				  sizeof(TW_Command_Full) * TW_Q_LENGTH,
				  tw_dev->command_packet_virt[0],
				  tw_dev->command_packet_phys[0]);

	if (tw_dev->generic_buffer_virt[0])
		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
				  TW_SECTOR_SIZE * TW_Q_LENGTH,
				  tw_dev->generic_buffer_virt[0],
				  tw_dev->generic_buffer_phys[0]);

	kfree(tw_dev->event_queue[0]);
} /* End twa_free_device_extension() */

/* This function will free a request id */
static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
{
	tw_dev->free_queue[tw_dev->free_tail] = request_id;
	tw_dev->state[request_id] = TW_S_FINISHED;
	tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
} /* End twa_free_request_id() */

/* This function will get parameter table entries from the firmware */
static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
{
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Param_Apache *param;
	void *retval = NULL;

	/* Setup the command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	command_packet = &full_command_packet->command.oldcommand;

	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
	command_packet->size = TW_COMMAND_SIZE;
	command_packet->request_id = request_id;
	command_packet->byte6_offset.block_count = cpu_to_le16(1);

	/* Now setup the param */
	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
	memset(param, 0, TW_SECTOR_SIZE);
	param->table_id = cpu_to_le16(table_id | 0x8000);
	param->parameter_id = cpu_to_le16(parameter_id);
	param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);

	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);

	/* Post the command packet to the board */
	twa_post_command_packet(tw_dev, request_id, 1);

	/* Poll for completion */
	if (twa_poll_response(tw_dev, request_id, 30))
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
	else
		retval = (void *)&(param->data[0]);

	tw_dev->posted_request_count--;
	tw_dev->state[request_id] = TW_S_INITIAL;

	return retval;
} /* End twa_get_param() */

/* This function will assign an available request id */
static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
{
	*request_id = tw_dev->free_queue[tw_dev->free_head];
	tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
	tw_dev->state[*request_id] = TW_S_STARTED;
} /* End twa_get_request_id() */

/* This function will send an initconnection command to controller */
static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
			      u32 set_features, unsigned short current_fw_srl,
			      unsigned short current_fw_arch_id,
			      unsigned short current_fw_branch,
			      unsigned short current_fw_build,
			      unsigned short *fw_on_ctlr_srl,
			      unsigned short *fw_on_ctlr_arch_id,
			      unsigned short *fw_on_ctlr_branch,
			      unsigned short *fw_on_ctlr_build,
			      u32 *init_connect_result)
{
	TW_Command_Full *full_command_packet;
	TW_Initconnect *tw_initconnect;
	int request_id = 0, retval = 1;

	/* Initialize InitConnection command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	full_command_packet->header.header_desc.size_header = 128;

	tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
	tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
	tw_initconnect->request_id = request_id;
	tw_initconnect->message_credits = cpu_to_le16(message_credits);

	/* Turn on 64-bit sgl support if we need to */
	set_features |= sizeof(dma_addr_t) > 4 ? 1 : 0;

	tw_initconnect->features = cpu_to_le32(set_features);

	if (set_features & TW_EXTENDED_INIT_CONNECT) {
		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
		tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
		tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
		tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
		tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
	} else
		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;

	/* Send command packet to the board */
	twa_post_command_packet(tw_dev, request_id, 1);

	/* Poll for completion */
	if (twa_poll_response(tw_dev, request_id, 30)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
	} else {
		if (set_features & TW_EXTENDED_INIT_CONNECT) {
			*fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
			*fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
			*fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
			*fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
			*init_connect_result = le32_to_cpu(tw_initconnect->result);
		}
		retval = 0;
	}

	tw_dev->posted_request_count--;
	tw_dev->state[request_id] = TW_S_INITIAL;

	return retval;
} /* End twa_initconnection() */

/* This function will initialize the fields of a device extension */
static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
{
	int i, retval = 1;

	/* Initialize command packet buffers */
	if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
		goto out;
	}

	/* Initialize generic buffer */
	if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
		goto out;
	}

	/* Allocate event info space */
	tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
	if (!tw_dev->event_queue[0]) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
		goto out;
	}


	for (i = 0; i < TW_Q_LENGTH; i++) {
		tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
		tw_dev->free_queue[i] = i;
		tw_dev->state[i] = TW_S_INITIAL;
	}

	tw_dev->pending_head = TW_Q_START;
	tw_dev->pending_tail = TW_Q_START;
	tw_dev->free_head = TW_Q_START;
	tw_dev->free_tail = TW_Q_START;
	tw_dev->error_sequence_id = 1;
	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;

	mutex_init(&tw_dev->ioctl_lock);
	init_waitqueue_head(&tw_dev->ioctl_wqueue);

	retval = 0;
out:
	return retval;
} /* End twa_initialize_device_extension() */

/* This function is the interrupt service routine */
static irqreturn_t twa_interrupt(int irq, void *dev_instance)
{
	int request_id, error = 0;
	u32 status_reg_value;
	TW_Response_Queue response_que;
	TW_Command_Full *full_command_packet;
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
	int handled = 0;

	/* Get the per adapter lock */
	spin_lock(tw_dev->host->host_lock);

	/* Read the registers */
	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));

	/* Check if this is our interrupt, otherwise bail */
	if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
		goto twa_interrupt_bail;

	handled = 1;

	/* If we are resetting, bail */
	if (test_bit(TW_IN_RESET, &tw_dev->flags))
		goto twa_interrupt_bail;

	/* Check controller for errors */
	if (twa_check_bits(status_reg_value)) {
		if (twa_decode_bits(tw_dev, status_reg_value)) {
			TW_CLEAR_ALL_INTERRUPTS(tw_dev);
			goto twa_interrupt_bail;
		}
	}

	/* Handle host interrupt */
	if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
		TW_CLEAR_HOST_INTERRUPT(tw_dev);

	/* Handle attention interrupt */
	if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
		TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
		if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
			twa_get_request_id(tw_dev, &request_id);

			error = twa_aen_read_queue(tw_dev, request_id);
			if (error) {
				tw_dev->state[request_id] = TW_S_COMPLETED;
				twa_free_request_id(tw_dev, request_id);
				clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
			}
		}
	}

	/* Handle command interrupt */
	if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
		TW_MASK_COMMAND_INTERRUPT(tw_dev);
		/* Drain as many pending commands as we can */
		while (tw_dev->pending_request_count > 0) {
			request_id = tw_dev->pending_queue[tw_dev->pending_head];
			if (tw_dev->state[request_id] != TW_S_PENDING) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
				TW_CLEAR_ALL_INTERRUPTS(tw_dev);
				goto twa_interrupt_bail;
			}
			if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
				tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
				tw_dev->pending_request_count--;
			} else {
				/* If we get here, we will continue re-posting on the next command interrupt */
				break;
			}
		}
	}

	/* Handle response interrupt */
	if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {

		/* Drain the response queue from the board */
		while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
			/* Complete the response */
			response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
			request_id = TW_RESID_OUT(response_que.response_id);
			full_command_packet = tw_dev->command_packet_virt[request_id];
			error = 0;
			/* Check for command packet errors */
			if (full_command_packet->command.newcommand.status != 0) {
				if (tw_dev->srb[request_id] != NULL) {
					error = twa_fill_sense(tw_dev, request_id, 1, 1);
				} else {
					/* Skip ioctl error prints */
					if (request_id != tw_dev->chrdev_request_id) {
						error = twa_fill_sense(tw_dev, request_id, 0, 1);
					}
				}
			}

			/* Check for correct state */
			if (tw_dev->state[request_id] != TW_S_POSTED) {
				if (tw_dev->srb[request_id] != NULL) {
					TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
					goto twa_interrupt_bail;
				}
			}

			/* Check for internal command completion */
			if (tw_dev->srb[request_id] == NULL) {
				if (request_id != tw_dev->chrdev_request_id) {
					if (twa_aen_complete(tw_dev, request_id))
						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
				} else {
					tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
					wake_up(&tw_dev->ioctl_wqueue);
				}
			} else {
				struct scsi_cmnd *cmd;

				cmd = tw_dev->srb[request_id];

				twa_scsiop_execute_scsi_complete(tw_dev, request_id);
				/* If no error command was a success */
				if (error == 0) {
					cmd->result = (DID_OK << 16);
				}

				/* If error, command failed */
				if (error == 1) {
					/* Ask for a host reset */
					cmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
				}

				/* Report residual bytes for single sgl */
				if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
					u32 length = le32_to_cpu(full_command_packet->command.newcommand.sg_list[0].length);

					if (length < scsi_bufflen(cmd))
						scsi_set_resid(cmd, scsi_bufflen(cmd) - length);
				}

				/* Now complete the io */
				if (twa_command_mapped(cmd))
					scsi_dma_unmap(cmd);
				cmd->scsi_done(cmd);
				tw_dev->state[request_id] = TW_S_COMPLETED;
				twa_free_request_id(tw_dev, request_id);
				tw_dev->posted_request_count--;
			}

			/* Check for valid status after each drain */
			status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
			if (twa_check_bits(status_reg_value)) {
				if (twa_decode_bits(tw_dev, status_reg_value)) {
					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
					goto twa_interrupt_bail;
				}
			}
		}
	}

twa_interrupt_bail:
	spin_unlock(tw_dev->host->host_lock);
	return IRQ_RETVAL(handled);
} /* End twa_interrupt() */

/* This function will load the request id and various sgls for ioctls */
static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
{
	TW_Command *oldcommand;
	TW_Command_Apache *newcommand;
	TW_SG_Entry *sgl;
	unsigned int pae = 0;

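	/*
	 * 'pae' flags a 32-bit kernel that uses 64-bit DMA addresses; it is
	 * used below to adjust the legacy command size and SGL offset.
	 */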
1385 if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
1386 pae = 1;
1387
1388 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1389 newcommand = &full_command_packet->command.newcommand;
1390 newcommand->request_id__lunl =
1391 TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id);
1392 if (length) {
1393 newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache));
1394 newcommand->sg_list[0].length = cpu_to_le32(length);
1395 }
1396 newcommand->sgl_entries__lunh =
1397 TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0);
1398 } else {
1399 oldcommand = &full_command_packet->command.oldcommand;
1400 oldcommand->request_id = request_id;
1401
1402 if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
1403 /* Load the sg list */
1404 if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
1405 sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
1406 else
1407 sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
1408 sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache));
1409 sgl->length = cpu_to_le32(length);
1410
1411 oldcommand->size += pae;
1412 }
1413 }
1414 } /* End twa_load_sgl() */
1415
1416 /* This function will poll for a response interrupt of a request */
twa_poll_response(TW_Device_Extension * tw_dev,int request_id,int seconds)1417 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
1418 {
1419 int retval = 1, found = 0, response_request_id;
1420 TW_Response_Queue response_queue;
1421 TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];
1422
1423 if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
1424 response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1425 response_request_id = TW_RESID_OUT(response_queue.response_id);
1426 if (request_id != response_request_id) {
1427 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
1428 goto out;
1429 }
1430 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1431 if (full_command_packet->command.newcommand.status != 0) {
1432 /* bad response */
1433 twa_fill_sense(tw_dev, request_id, 0, 0);
1434 goto out;
1435 }
1436 found = 1;
1437 } else {
1438 if (full_command_packet->command.oldcommand.status != 0) {
1439 /* bad response */
1440 twa_fill_sense(tw_dev, request_id, 0, 0);
1441 goto out;
1442 }
1443 found = 1;
1444 }
1445 }
1446
1447 if (found)
1448 retval = 0;
1449 out:
1450 return retval;
1451 } /* End twa_poll_response() */
1452
1453 /* This function will poll the status register for a flag */
twa_poll_status(TW_Device_Extension * tw_dev,u32 flag,int seconds)1454 static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1455 {
1456 u32 status_reg_value;
1457 unsigned long before;
1458 int retval = 1;
1459
1460 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1461 before = jiffies;
1462
1463 if (twa_check_bits(status_reg_value))
1464 twa_decode_bits(tw_dev, status_reg_value);
1465
1466 while ((status_reg_value & flag) != flag) {
1467 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1468
1469 if (twa_check_bits(status_reg_value))
1470 twa_decode_bits(tw_dev, status_reg_value);
1471
1472 if (time_after(jiffies, before + HZ * seconds))
1473 goto out;
1474
1475 msleep(50);
1476 }
1477 retval = 0;
1478 out:
1479 return retval;
1480 } /* End twa_poll_status() */
1481
1482 /* This function will poll the status register for disappearance of a flag */
twa_poll_status_gone(TW_Device_Extension * tw_dev,u32 flag,int seconds)1483 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1484 {
1485 u32 status_reg_value;
1486 unsigned long before;
1487 int retval = 1;
1488
1489 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1490 before = jiffies;
1491
1492 if (twa_check_bits(status_reg_value))
1493 twa_decode_bits(tw_dev, status_reg_value);
1494
1495 while ((status_reg_value & flag) != 0) {
1496 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1497 if (twa_check_bits(status_reg_value))
1498 twa_decode_bits(tw_dev, status_reg_value);
1499
1500 if (time_after(jiffies, before + HZ * seconds))
1501 goto out;
1502
1503 msleep(50);
1504 }
1505 retval = 0;
1506 out:
1507 return retval;
1508 } /* End twa_poll_status_gone() */
1509
1510 /* This function will attempt to post a command packet to the board */
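/*
 * Returns 0 once the packet has been written to the controller's command
 * queue register.  If the controller cannot take it, host commands fail with
 * SCSI_MLQUEUE_HOST_BUSY, while internal driver commands are parked on the
 * pending queue (and the command interrupt is unmasked so they can be posted
 * later), in which case 1 is returned.
 */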
1511 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
1512 {
1513 u32 status_reg_value;
1514 dma_addr_t command_que_value;
1515 int retval = 1;
1516
1517 command_que_value = tw_dev->command_packet_phys[request_id];
1518
1519 /* For 9650SE write low 4 bytes first */
1520 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1521 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1522 command_que_value += TW_COMMAND_OFFSET;
1523 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
1524 }
1525
1526 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1527
1528 if (twa_check_bits(status_reg_value))
1529 twa_decode_bits(tw_dev, status_reg_value);
1530
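	/*
	 * Queue this request for later if the controller reports its command
	 * queue full, or if other requests are already pending (unless this
	 * request is itself the pending one being re-posted).
	 */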
1531 if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {
1532
1533 /* Only pend internal driver commands */
1534 if (!internal) {
1535 retval = SCSI_MLQUEUE_HOST_BUSY;
1536 goto out;
1537 }
1538
1539 /* Couldn't post the command packet, so we do it later */
1540 if (tw_dev->state[request_id] != TW_S_PENDING) {
1541 tw_dev->state[request_id] = TW_S_PENDING;
1542 tw_dev->pending_request_count++;
1543 if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
1544 tw_dev->max_pending_request_count = tw_dev->pending_request_count;
1545 }
1546 tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
1547 tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
1548 }
1549 TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
1550 goto out;
1551 } else {
1552 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1553 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1554 /* Now write upper 4 bytes */
1555 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
1556 } else {
1557 if (sizeof(dma_addr_t) > 4) {
1558 command_que_value += TW_COMMAND_OFFSET;
1559 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1560 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
1561 } else {
1562 writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1563 }
1564 }
1565 tw_dev->state[request_id] = TW_S_POSTED;
1566 tw_dev->posted_request_count++;
1567 if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
1568 tw_dev->max_posted_request_count = tw_dev->posted_request_count;
1569 }
1570 }
1571 retval = 0;
1572 out:
1573 return retval;
1574 } /* End twa_post_command_packet() */
1575
1576 /* This function will reset a device extension */
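/*
 * With interrupts disabled and the host lock held, every in-flight command is
 * completed with DID_RESET (unmapping its scatterlist where needed), the
 * free/pending queues and counters are reinitialized, and twa_reset_sequence()
 * then brings the controller back up before interrupts are re-enabled.
 */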
1577 static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
1578 {
1579 int i = 0;
1580 int retval = 1;
1581 unsigned long flags = 0;
1582
1583 set_bit(TW_IN_RESET, &tw_dev->flags);
1584 TW_DISABLE_INTERRUPTS(tw_dev);
1585 TW_MASK_COMMAND_INTERRUPT(tw_dev);
1586 spin_lock_irqsave(tw_dev->host->host_lock, flags);
1587
1588 /* Abort all requests that are in progress */
1589 for (i = 0; i < TW_Q_LENGTH; i++) {
1590 if ((tw_dev->state[i] != TW_S_FINISHED) &&
1591 (tw_dev->state[i] != TW_S_INITIAL) &&
1592 (tw_dev->state[i] != TW_S_COMPLETED)) {
1593 if (tw_dev->srb[i]) {
1594 struct scsi_cmnd *cmd = tw_dev->srb[i];
1595
1596 cmd->result = (DID_RESET << 16);
1597 if (twa_command_mapped(cmd))
1598 scsi_dma_unmap(cmd);
1599 cmd->scsi_done(cmd);
1600 }
1601 }
1602 }
1603
1604 /* Reset queues and counts */
1605 for (i = 0; i < TW_Q_LENGTH; i++) {
1606 tw_dev->free_queue[i] = i;
1607 tw_dev->state[i] = TW_S_INITIAL;
1608 }
1609 tw_dev->free_head = TW_Q_START;
1610 tw_dev->free_tail = TW_Q_START;
1611 tw_dev->posted_request_count = 0;
1612 tw_dev->pending_request_count = 0;
1613 tw_dev->pending_head = TW_Q_START;
1614 tw_dev->pending_tail = TW_Q_START;
1615 tw_dev->reset_print = 0;
1616
1617 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
1618
1619 if (twa_reset_sequence(tw_dev, 1))
1620 goto out;
1621
1622 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
1623 clear_bit(TW_IN_RESET, &tw_dev->flags);
1624 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1625
1626 retval = 0;
1627 out:
1628 return retval;
1629 } /* End twa_reset_device_extension() */
1630
1631 /* This function will reset a controller */
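/*
 * Each pass of the loop below (up to TW_MAX_RESET_TRIES) optionally issues a
 * soft reset, waits for the microcontroller-ready status, empties the
 * response queue, runs the firmware compatibility check and drains any queued
 * AENs; any failure forces a soft reset on the next attempt.
 */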
1632 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1633 {
1634 int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
1635
1636 while (tries < TW_MAX_RESET_TRIES) {
1637 if (do_soft_reset) {
1638 TW_SOFT_RESET(tw_dev);
1639 /* Clear pchip/response queue on 9550SX */
1640 if (twa_empty_response_queue_large(tw_dev)) {
1641 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
1642 do_soft_reset = 1;
1643 tries++;
1644 continue;
1645 }
1646 }
1647
1648 /* Make sure controller is in a good state */
1649 if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
1650 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
1651 do_soft_reset = 1;
1652 tries++;
1653 continue;
1654 }
1655
1656 /* Empty response queue */
1657 if (twa_empty_response_queue(tw_dev)) {
1658 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
1659 do_soft_reset = 1;
1660 tries++;
1661 continue;
1662 }
1663
1664 flashed = 0;
1665
1666 /* Check for compatibility/flash */
1667 if (twa_check_srl(tw_dev, &flashed)) {
1668 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
1669 do_soft_reset = 1;
1670 tries++;
1671 continue;
1672 } else {
1673 if (flashed) {
1674 tries++;
1675 continue;
1676 }
1677 }
1678
1679 /* Drain the AEN queue */
1680 if (twa_aen_drain_queue(tw_dev, soft_reset)) {
1681 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
1682 do_soft_reset = 1;
1683 tries++;
1684 continue;
1685 }
1686
1687 /* If we got here, controller is in a good state */
1688 retval = 0;
1689 goto out;
1690 }
1691 out:
1692 return retval;
1693 } /* End twa_reset_sequence() */
1694
1695 /* This function returns unit geometry in cylinders/heads/sectors */
1696 static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
1697 {
1698 int heads, sectors, cylinders;
1699
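	/*
	 * Heuristic geometry: 255 heads x 63 sectors for units of 0x200000
	 * 512-byte sectors (1GB) and larger, 64 x 32 otherwise.
	 */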
1700 if (capacity >= 0x200000) {
1701 heads = 255;
1702 sectors = 63;
1703 cylinders = sector_div(capacity, heads * sectors);
1704 } else {
1705 heads = 64;
1706 sectors = 32;
1707 cylinders = sector_div(capacity, heads * sectors);
1708 }
1709
1710 geom[0] = heads;
1711 geom[1] = sectors;
1712 geom[2] = cylinders;
1713
1714 return 0;
1715 } /* End twa_scsi_biosparam() */
1716
1717 /* This is the new scsi eh reset function */
1718 static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
1719 {
1720 TW_Device_Extension *tw_dev = NULL;
1721 int retval = FAILED;
1722
1723 tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1724
1725 tw_dev->num_resets++;
1726
1727 sdev_printk(KERN_WARNING, SCpnt->device,
1728 "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
1729 TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
1730
1731 /* Make sure we are not issuing an ioctl or resetting from ioctl */
1732 mutex_lock(&tw_dev->ioctl_lock);
1733
1734 /* Now reset the card and some of the device extension data */
1735 if (twa_reset_device_extension(tw_dev)) {
1736 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
1737 goto out;
1738 }
1739
1740 retval = SUCCESS;
1741 out:
1742 mutex_unlock(&tw_dev->ioctl_lock);
1743 return retval;
1744 } /* End twa_scsi_eh_reset() */
1745
1746 /* This is the main scsi queue function to handle scsi opcodes */
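/*
 * Queueing is refused with SCSI_MLQUEUE_HOST_BUSY while a reset is in
 * progress, and non-zero LUNs are rejected with DID_BAD_TARGET when the
 * firmware SRL is too old to support LUNs.  Otherwise a free request id is
 * claimed and the command is handed to twa_scsiop_execute_scsi(); on failure
 * the request id is released again and the scatterlist unmapped.
 */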
1747 static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1748 {
1749 int request_id, retval;
1750 TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1751
1752 	/* If we are resetting due to a timed-out ioctl, report as busy */
1753 if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
1754 retval = SCSI_MLQUEUE_HOST_BUSY;
1755 goto out;
1756 }
1757
1758 /* Check if this FW supports luns */
1759 if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
1760 SCpnt->result = (DID_BAD_TARGET << 16);
1761 done(SCpnt);
1762 retval = 0;
1763 goto out;
1764 }
1765
1766 /* Save done function into scsi_cmnd struct */
1767 SCpnt->scsi_done = done;
1768
1769 /* Get a free request id */
1770 twa_get_request_id(tw_dev, &request_id);
1771
1772 /* Save the scsi command for use by the ISR */
1773 tw_dev->srb[request_id] = SCpnt;
1774
1775 retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
1776 switch (retval) {
1777 case SCSI_MLQUEUE_HOST_BUSY:
1778 if (twa_command_mapped(SCpnt))
1779 scsi_dma_unmap(SCpnt);
1780 twa_free_request_id(tw_dev, request_id);
1781 break;
1782 case 1:
1783 SCpnt->result = (DID_ERROR << 16);
1784 if (twa_command_mapped(SCpnt))
1785 scsi_dma_unmap(SCpnt);
1786 done(SCpnt);
1787 tw_dev->state[request_id] = TW_S_COMPLETED;
1788 twa_free_request_id(tw_dev, request_id);
1789 retval = 0;
1790 }
1791 out:
1792 return retval;
1793 } /* End twa_scsi_queue_lck() */
1794
1795 static DEF_SCSI_QCMD(twa_scsi_queue)
1796
1797 /* This function hands scsi CDBs to the firmware */
1798 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
1799 unsigned char *cdb, int use_sg,
1800 TW_SG_Entry *sglistarg)
1801 {
1802 TW_Command_Full *full_command_packet;
1803 TW_Command_Apache *command_packet;
1804 u32 num_sectors = 0x0;
1805 int i, sg_count;
1806 struct scsi_cmnd *srb = NULL;
1807 struct scatterlist *sg;
1808 int retval = 1;
1809
1810 if (tw_dev->srb[request_id])
1811 srb = tw_dev->srb[request_id];
1812
1813 /* Initialize command packet */
1814 full_command_packet = tw_dev->command_packet_virt[request_id];
1815 full_command_packet->header.header_desc.size_header = 128;
1816 full_command_packet->header.status_block.error = 0;
1817 full_command_packet->header.status_block.severity__reserved = 0;
1818
1819 command_packet = &full_command_packet->command.newcommand;
1820 command_packet->status = 0;
1821 command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
1822
1823 /* We forced 16 byte cdb use earlier */
1824 if (!cdb)
1825 memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
1826 else
1827 memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
1828
1829 if (srb) {
1830 command_packet->unit = srb->device->id;
1831 command_packet->request_id__lunl =
1832 TW_REQ_LUN_IN(srb->device->lun, request_id);
1833 } else {
1834 command_packet->request_id__lunl =
1835 TW_REQ_LUN_IN(0, request_id);
1836 command_packet->unit = 0;
1837 }
1838
1839 command_packet->sgl_offset = 16;
1840
1841 if (!sglistarg) {
1842 /* Map sglist from scsi layer to cmd packet */
1843
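		/*
		 * Requests that twa_command_mapped() reports as unmapped are
		 * bounced through the per-request generic buffer instead of
		 * being DMA mapped: write data is copied into the buffer here
		 * and read data is copied back out of it in
		 * twa_scsiop_execute_scsi_complete().
		 */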
1844 if (scsi_sg_count(srb)) {
1845 if (!twa_command_mapped(srb)) {
1846 if (srb->sc_data_direction == DMA_TO_DEVICE ||
1847 srb->sc_data_direction == DMA_BIDIRECTIONAL)
1848 scsi_sg_copy_to_buffer(srb,
1849 tw_dev->generic_buffer_virt[request_id],
1850 TW_SECTOR_SIZE);
1851 command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1852 command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
1853 } else {
1854 sg_count = scsi_dma_map(srb);
1855 if (sg_count < 0)
1856 goto out;
1857
1858 scsi_for_each_sg(srb, sg, sg_count, i) {
1859 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
1860 command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
1861 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1862 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
1863 goto out;
1864 }
1865 }
1866 }
1867 command_packet->sgl_entries__lunh = TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id]));
1868 }
1869 } else {
1870 /* Internal cdb post */
1871 for (i = 0; i < use_sg; i++) {
1872 command_packet->sg_list[i].address = sglistarg[i].address;
1873 command_packet->sg_list[i].length = sglistarg[i].length;
1874 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1875 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
1876 goto out;
1877 }
1878 }
1879 command_packet->sgl_entries__lunh = TW_REQ_LUN_IN(0, use_sg);
1880 }
1881
1882 if (srb) {
1883 if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
1884 num_sectors = (u32)srb->cmnd[4];
1885
1886 if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
1887 num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
1888 }
1889
1890 /* Update sector statistic */
1891 tw_dev->sector_count = num_sectors;
1892 if (tw_dev->sector_count > tw_dev->max_sector_count)
1893 tw_dev->max_sector_count = tw_dev->sector_count;
1894
1895 /* Update SG statistics */
1896 if (srb) {
1897 tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
1898 if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
1899 tw_dev->max_sgl_entries = tw_dev->sgl_entries;
1900 }
1901
1902 /* Now post the command to the board */
1903 if (srb) {
1904 retval = twa_post_command_packet(tw_dev, request_id, 0);
1905 } else {
1906 twa_post_command_packet(tw_dev, request_id, 1);
1907 retval = 0;
1908 }
1909 out:
1910 return retval;
1911 } /* End twa_scsiop_execute_scsi() */
1912
1913 /* This function completes an execute scsi operation */
1914 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
1915 {
1916 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1917
1918 if (!twa_command_mapped(cmd) &&
1919 (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1920 cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1921 if (scsi_sg_count(cmd) == 1) {
1922 void *buf = tw_dev->generic_buffer_virt[request_id];
1923
1924 scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
1925 }
1926 }
1927 } /* End twa_scsiop_execute_scsi_complete() */
1928
1929 /* This function tells the controller to shut down */
1930 static void __twa_shutdown(TW_Device_Extension *tw_dev)
1931 {
1932 /* Disable interrupts */
1933 TW_DISABLE_INTERRUPTS(tw_dev);
1934
1935 /* Free up the IRQ */
1936 free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
1937
1938 printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);
1939
1940 /* Tell the card we are shutting down */
1941 if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
1942 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
1943 } else {
1944 printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
1945 }
1946
1947 /* Clear all interrupts just before exit */
1948 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1949 } /* End __twa_shutdown() */
1950
1951 /* Wrapper for __twa_shutdown */
1952 static void twa_shutdown(struct pci_dev *pdev)
1953 {
1954 struct Scsi_Host *host = pci_get_drvdata(pdev);
1955 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
1956
1957 __twa_shutdown(tw_dev);
1958 } /* End twa_shutdown() */
1959
1960 /* This function will look up a string */
1961 static char *twa_string_lookup(twa_message_type *table, unsigned int code)
1962 {
1963 int index;
1964
1965 for (index = 0; ((code != table[index].code) &&
1966 (table[index].text != (char *)0)); index++);
1967 return(table[index].text);
1968 } /* End twa_string_lookup() */
1969
1970 /* This function gets called when a disk is coming on-line */
1971 static int twa_slave_configure(struct scsi_device *sdev)
1972 {
1973 /* Force 60 second timeout */
1974 blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
1975
1976 return 0;
1977 } /* End twa_slave_configure() */
1978
1979 /* scsi_host_template initializer */
1980 static struct scsi_host_template driver_template = {
1981 .module = THIS_MODULE,
1982 .name = "3ware 9000 Storage Controller",
1983 .queuecommand = twa_scsi_queue,
1984 .eh_host_reset_handler = twa_scsi_eh_reset,
1985 .bios_param = twa_scsi_biosparam,
1986 .change_queue_depth = scsi_change_queue_depth,
1987 .can_queue = TW_Q_LENGTH-2,
1988 .slave_configure = twa_slave_configure,
1989 .this_id = -1,
1990 .sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
1991 .max_sectors = TW_MAX_SECTORS,
1992 .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
1993 .shost_attrs = twa_host_attrs,
1994 .emulated = 1,
1995 .no_write_same = 1,
1996 };
1997
1998 /* This function will probe and initialize a card */
1999 static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2000 {
2001 struct Scsi_Host *host = NULL;
2002 TW_Device_Extension *tw_dev;
2003 unsigned long mem_addr, mem_len;
2004 int retval;
2005
2006 retval = pci_enable_device(pdev);
2007 if (retval) {
2008 TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
2009 goto out_disable_device;
2010 }
2011
2012 pci_set_master(pdev);
2013 pci_try_set_mwi(pdev);
2014
2015 retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2016 if (retval)
2017 retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2018 if (retval) {
2019 TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
2020 retval = -ENODEV;
2021 goto out_disable_device;
2022 }
2023
2024 host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
2025 if (!host) {
2026 TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
2027 retval = -ENOMEM;
2028 goto out_disable_device;
2029 }
2030 tw_dev = (TW_Device_Extension *)host->hostdata;
2031
2032 /* Save values to device extension */
2033 tw_dev->host = host;
2034 tw_dev->tw_pci_dev = pdev;
2035
2036 if (twa_initialize_device_extension(tw_dev)) {
2037 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
2038 retval = -ENOMEM;
2039 goto out_free_device_extension;
2040 }
2041
2042 /* Request IO regions */
2043 retval = pci_request_regions(pdev, "3w-9xxx");
2044 if (retval) {
2045 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
2046 goto out_free_device_extension;
2047 }
2048
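	/*
	 * The original 9000-series controller exposes its registers through
	 * BAR 1; the 9550SX, 9650SE and 9690SA use BAR 2.
	 */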
2049 if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
2050 mem_addr = pci_resource_start(pdev, 1);
2051 mem_len = pci_resource_len(pdev, 1);
2052 } else {
2053 mem_addr = pci_resource_start(pdev, 2);
2054 mem_len = pci_resource_len(pdev, 2);
2055 }
2056
2057 /* Save base address */
2058 tw_dev->base_addr = ioremap(mem_addr, mem_len);
2059 if (!tw_dev->base_addr) {
2060 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2061 retval = -ENOMEM;
2062 goto out_release_mem_region;
2063 }
2064
2065 /* Disable interrupts on the card */
2066 TW_DISABLE_INTERRUPTS(tw_dev);
2067
2068 /* Initialize the card */
2069 if (twa_reset_sequence(tw_dev, 0)) {
2070 retval = -ENOMEM;
2071 goto out_iounmap;
2072 }
2073
2074 /* Set host specific parameters */
2075 if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
2076 (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
2077 host->max_id = TW_MAX_UNITS_9650SE;
2078 else
2079 host->max_id = TW_MAX_UNITS;
2080
2081 host->max_cmd_len = TW_MAX_CDB_LEN;
2082
2083 /* Channels aren't supported by adapter */
2084 host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
2085 host->max_channel = 0;
2086
2087 /* Register the card with the kernel SCSI layer */
2088 retval = scsi_add_host(host, &pdev->dev);
2089 if (retval) {
2090 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
2091 goto out_iounmap;
2092 }
2093
2094 pci_set_drvdata(pdev, host);
2095
2096 printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
2097 host->host_no, mem_addr, pdev->irq);
2098 printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
2099 host->host_no,
2100 (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
2101 TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
2102 (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
2103 TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
2104 le32_to_cpu(*(__le32 *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
2105 TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
2106
2107 /* Try to enable MSI */
2108 if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
2109 !pci_enable_msi(pdev))
2110 set_bit(TW_USING_MSI, &tw_dev->flags);
2111
2112 /* Now setup the interrupt handler */
2113 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2114 if (retval) {
2115 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
2116 goto out_remove_host;
2117 }
2118
2119 twa_device_extension_list[twa_device_extension_count] = tw_dev;
2120 twa_device_extension_count++;
2121
2122 /* Re-enable interrupts on the card */
2123 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2124
2125 /* Finally, scan the host */
2126 scsi_scan_host(host);
2127
2128 if (twa_major == -1) {
2129 if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0)
2130 TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
2131 }
2132 return 0;
2133
2134 out_remove_host:
2135 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2136 pci_disable_msi(pdev);
2137 scsi_remove_host(host);
2138 out_iounmap:
2139 iounmap(tw_dev->base_addr);
2140 out_release_mem_region:
2141 pci_release_regions(pdev);
2142 out_free_device_extension:
2143 twa_free_device_extension(tw_dev);
2144 scsi_host_put(host);
2145 out_disable_device:
2146 pci_disable_device(pdev);
2147
2148 return retval;
2149 } /* End twa_probe() */
2150
2151 /* This function is called to remove a device */
2152 static void twa_remove(struct pci_dev *pdev)
2153 {
2154 struct Scsi_Host *host = pci_get_drvdata(pdev);
2155 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2156
2157 scsi_remove_host(tw_dev->host);
2158
2159 /* Unregister character device */
2160 if (twa_major >= 0) {
2161 unregister_chrdev(twa_major, "twa");
2162 twa_major = -1;
2163 }
2164
2165 /* Shutdown the card */
2166 __twa_shutdown(tw_dev);
2167
2168 /* Disable MSI if enabled */
2169 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2170 pci_disable_msi(pdev);
2171
2172 /* Free IO remapping */
2173 iounmap(tw_dev->base_addr);
2174
2175 /* Free up the mem region */
2176 pci_release_regions(pdev);
2177
2178 /* Free up device extension resources */
2179 twa_free_device_extension(tw_dev);
2180
2181 scsi_host_put(tw_dev->host);
2182 pci_disable_device(pdev);
2183 twa_device_extension_count--;
2184 } /* End twa_remove() */
2185
2186 /* This function is called on PCI suspend */
2187 static int __maybe_unused twa_suspend(struct device *dev)
2188 {
2189 struct pci_dev *pdev = to_pci_dev(dev);
2190 struct Scsi_Host *host = pci_get_drvdata(pdev);
2191 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2192
2193 printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);
2194
2195 TW_DISABLE_INTERRUPTS(tw_dev);
2196 free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
2197
2198 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2199 pci_disable_msi(pdev);
2200
2201 /* Tell the card we are shutting down */
2202 if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
2203 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
2204 } else {
2205 printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
2206 }
2207 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
2208
2209 return 0;
2210 } /* End twa_suspend() */
2211
2212 /* This function is called on PCI resume */
2213 static int __maybe_unused twa_resume(struct device *dev)
2214 {
2215 int retval = 0;
2216 struct pci_dev *pdev = to_pci_dev(dev);
2217 struct Scsi_Host *host = pci_get_drvdata(pdev);
2218 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2219
2220 printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
2221
2222 pci_try_set_mwi(pdev);
2223
2224 retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2225 if (retval)
2226 retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2227 if (retval) {
2228 TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
2229 retval = -ENODEV;
2230 goto out_disable_device;
2231 }
2232
2233 /* Initialize the card */
2234 if (twa_reset_sequence(tw_dev, 0)) {
2235 retval = -ENODEV;
2236 goto out_disable_device;
2237 }
2238
2239 /* Now setup the interrupt handler */
2240 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2241 if (retval) {
2242 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
2243 retval = -ENODEV;
2244 goto out_disable_device;
2245 }
2246
2247 /* Now enable MSI if enabled */
2248 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2249 pci_enable_msi(pdev);
2250
2251 /* Re-enable interrupts on the card */
2252 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2253
2254 printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
2255 return 0;
2256
2257 out_disable_device:
2258 scsi_remove_host(host);
2259
2260 return retval;
2261 } /* End twa_resume() */
2262
2263 /* PCI Devices supported by this driver */
2264 static struct pci_device_id twa_pci_tbl[] = {
2265 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
2266 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2267 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
2268 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2269 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
2270 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2271 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
2272 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2273 { }
2274 };
2275 MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
2276
2277 static SIMPLE_DEV_PM_OPS(twa_pm_ops, twa_suspend, twa_resume);
2278
2279 /* pci_driver initializer */
2280 static struct pci_driver twa_driver = {
2281 .name = "3w-9xxx",
2282 .id_table = twa_pci_tbl,
2283 .probe = twa_probe,
2284 .remove = twa_remove,
2285 .driver.pm = &twa_pm_ops,
2286 .shutdown = twa_shutdown
2287 };
2288
2289 /* This function is called on driver initialization */
2290 static int __init twa_init(void)
2291 {
2292 printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
2293
2294 return pci_register_driver(&twa_driver);
2295 } /* End twa_init() */
2296
2297 /* This function is called on driver exit */
2298 static void __exit twa_exit(void)
2299 {
2300 pci_unregister_driver(&twa_driver);
2301 } /* End twa_exit() */
2302
2303 module_init(twa_init);
2304 module_exit(twa_exit);
2305