1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /***************************************************************************
3                           dpti.c  -  description
4                              -------------------
5     begin                : Thu Sep 7 2000
6     copyright            : (C) 2000 by Adaptec
7 
8 			   July 30, 2001 First version being submitted
9 			   for inclusion in the kernel.  V2.4
10 
11     See Documentation/scsi/dpti.rst for history, notes, license info
12     and credits
13  ***************************************************************************/
14 
15 /***************************************************************************
16  *                                                                         *
17  *                                                                         *
18  ***************************************************************************/
19 /***************************************************************************
20  * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
21  - Support 2.6 kernel and DMA-mapping
22  - ioctl fix for raid tools
23  - use schedule_timeout in long long loop
24  **************************************************************************/
25 
26 /*#define DEBUG 1 */
27 /*#define UARTDELAY 1 */
28 
29 #include <linux/module.h>
30 #include <linux/pgtable.h>
31 
32 MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
33 MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
34 
35 ////////////////////////////////////////////////////////////////
36 
37 #include <linux/ioctl.h>	/* For SCSI-Passthrough */
38 #include <linux/uaccess.h>
39 
40 #include <linux/stat.h>
41 #include <linux/slab.h>		/* for kmalloc() */
42 #include <linux/pci.h>		/* for PCI support */
43 #include <linux/proc_fs.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>	/* for udelay */
46 #include <linux/interrupt.h>
47 #include <linux/kernel.h>	/* for printk */
48 #include <linux/sched.h>
49 #include <linux/reboot.h>
50 #include <linux/spinlock.h>
51 #include <linux/dma-mapping.h>
52 
53 #include <linux/timer.h>
54 #include <linux/string.h>
55 #include <linux/ioport.h>
56 #include <linux/mutex.h>
57 
58 #include <asm/processor.h>	/* for boot_cpu_data */
59 #include <asm/io.h>		/* for virt_to_bus, etc. */
60 
61 #include <scsi/scsi.h>
62 #include <scsi/scsi_cmnd.h>
63 #include <scsi/scsi_device.h>
64 #include <scsi/scsi_host.h>
65 #include <scsi/scsi_tcq.h>
66 
67 #include "dpt/dptsig.h"
68 #include "dpti.h"
69 
70 /*============================================================================
71  * Create a binary signature - this is read by dptsig
72  * Needed for our management apps
73  *============================================================================
74  */
75 static DEFINE_MUTEX(adpt_mutex);
76 static dpt_sig_S DPTI_sig = {
77 	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
78 #ifdef __i386__
79 	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
80 #elif defined(__ia64__)
81 	PROC_INTEL, PROC_IA64,
82 #elif defined(__sparc__)
83 	PROC_ULTRASPARC, PROC_ULTRASPARC,
84 #elif defined(__alpha__)
85 	PROC_ALPHA, PROC_ALPHA,
86 #else
87 	(-1),(-1),
88 #endif
89 	 FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
90 	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
91 	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
92 };
93 
94 
95 
96 
97 /*============================================================================
98  * Globals
99  *============================================================================
100  */
101 
102 static DEFINE_MUTEX(adpt_configuration_lock);
103 
104 static struct i2o_sys_tbl *sys_tbl;
105 static dma_addr_t sys_tbl_pa;
106 static int sys_tbl_ind;
107 static int sys_tbl_len;
108 
109 static adpt_hba* hba_chain = NULL;
110 static int hba_count = 0;
111 
112 static struct class *adpt_sysfs_class;
113 
114 static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
115 #ifdef CONFIG_COMPAT
116 static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
117 #endif
118 
119 static const struct file_operations adpt_fops = {
120 	.unlocked_ioctl	= adpt_unlocked_ioctl,
121 	.open		= adpt_open,
122 	.release	= adpt_close,
123 #ifdef CONFIG_COMPAT
124 	.compat_ioctl	= compat_adpt_ioctl,
125 #endif
126 	.llseek		= noop_llseek,
127 };
128 
129 /* Structures and definitions for synchronous message posting.
130  * See adpt_i2o_post_wait() for description
131  * */
132 struct adpt_i2o_post_wait_data
133 {
134 	int status;
135 	u32 id;
136 	adpt_wait_queue_head_t *wq;
137 	struct adpt_i2o_post_wait_data *next;
138 };
139 
140 static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
141 static u32 adpt_post_wait_id = 0;
142 static DEFINE_SPINLOCK(adpt_post_wait_lock);
143 
144 
145 /*============================================================================
146  * 				Functions
147  *============================================================================
148  */
149 
dpt_dma64(adpt_hba * pHba)150 static inline int dpt_dma64(adpt_hba *pHba)
151 {
152 	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
153 }
154 
dma_high(dma_addr_t addr)155 static inline u32 dma_high(dma_addr_t addr)
156 {
157 	return upper_32_bits(addr);
158 }
159 
dma_low(dma_addr_t addr)160 static inline u32 dma_low(dma_addr_t addr)
161 {
162 	return (u32)addr;
163 }
164 
adpt_read_blink_led(adpt_hba * host)165 static u8 adpt_read_blink_led(adpt_hba* host)
166 {
167 	if (host->FwDebugBLEDflag_P) {
168 		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
169 			return readb(host->FwDebugBLEDvalue_P);
170 		}
171 	}
172 	return 0;
173 }
174 
175 /*============================================================================
176  * Scsi host template interface functions
177  *============================================================================
178  */
179 
180 #ifdef MODULE
181 static struct pci_device_id dptids[] = {
182 	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
183 	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
184 	{ 0, }
185 };
186 #endif
187 
188 MODULE_DEVICE_TABLE(pci,dptids);
189 
adpt_detect(struct scsi_host_template * sht)190 static int adpt_detect(struct scsi_host_template* sht)
191 {
192 	struct pci_dev *pDev = NULL;
193 	adpt_hba *pHba;
194 	adpt_hba *next;
195 
196 	PINFO("Detecting Adaptec I2O RAID controllers...\n");
197 
198         /* search for all Adatpec I2O RAID cards */
199 	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
200 		if(pDev->device == PCI_DPT_DEVICE_ID ||
201 		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
202 			if(adpt_install_hba(sht, pDev) ){
203 				PERROR("Could not Init an I2O RAID device\n");
204 				PERROR("Will not try to detect others.\n");
205 				return hba_count-1;
206 			}
207 			pci_dev_get(pDev);
208 		}
209 	}
210 
211 	/* In INIT state, Activate IOPs */
212 	for (pHba = hba_chain; pHba; pHba = next) {
213 		next = pHba->next;
214 		// Activate does get status , init outbound, and get hrt
215 		if (adpt_i2o_activate_hba(pHba) < 0) {
216 			adpt_i2o_delete_hba(pHba);
217 		}
218 	}
219 
220 
221 	/* Active IOPs in HOLD state */
222 
223 rebuild_sys_tab:
224 	if (hba_chain == NULL)
225 		return 0;
226 
227 	/*
228 	 * If build_sys_table fails, we kill everything and bail
229 	 * as we can't init the IOPs w/o a system table
230 	 */
231 	if (adpt_i2o_build_sys_table() < 0) {
232 		adpt_i2o_sys_shutdown();
233 		return 0;
234 	}
235 
236 	PDEBUG("HBA's in HOLD state\n");
237 
238 	/* If IOP don't get online, we need to rebuild the System table */
239 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
240 		if (adpt_i2o_online_hba(pHba) < 0) {
241 			adpt_i2o_delete_hba(pHba);
242 			goto rebuild_sys_tab;
243 		}
244 	}
245 
246 	/* Active IOPs now in OPERATIONAL state */
247 	PDEBUG("HBA's in OPERATIONAL state\n");
248 
249 	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
250 	for (pHba = hba_chain; pHba; pHba = next) {
251 		next = pHba->next;
252 		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
253 		if (adpt_i2o_lct_get(pHba) < 0){
254 			adpt_i2o_delete_hba(pHba);
255 			continue;
256 		}
257 
258 		if (adpt_i2o_parse_lct(pHba) < 0){
259 			adpt_i2o_delete_hba(pHba);
260 			continue;
261 		}
262 		adpt_inquiry(pHba);
263 	}
264 
265 	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
266 	if (IS_ERR(adpt_sysfs_class)) {
267 		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
268 		adpt_sysfs_class = NULL;
269 	}
270 
271 	for (pHba = hba_chain; pHba; pHba = next) {
272 		next = pHba->next;
273 		if (adpt_scsi_host_alloc(pHba, sht) < 0){
274 			adpt_i2o_delete_hba(pHba);
275 			continue;
276 		}
277 		pHba->initialized = TRUE;
278 		pHba->state &= ~DPTI_STATE_RESET;
279 		if (adpt_sysfs_class) {
280 			struct device *dev = device_create(adpt_sysfs_class,
281 				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
282 				"dpti%d", pHba->unit);
283 			if (IS_ERR(dev)) {
284 				printk(KERN_WARNING"dpti%d: unable to "
285 					"create device in dpt_i2o class\n",
286 					pHba->unit);
287 			}
288 		}
289 	}
290 
291 	// Register our control device node
292 	// nodes will need to be created in /dev to access this
293 	// the nodes can not be created from within the driver
294 	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
295 		adpt_i2o_sys_shutdown();
296 		return 0;
297 	}
298 	return hba_count;
299 }
300 
301 
adpt_release(adpt_hba * pHba)302 static void adpt_release(adpt_hba *pHba)
303 {
304 	struct Scsi_Host *shost = pHba->host;
305 
306 	scsi_remove_host(shost);
307 //	adpt_i2o_quiesce_hba(pHba);
308 	adpt_i2o_delete_hba(pHba);
309 	scsi_host_put(shost);
310 }
311 
312 
adpt_inquiry(adpt_hba * pHba)313 static void adpt_inquiry(adpt_hba* pHba)
314 {
315 	u32 msg[17];
316 	u32 *mptr;
317 	u32 *lenptr;
318 	int direction;
319 	int scsidir;
320 	u32 len;
321 	u32 reqlen;
322 	u8* buf;
323 	dma_addr_t addr;
324 	u8  scb[16];
325 	s32 rcode;
326 
327 	memset(msg, 0, sizeof(msg));
328 	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
329 	if(!buf){
330 		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
331 		return;
332 	}
333 	memset((void*)buf, 0, 36);
334 
335 	len = 36;
336 	direction = 0x00000000;
337 	scsidir  =0x40000000;	// DATA IN  (iop<--dev)
338 
339 	if (dpt_dma64(pHba))
340 		reqlen = 17;		// SINGLE SGE, 64 bit
341 	else
342 		reqlen = 14;		// SINGLE SGE, 32 bit
343 	/* Stick the headers on */
344 	msg[0] = reqlen<<16 | SGL_OFFSET_12;
345 	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
346 	msg[2] = 0;
347 	msg[3]  = 0;
348 	// Adaptec/DPT Private stuff
349 	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
350 	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
351 	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
352 	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
353 	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
354 	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
355 	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;
356 
357 	mptr=msg+7;
358 
359 	memset(scb, 0, sizeof(scb));
360 	// Write SCSI command into the message - always 16 byte block
361 	scb[0] = INQUIRY;
362 	scb[1] = 0;
363 	scb[2] = 0;
364 	scb[3] = 0;
365 	scb[4] = 36;
366 	scb[5] = 0;
367 	// Don't care about the rest of scb
368 
369 	memcpy(mptr, scb, sizeof(scb));
370 	mptr+=4;
371 	lenptr=mptr++;		/* Remember me - fill in when we know */
372 
373 	/* Now fill in the SGList and command */
374 	*lenptr = len;
375 	if (dpt_dma64(pHba)) {
376 		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
377 		*mptr++ = 1 << PAGE_SHIFT;
378 		*mptr++ = 0xD0000000|direction|len;
379 		*mptr++ = dma_low(addr);
380 		*mptr++ = dma_high(addr);
381 	} else {
382 		*mptr++ = 0xD0000000|direction|len;
383 		*mptr++ = addr;
384 	}
385 
386 	// Send it on it's way
387 	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
388 	if (rcode != 0) {
389 		sprintf(pHba->detail, "Adaptec I2O RAID");
390 		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
391 		if (rcode != -ETIME && rcode != -EINTR)
392 			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
393 	} else {
394 		memset(pHba->detail, 0, sizeof(pHba->detail));
395 		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
396 		memcpy(&(pHba->detail[16]), " Model: ", 8);
397 		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
398 		memcpy(&(pHba->detail[40]), " FW: ", 4);
399 		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
400 		pHba->detail[48] = '\0';	/* precautionary */
401 		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
402 	}
403 	adpt_i2o_status_get(pHba);
404 	return ;
405 }
406 
407 
adpt_slave_configure(struct scsi_device * device)408 static int adpt_slave_configure(struct scsi_device * device)
409 {
410 	struct Scsi_Host *host = device->host;
411 
412 	if (host->can_queue && device->tagged_supported) {
413 		scsi_change_queue_depth(device,
414 				host->can_queue - 1);
415 	}
416 	return 0;
417 }
418 
adpt_queue_lck(struct scsi_cmnd * cmd,void (* done)(struct scsi_cmnd *))419 static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
420 {
421 	adpt_hba* pHba = NULL;
422 	struct adpt_device* pDev = NULL;	/* dpt per device information */
423 
424 	cmd->scsi_done = done;
425 	/*
426 	 * SCSI REQUEST_SENSE commands will be executed automatically by the
427 	 * Host Adapter for any errors, so they should not be executed
428 	 * explicitly unless the Sense Data is zero indicating that no error
429 	 * occurred.
430 	 */
431 
432 	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
433 		cmd->result = (DID_OK << 16);
434 		cmd->scsi_done(cmd);
435 		return 0;
436 	}
437 
438 	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
439 	if (!pHba) {
440 		return FAILED;
441 	}
442 
443 	rmb();
444 	if ((pHba->state) & DPTI_STATE_RESET)
445 		return SCSI_MLQUEUE_HOST_BUSY;
446 
447 	// TODO if the cmd->device if offline then I may need to issue a bus rescan
448 	// followed by a get_lct to see if the device is there anymore
449 	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
450 		/*
451 		 * First command request for this device.  Set up a pointer
452 		 * to the device structure.  This should be a TEST_UNIT_READY
453 		 * command from scan_scsis_single.
454 		 */
455 		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
456 			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
457 			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
458 			cmd->result = (DID_NO_CONNECT << 16);
459 			cmd->scsi_done(cmd);
460 			return 0;
461 		}
462 		cmd->device->hostdata = pDev;
463 	}
464 	pDev->pScsi_dev = cmd->device;
465 
466 	/*
467 	 * If we are being called from when the device is being reset,
468 	 * delay processing of the command until later.
469 	 */
470 	if (pDev->state & DPTI_DEV_RESET ) {
471 		return FAILED;
472 	}
473 	return adpt_scsi_to_i2o(pHba, cmd, pDev);
474 }
475 
DEF_SCSI_QCMD(adpt_queue)476 static DEF_SCSI_QCMD(adpt_queue)
477 
478 static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
479 		sector_t capacity, int geom[])
480 {
481 	int heads=-1;
482 	int sectors=-1;
483 	int cylinders=-1;
484 
485 	// *** First lets set the default geometry ****
486 
487 	// If the capacity is less than ox2000
488 	if (capacity < 0x2000 ) {	// floppy
489 		heads = 18;
490 		sectors = 2;
491 	}
492 	// else if between 0x2000 and 0x20000
493 	else if (capacity < 0x20000) {
494 		heads = 64;
495 		sectors = 32;
496 	}
497 	// else if between 0x20000 and 0x40000
498 	else if (capacity < 0x40000) {
499 		heads = 65;
500 		sectors = 63;
501 	}
502 	// else if between 0x4000 and 0x80000
503 	else if (capacity < 0x80000) {
504 		heads = 128;
505 		sectors = 63;
506 	}
507 	// else if greater than 0x80000
508 	else {
509 		heads = 255;
510 		sectors = 63;
511 	}
512 	cylinders = sector_div(capacity, heads * sectors);
513 
514 	// Special case if CDROM
515 	if(sdev->type == 5) {  // CDROM
516 		heads = 252;
517 		sectors = 63;
518 		cylinders = 1111;
519 	}
520 
521 	geom[0] = heads;
522 	geom[1] = sectors;
523 	geom[2] = cylinders;
524 
525 	PDEBUG("adpt_bios_param: exit\n");
526 	return 0;
527 }
528 
529 
adpt_info(struct Scsi_Host * host)530 static const char *adpt_info(struct Scsi_Host *host)
531 {
532 	adpt_hba* pHba;
533 
534 	pHba = (adpt_hba *) host->hostdata[0];
535 	return (char *) (pHba->detail);
536 }
537 
adpt_show_info(struct seq_file * m,struct Scsi_Host * host)538 static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
539 {
540 	struct adpt_device* d;
541 	int id;
542 	int chan;
543 	adpt_hba* pHba;
544 	int unit;
545 
546 	// Find HBA (host bus adapter) we are looking for
547 	mutex_lock(&adpt_configuration_lock);
548 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
549 		if (pHba->host == host) {
550 			break;	/* found adapter */
551 		}
552 	}
553 	mutex_unlock(&adpt_configuration_lock);
554 	if (pHba == NULL) {
555 		return 0;
556 	}
557 	host = pHba->host;
558 
559 	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
560 	seq_printf(m, "%s\n", pHba->detail);
561 	seq_printf(m, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
562 			pHba->host->host_no, pHba->name, host->irq);
563 	seq_printf(m, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
564 			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);
565 
566 	seq_puts(m, "Devices:\n");
567 	for(chan = 0; chan < MAX_CHANNEL; chan++) {
568 		for(id = 0; id < MAX_ID; id++) {
569 			d = pHba->channel[chan].device[id];
570 			while(d) {
571 				seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
572 				seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);
573 
574 				unit = d->pI2o_dev->lct_data.tid;
575 				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu)  (%s)\n\n",
576 					       unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
577 					       scsi_device_online(d->pScsi_dev)? "online":"offline");
578 				d = d->next_lun;
579 			}
580 		}
581 	}
582 	return 0;
583 }
584 
585 /*
586  *	Turn a pointer to ioctl reply data into an u32 'context'
587  */
adpt_ioctl_to_context(adpt_hba * pHba,void * reply)588 static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
589 {
590 #if BITS_PER_LONG == 32
591 	return (u32)(unsigned long)reply;
592 #else
593 	ulong flags = 0;
594 	u32 nr, i;
595 
596 	spin_lock_irqsave(pHba->host->host_lock, flags);
597 	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
598 	for (i = 0; i < nr; i++) {
599 		if (pHba->ioctl_reply_context[i] == NULL) {
600 			pHba->ioctl_reply_context[i] = reply;
601 			break;
602 		}
603 	}
604 	spin_unlock_irqrestore(pHba->host->host_lock, flags);
605 	if (i >= nr) {
606 		printk(KERN_WARNING"%s: Too many outstanding "
607 				"ioctl commands\n", pHba->name);
608 		return (u32)-1;
609 	}
610 
611 	return i;
612 #endif
613 }
614 
615 /*
616  *	Go from an u32 'context' to a pointer to ioctl reply data.
617  */
adpt_ioctl_from_context(adpt_hba * pHba,u32 context)618 static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
619 {
620 #if BITS_PER_LONG == 32
621 	return (void *)(unsigned long)context;
622 #else
623 	void *p = pHba->ioctl_reply_context[context];
624 	pHba->ioctl_reply_context[context] = NULL;
625 
626 	return p;
627 #endif
628 }
629 
630 /*===========================================================================
631  * Error Handling routines
632  *===========================================================================
633  */
634 
adpt_abort(struct scsi_cmnd * cmd)635 static int adpt_abort(struct scsi_cmnd * cmd)
636 {
637 	adpt_hba* pHba = NULL;	/* host bus adapter structure */
638 	struct adpt_device* dptdevice;	/* dpt per device information */
639 	u32 msg[5];
640 	int rcode;
641 
642 	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
643 	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
644 	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
645 		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
646 		return FAILED;
647 	}
648 
649 	memset(msg, 0, sizeof(msg));
650 	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
651 	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
652 	msg[2] = 0;
653 	msg[3]= 0;
654 	/* Add 1 to avoid firmware treating it as invalid command */
655 	msg[4] = scsi_cmd_to_rq(cmd)->tag + 1;
656 	if (pHba->host)
657 		spin_lock_irq(pHba->host->host_lock);
658 	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
659 	if (pHba->host)
660 		spin_unlock_irq(pHba->host->host_lock);
661 	if (rcode != 0) {
662 		if(rcode == -EOPNOTSUPP ){
663 			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
664 			return FAILED;
665 		}
666 		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
667 		return FAILED;
668 	}
669 	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
670 	return SUCCESS;
671 }
672 
673 
674 #define I2O_DEVICE_RESET 0x27
675 // This is the same for BLK and SCSI devices
676 // NOTE this is wrong in the i2o.h definitions
677 // This is not currently supported by our adapter but we issue it anyway
adpt_device_reset(struct scsi_cmnd * cmd)678 static int adpt_device_reset(struct scsi_cmnd* cmd)
679 {
680 	adpt_hba* pHba;
681 	u32 msg[4];
682 	u32 rcode;
683 	int old_state;
684 	struct adpt_device* d = cmd->device->hostdata;
685 
686 	pHba = (void*) cmd->device->host->hostdata[0];
687 	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
688 	if (!d) {
689 		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
690 		return FAILED;
691 	}
692 	memset(msg, 0, sizeof(msg));
693 	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
694 	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
695 	msg[2] = 0;
696 	msg[3] = 0;
697 
698 	if (pHba->host)
699 		spin_lock_irq(pHba->host->host_lock);
700 	old_state = d->state;
701 	d->state |= DPTI_DEV_RESET;
702 	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
703 	d->state = old_state;
704 	if (pHba->host)
705 		spin_unlock_irq(pHba->host->host_lock);
706 	if (rcode != 0) {
707 		if(rcode == -EOPNOTSUPP ){
708 			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
709 			return FAILED;
710 		}
711 		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
712 		return FAILED;
713 	} else {
714 		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
715 		return SUCCESS;
716 	}
717 }
718 
719 
720 #define I2O_HBA_BUS_RESET 0x87
721 // This version of bus reset is called by the eh_error handler
adpt_bus_reset(struct scsi_cmnd * cmd)722 static int adpt_bus_reset(struct scsi_cmnd* cmd)
723 {
724 	adpt_hba* pHba;
725 	u32 msg[4];
726 	u32 rcode;
727 
728 	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
729 	memset(msg, 0, sizeof(msg));
730 	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
731 	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
732 	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
733 	msg[2] = 0;
734 	msg[3] = 0;
735 	if (pHba->host)
736 		spin_lock_irq(pHba->host->host_lock);
737 	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
738 	if (pHba->host)
739 		spin_unlock_irq(pHba->host->host_lock);
740 	if (rcode != 0) {
741 		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
742 		return FAILED;
743 	} else {
744 		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
745 		return SUCCESS;
746 	}
747 }
748 
749 // This version of reset is called by the eh_error_handler
__adpt_reset(struct scsi_cmnd * cmd)750 static int __adpt_reset(struct scsi_cmnd* cmd)
751 {
752 	adpt_hba* pHba;
753 	int rcode;
754 	char name[32];
755 
756 	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
757 	strncpy(name, pHba->name, sizeof(name));
758 	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n", name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
759 	rcode =  adpt_hba_reset(pHba);
760 	if(rcode == 0){
761 		printk(KERN_WARNING"%s: HBA reset complete\n", name);
762 		return SUCCESS;
763 	} else {
764 		printk(KERN_WARNING"%s: HBA reset failed (%x)\n", name, rcode);
765 		return FAILED;
766 	}
767 }
768 
adpt_reset(struct scsi_cmnd * cmd)769 static int adpt_reset(struct scsi_cmnd* cmd)
770 {
771 	int rc;
772 
773 	spin_lock_irq(cmd->device->host->host_lock);
774 	rc = __adpt_reset(cmd);
775 	spin_unlock_irq(cmd->device->host->host_lock);
776 
777 	return rc;
778 }
779 
780 // This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
adpt_hba_reset(adpt_hba * pHba)781 static int adpt_hba_reset(adpt_hba* pHba)
782 {
783 	int rcode;
784 
785 	pHba->state |= DPTI_STATE_RESET;
786 
787 	// Activate does get status , init outbound, and get hrt
788 	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
789 		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
790 		adpt_i2o_delete_hba(pHba);
791 		return rcode;
792 	}
793 
794 	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
795 		adpt_i2o_delete_hba(pHba);
796 		return rcode;
797 	}
798 	PDEBUG("%s: in HOLD state\n",pHba->name);
799 
800 	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
801 		adpt_i2o_delete_hba(pHba);
802 		return rcode;
803 	}
804 	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);
805 
806 	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
807 		adpt_i2o_delete_hba(pHba);
808 		return rcode;
809 	}
810 
811 	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
812 		adpt_i2o_delete_hba(pHba);
813 		return rcode;
814 	}
815 	pHba->state &= ~DPTI_STATE_RESET;
816 
817 	scsi_host_complete_all_commands(pHba->host, DID_RESET);
818 	return 0;	/* return success */
819 }
820 
821 /*===========================================================================
822  *
823  *===========================================================================
824  */
825 
826 
adpt_i2o_sys_shutdown(void)827 static void adpt_i2o_sys_shutdown(void)
828 {
829 	adpt_hba *pHba, *pNext;
830 	struct adpt_i2o_post_wait_data *p1, *old;
831 
832 	printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
833 	printk(KERN_INFO "   This could take a few minutes if there are many devices attached\n");
834 	/* Delete all IOPs from the controller chain */
835 	/* They should have already been released by the
836 	 * scsi-core
837 	 */
838 	for (pHba = hba_chain; pHba; pHba = pNext) {
839 		pNext = pHba->next;
840 		adpt_i2o_delete_hba(pHba);
841 	}
842 
843 	/* Remove any timedout entries from the wait queue.  */
844 //	spin_lock_irqsave(&adpt_post_wait_lock, flags);
845 	/* Nothing should be outstanding at this point so just
846 	 * free them
847 	 */
848 	for(p1 = adpt_post_wait_queue; p1;) {
849 		old = p1;
850 		p1 = p1->next;
851 		kfree(old);
852 	}
853 //	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
854 	adpt_post_wait_queue = NULL;
855 
856 	printk(KERN_INFO "Adaptec I2O controllers down.\n");
857 }
858 
adpt_install_hba(struct scsi_host_template * sht,struct pci_dev * pDev)859 static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
860 {
861 
862 	adpt_hba* pHba = NULL;
863 	adpt_hba* p = NULL;
864 	ulong base_addr0_phys = 0;
865 	ulong base_addr1_phys = 0;
866 	u32 hba_map0_area_size = 0;
867 	u32 hba_map1_area_size = 0;
868 	void __iomem *base_addr_virt = NULL;
869 	void __iomem *msg_addr_virt = NULL;
870 	int dma64 = 0;
871 
872 	int raptorFlag = FALSE;
873 
874 	if(pci_enable_device(pDev)) {
875 		return -EINVAL;
876 	}
877 
878 	if (pci_request_regions(pDev, "dpt_i2o")) {
879 		PERROR("dpti: adpt_config_hba: pci request region failed\n");
880 		return -EINVAL;
881 	}
882 
883 	pci_set_master(pDev);
884 
885 	/*
886 	 *	See if we should enable dma64 mode.
887 	 */
888 	if (sizeof(dma_addr_t) > 4 &&
889 	    dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
890 	    dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
891 		dma64 = 1;
892 
893 	if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
894 		return -EINVAL;
895 
896 	/* adapter only supports message blocks below 4GB */
897 	dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));
898 
899 	base_addr0_phys = pci_resource_start(pDev,0);
900 	hba_map0_area_size = pci_resource_len(pDev,0);
901 
902 	// Check if standard PCI card or single BAR Raptor
903 	if(pDev->device == PCI_DPT_DEVICE_ID){
904 		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
905 			// Raptor card with this device id needs 4M
906 			hba_map0_area_size = 0x400000;
907 		} else { // Not Raptor - it is a PCI card
908 			if(hba_map0_area_size > 0x100000 ){
909 				hba_map0_area_size = 0x100000;
910 			}
911 		}
912 	} else {// Raptor split BAR config
913 		// Use BAR1 in this configuration
914 		base_addr1_phys = pci_resource_start(pDev,1);
915 		hba_map1_area_size = pci_resource_len(pDev,1);
916 		raptorFlag = TRUE;
917 	}
918 
919 #if BITS_PER_LONG == 64
920 	/*
921 	 *	The original Adaptec 64 bit driver has this comment here:
922 	 *	"x86_64 machines need more optimal mappings"
923 	 *
924 	 *	I assume some HBAs report ridiculously large mappings
925 	 *	and we need to limit them on platforms with IOMMUs.
926 	 */
927 	if (raptorFlag == TRUE) {
928 		if (hba_map0_area_size > 128)
929 			hba_map0_area_size = 128;
930 		if (hba_map1_area_size > 524288)
931 			hba_map1_area_size = 524288;
932 	} else {
933 		if (hba_map0_area_size > 524288)
934 			hba_map0_area_size = 524288;
935 	}
936 #endif
937 
938 	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
939 	if (!base_addr_virt) {
940 		pci_release_regions(pDev);
941 		PERROR("dpti: adpt_config_hba: io remap failed\n");
942 		return -EINVAL;
943 	}
944 
945         if(raptorFlag == TRUE) {
946 		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
947 		if (!msg_addr_virt) {
948 			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
949 			iounmap(base_addr_virt);
950 			pci_release_regions(pDev);
951 			return -EINVAL;
952 		}
953 	} else {
954 		msg_addr_virt = base_addr_virt;
955 	}
956 
957 	// Allocate and zero the data structure
958 	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
959 	if (!pHba) {
960 		if (msg_addr_virt != base_addr_virt)
961 			iounmap(msg_addr_virt);
962 		iounmap(base_addr_virt);
963 		pci_release_regions(pDev);
964 		return -ENOMEM;
965 	}
966 
967 	mutex_lock(&adpt_configuration_lock);
968 
969 	if(hba_chain != NULL){
970 		for(p = hba_chain; p->next; p = p->next);
971 		p->next = pHba;
972 	} else {
973 		hba_chain = pHba;
974 	}
975 	pHba->next = NULL;
976 	pHba->unit = hba_count;
977 	sprintf(pHba->name, "dpti%d", hba_count);
978 	hba_count++;
979 
980 	mutex_unlock(&adpt_configuration_lock);
981 
982 	pHba->pDev = pDev;
983 	pHba->base_addr_phys = base_addr0_phys;
984 
985 	// Set up the Virtual Base Address of the I2O Device
986 	pHba->base_addr_virt = base_addr_virt;
987 	pHba->msg_addr_virt = msg_addr_virt;
988 	pHba->irq_mask = base_addr_virt+0x30;
989 	pHba->post_port = base_addr_virt+0x40;
990 	pHba->reply_port = base_addr_virt+0x44;
991 
992 	pHba->hrt = NULL;
993 	pHba->lct = NULL;
994 	pHba->lct_size = 0;
995 	pHba->status_block = NULL;
996 	pHba->post_count = 0;
997 	pHba->state = DPTI_STATE_RESET;
998 	pHba->pDev = pDev;
999 	pHba->devices = NULL;
1000 	pHba->dma64 = dma64;
1001 
1002 	// Initializing the spinlocks
1003 	spin_lock_init(&pHba->state_lock);
1004 	spin_lock_init(&adpt_post_wait_lock);
1005 
1006 	if(raptorFlag == 0){
1007 		printk(KERN_INFO "Adaptec I2O RAID controller"
1008 				 " %d at %p size=%x irq=%d%s\n",
1009 			hba_count-1, base_addr_virt,
1010 			hba_map0_area_size, pDev->irq,
1011 			dma64 ? " (64-bit DMA)" : "");
1012 	} else {
1013 		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
1014 			hba_count-1, pDev->irq,
1015 			dma64 ? " (64-bit DMA)" : "");
1016 		printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
1017 		printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
1018 	}
1019 
1020 	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
1021 		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
1022 		adpt_i2o_delete_hba(pHba);
1023 		return -EINVAL;
1024 	}
1025 
1026 	return 0;
1027 }
1028 
1029 
adpt_i2o_delete_hba(adpt_hba * pHba)1030 static void adpt_i2o_delete_hba(adpt_hba* pHba)
1031 {
1032 	adpt_hba* p1;
1033 	adpt_hba* p2;
1034 	struct i2o_device* d;
1035 	struct i2o_device* next;
1036 	int i;
1037 	int j;
1038 	struct adpt_device* pDev;
1039 	struct adpt_device* pNext;
1040 
1041 
1042 	mutex_lock(&adpt_configuration_lock);
1043 	if(pHba->host){
1044 		free_irq(pHba->host->irq, pHba);
1045 	}
1046 	p2 = NULL;
1047 	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
1048 		if(p1 == pHba) {
1049 			if(p2) {
1050 				p2->next = p1->next;
1051 			} else {
1052 				hba_chain = p1->next;
1053 			}
1054 			break;
1055 		}
1056 	}
1057 
1058 	hba_count--;
1059 	mutex_unlock(&adpt_configuration_lock);
1060 
1061 	iounmap(pHba->base_addr_virt);
1062 	pci_release_regions(pHba->pDev);
1063 	if(pHba->msg_addr_virt != pHba->base_addr_virt){
1064 		iounmap(pHba->msg_addr_virt);
1065 	}
1066 	if(pHba->FwDebugBuffer_P)
1067 	   	iounmap(pHba->FwDebugBuffer_P);
1068 	if(pHba->hrt) {
1069 		dma_free_coherent(&pHba->pDev->dev,
1070 			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
1071 			pHba->hrt, pHba->hrt_pa);
1072 	}
1073 	if(pHba->lct) {
1074 		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
1075 			pHba->lct, pHba->lct_pa);
1076 	}
1077 	if(pHba->status_block) {
1078 		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
1079 			pHba->status_block, pHba->status_block_pa);
1080 	}
1081 	if(pHba->reply_pool) {
1082 		dma_free_coherent(&pHba->pDev->dev,
1083 			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
1084 			pHba->reply_pool, pHba->reply_pool_pa);
1085 	}
1086 
1087 	for(d = pHba->devices; d ; d = next){
1088 		next = d->next;
1089 		kfree(d);
1090 	}
1091 	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
1092 		for(j = 0; j < MAX_ID; j++){
1093 			if(pHba->channel[i].device[j] != NULL){
1094 				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
1095 					pNext = pDev->next_lun;
1096 					kfree(pDev);
1097 				}
1098 			}
1099 		}
1100 	}
1101 	pci_dev_put(pHba->pDev);
1102 	if (adpt_sysfs_class)
1103 		device_destroy(adpt_sysfs_class,
1104 				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
1105 	kfree(pHba);
1106 
1107 	if(hba_count <= 0){
1108 		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
1109 		if (adpt_sysfs_class) {
1110 			class_destroy(adpt_sysfs_class);
1111 			adpt_sysfs_class = NULL;
1112 		}
1113 	}
1114 }
1115 
adpt_find_device(adpt_hba * pHba,u32 chan,u32 id,u64 lun)1116 static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
1117 {
1118 	struct adpt_device* d;
1119 
1120 	if (chan >= MAX_CHANNEL)
1121 		return NULL;
1122 
1123 	d = pHba->channel[chan].device[id];
1124 	if(!d || d->tid == 0) {
1125 		return NULL;
1126 	}
1127 
1128 	/* If it is the only lun at that address then this should match*/
1129 	if(d->scsi_lun == lun){
1130 		return d;
1131 	}
1132 
1133 	/* else we need to look through all the luns */
1134 	for(d=d->next_lun ; d ; d = d->next_lun){
1135 		if(d->scsi_lun == lun){
1136 			return d;
1137 		}
1138 	}
1139 	return NULL;
1140 }
1141 
1142 
adpt_i2o_post_wait(adpt_hba * pHba,u32 * msg,int len,int timeout)1143 static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
1144 {
1145 	// I used my own version of the WAIT_QUEUE_HEAD
1146 	// to handle some version differences
1147 	// When embedded in the kernel this could go back to the vanilla one
1148 	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
1149 	int status = 0;
1150 	ulong flags = 0;
1151 	struct adpt_i2o_post_wait_data *p1, *p2;
1152 	struct adpt_i2o_post_wait_data *wait_data =
1153 		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
1154 	DECLARE_WAITQUEUE(wait, current);
1155 
1156 	if (!wait_data)
1157 		return -ENOMEM;
1158 
1159 	/*
1160 	 * The spin locking is needed to keep anyone from playing
1161 	 * with the queue pointers and id while we do the same
1162 	 */
1163 	spin_lock_irqsave(&adpt_post_wait_lock, flags);
1164        // TODO we need a MORE unique way of getting ids
1165        // to support async LCT get
1166 	wait_data->next = adpt_post_wait_queue;
1167 	adpt_post_wait_queue = wait_data;
1168 	adpt_post_wait_id++;
1169 	adpt_post_wait_id &= 0x7fff;
1170 	wait_data->id =  adpt_post_wait_id;
1171 	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1172 
1173 	wait_data->wq = &adpt_wq_i2o_post;
1174 	wait_data->status = -ETIMEDOUT;
1175 
1176 	add_wait_queue(&adpt_wq_i2o_post, &wait);
1177 
1178 	msg[2] |= 0x80000000 | ((u32)wait_data->id);
1179 	timeout *= HZ;
1180 	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
1181 		set_current_state(TASK_INTERRUPTIBLE);
1182 		if(pHba->host)
1183 			spin_unlock_irq(pHba->host->host_lock);
1184 		if (!timeout)
1185 			schedule();
1186 		else{
1187 			timeout = schedule_timeout(timeout);
1188 			if (timeout == 0) {
1189 				// I/O issued, but cannot get result in
1190 				// specified time. Freeing resorces is
1191 				// dangerous.
1192 				status = -ETIME;
1193 			}
1194 		}
1195 		if(pHba->host)
1196 			spin_lock_irq(pHba->host->host_lock);
1197 	}
1198 	remove_wait_queue(&adpt_wq_i2o_post, &wait);
1199 
1200 	if(status == -ETIMEDOUT){
1201 		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
1202 		// We will have to free the wait_data memory during shutdown
1203 		return status;
1204 	}
1205 
1206 	/* Remove the entry from the queue.  */
1207 	p2 = NULL;
1208 	spin_lock_irqsave(&adpt_post_wait_lock, flags);
1209 	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
1210 		if(p1 == wait_data) {
1211 			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
1212 				status = -EOPNOTSUPP;
1213 			}
1214 			if(p2) {
1215 				p2->next = p1->next;
1216 			} else {
1217 				adpt_post_wait_queue = p1->next;
1218 			}
1219 			break;
1220 		}
1221 	}
1222 	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1223 
1224 	kfree(wait_data);
1225 
1226 	return status;
1227 }
1228 
1229 
adpt_i2o_post_this(adpt_hba * pHba,u32 * data,int len)1230 static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
1231 {
1232 
1233 	u32 m = EMPTY_QUEUE;
1234 	u32 __iomem *msg;
1235 	ulong timeout = jiffies + 30*HZ;
1236 	do {
1237 		rmb();
1238 		m = readl(pHba->post_port);
1239 		if (m != EMPTY_QUEUE) {
1240 			break;
1241 		}
1242 		if(time_after(jiffies,timeout)){
1243 			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
1244 			return -ETIMEDOUT;
1245 		}
1246 		schedule_timeout_uninterruptible(1);
1247 	} while(m == EMPTY_QUEUE);
1248 
1249 	msg = pHba->msg_addr_virt + m;
1250 	memcpy_toio(msg, data, len);
1251 	wmb();
1252 
1253 	//post message
1254 	writel(m, pHba->post_port);
1255 	wmb();
1256 
1257 	return 0;
1258 }
1259 
1260 
adpt_i2o_post_wait_complete(u32 context,int status)1261 static void adpt_i2o_post_wait_complete(u32 context, int status)
1262 {
1263 	struct adpt_i2o_post_wait_data *p1 = NULL;
1264 	/*
1265 	 * We need to search through the adpt_post_wait
1266 	 * queue to see if the given message is still
1267 	 * outstanding.  If not, it means that the IOP
1268 	 * took longer to respond to the message than we
1269 	 * had allowed and timer has already expired.
1270 	 * Not much we can do about that except log
1271 	 * it for debug purposes, increase timeout, and recompile
1272 	 *
1273 	 * Lock needed to keep anyone from moving queue pointers
1274 	 * around while we're looking through them.
1275 	 */
1276 
1277 	context &= 0x7fff;
1278 
1279 	spin_lock(&adpt_post_wait_lock);
1280 	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1281 		if(p1->id == context) {
1282 			p1->status = status;
1283 			spin_unlock(&adpt_post_wait_lock);
1284 			wake_up_interruptible(p1->wq);
1285 			return;
1286 		}
1287 	}
1288 	spin_unlock(&adpt_post_wait_lock);
1289         // If this happens we lose commands that probably really completed
1290 	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
1291 	printk(KERN_DEBUG"      Tasks in wait queue:\n");
1292 	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1293 		printk(KERN_DEBUG"           %d\n",p1->id);
1294 	}
1295 	return;
1296 }
1297 
adpt_i2o_reset_hba(adpt_hba * pHba)1298 static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1299 {
1300 	u32 msg[8];
1301 	u8* status;
1302 	dma_addr_t addr;
1303 	u32 m = EMPTY_QUEUE ;
1304 	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
1305 
1306 	if(pHba->initialized  == FALSE) {	// First time reset should be quick
1307 		timeout = jiffies + (25*HZ);
1308 	} else {
1309 		adpt_i2o_quiesce_hba(pHba);
1310 	}
1311 
1312 	do {
1313 		rmb();
1314 		m = readl(pHba->post_port);
1315 		if (m != EMPTY_QUEUE) {
1316 			break;
1317 		}
1318 		if(time_after(jiffies,timeout)){
1319 			printk(KERN_WARNING"Timeout waiting for message!\n");
1320 			return -ETIMEDOUT;
1321 		}
1322 		schedule_timeout_uninterruptible(1);
1323 	} while (m == EMPTY_QUEUE);
1324 
1325 	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
1326 	if(status == NULL) {
1327 		adpt_send_nop(pHba, m);
1328 		printk(KERN_ERR"IOP reset failed - no free memory.\n");
1329 		return -ENOMEM;
1330 	}
1331 
1332 	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1333 	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
1334 	msg[2]=0;
1335 	msg[3]=0;
1336 	msg[4]=0;
1337 	msg[5]=0;
1338 	msg[6]=dma_low(addr);
1339 	msg[7]=dma_high(addr);
1340 
1341 	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
1342 	wmb();
1343 	writel(m, pHba->post_port);
1344 	wmb();
1345 
1346 	while(*status == 0){
1347 		if(time_after(jiffies,timeout)){
1348 			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
1349 			/* We lose 4 bytes of "status" here, but we cannot
1350 			   free these because controller may awake and corrupt
1351 			   those bytes at any time */
1352 			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
1353 			return -ETIMEDOUT;
1354 		}
1355 		rmb();
1356 		schedule_timeout_uninterruptible(1);
1357 	}
1358 
1359 	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
1360 		PDEBUG("%s: Reset in progress...\n", pHba->name);
1361 		// Here we wait for message frame to become available
1362 		// indicated that reset has finished
1363 		do {
1364 			rmb();
1365 			m = readl(pHba->post_port);
1366 			if (m != EMPTY_QUEUE) {
1367 				break;
1368 			}
1369 			if(time_after(jiffies,timeout)){
1370 				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
1371 				/* We lose 4 bytes of "status" here, but we
1372 				   cannot free these because controller may
1373 				   awake and corrupt those bytes at any time */
1374 				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
1375 				return -ETIMEDOUT;
1376 			}
1377 			schedule_timeout_uninterruptible(1);
1378 		} while (m == EMPTY_QUEUE);
1379 		// Flush the offset
1380 		adpt_send_nop(pHba, m);
1381 	}
1382 	adpt_i2o_status_get(pHba);
1383 	if(*status == 0x02 ||
1384 			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
1385 		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
1386 				pHba->name);
1387 	} else {
1388 		PDEBUG("%s: Reset completed.\n", pHba->name);
1389 	}
1390 
1391 	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
1392 #ifdef UARTDELAY
1393 	// This delay is to allow someone attached to the card through the debug UART to
1394 	// set up the dump levels that they want before the rest of the initialization sequence
1395 	adpt_delay(20000);
1396 #endif
1397 	return 0;
1398 }
1399 
1400 
adpt_i2o_parse_lct(adpt_hba * pHba)1401 static int adpt_i2o_parse_lct(adpt_hba* pHba)
1402 {
1403 	int i;
1404 	int max;
1405 	int tid;
1406 	struct i2o_device *d;
1407 	i2o_lct *lct = pHba->lct;
1408 	u8 bus_no = 0;
1409 	s16 scsi_id;
1410 	u64 scsi_lun;
1411 	u32 buf[10]; // larger than 7, or 8 ...
1412 	struct adpt_device* pDev;
1413 
1414 	if (lct == NULL) {
1415 		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
1416 		return -1;
1417 	}
1418 
1419 	max = lct->table_size;
1420 	max -= 3;
1421 	max /= 9;
1422 
1423 	for(i=0;i<max;i++) {
1424 		if( lct->lct_entry[i].user_tid != 0xfff){
1425 			/*
1426 			 * If we have hidden devices, we need to inform the upper layers about
1427 			 * the possible maximum id reference to handle device access when
1428 			 * an array is disassembled. This code has no other purpose but to
1429 			 * allow us future access to devices that are currently hidden
1430 			 * behind arrays, hotspares or have not been configured (JBOD mode).
1431 			 */
1432 			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
1433 			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
1434 			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1435 			    	continue;
1436 			}
1437 			tid = lct->lct_entry[i].tid;
1438 			// I2O_DPT_DEVICE_INFO_GROUP_NO;
1439 			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1440 				continue;
1441 			}
1442 			bus_no = buf[0]>>16;
1443 			scsi_id = buf[1];
1444 			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
1445 			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
1446 				printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
1447 				continue;
1448 			}
1449 			if (scsi_id >= MAX_ID){
1450 				printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
1451 				continue;
1452 			}
1453 			if(bus_no > pHba->top_scsi_channel){
1454 				pHba->top_scsi_channel = bus_no;
1455 			}
1456 			if(scsi_id > pHba->top_scsi_id){
1457 				pHba->top_scsi_id = scsi_id;
1458 			}
1459 			if(scsi_lun > pHba->top_scsi_lun){
1460 				pHba->top_scsi_lun = scsi_lun;
1461 			}
1462 			continue;
1463 		}
1464 		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1465 		if(d==NULL)
1466 		{
1467 			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
1468 			return -ENOMEM;
1469 		}
1470 
1471 		d->controller = pHba;
1472 		d->next = NULL;
1473 
1474 		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1475 
1476 		d->flags = 0;
1477 		tid = d->lct_data.tid;
1478 		adpt_i2o_report_hba_unit(pHba, d);
1479 		adpt_i2o_install_device(pHba, d);
1480 	}
1481 	bus_no = 0;
1482 	for(d = pHba->devices; d ; d = d->next) {
1483 		if(d->lct_data.class_id  == I2O_CLASS_BUS_ADAPTER_PORT ||
1484 		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PORT){
1485 			tid = d->lct_data.tid;
1486 			// TODO get the bus_no from hrt-but for now they are in order
1487 			//bus_no =
1488 			if(bus_no > pHba->top_scsi_channel){
1489 				pHba->top_scsi_channel = bus_no;
1490 			}
1491 			pHba->channel[bus_no].type = d->lct_data.class_id;
1492 			pHba->channel[bus_no].tid = tid;
1493 			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1494 			{
1495 				pHba->channel[bus_no].scsi_id = buf[1];
1496 				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1497 			}
1498 			// TODO remove - this is just until we get from hrt
1499 			bus_no++;
1500 			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
1501 				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
1502 				break;
1503 			}
1504 		}
1505 	}
1506 
1507 	// Setup adpt_device table
1508 	for(d = pHba->devices; d ; d = d->next) {
1509 		if(d->lct_data.class_id  == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
1510 		   d->lct_data.class_id  == I2O_CLASS_SCSI_PERIPHERAL ||
1511 		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1512 
1513 			tid = d->lct_data.tid;
1514 			scsi_id = -1;
1515 			// I2O_DPT_DEVICE_INFO_GROUP_NO;
1516 			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1517 				bus_no = buf[0]>>16;
1518 				scsi_id = buf[1];
1519 				scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
1520 				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
1521 					continue;
1522 				}
1523 				if (scsi_id >= MAX_ID) {
1524 					continue;
1525 				}
1526 				if( pHba->channel[bus_no].device[scsi_id] == NULL){
1527 					pDev =  kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1528 					if(pDev == NULL) {
1529 						return -ENOMEM;
1530 					}
1531 					pHba->channel[bus_no].device[scsi_id] = pDev;
1532 				} else {
1533 					for( pDev = pHba->channel[bus_no].device[scsi_id];
1534 							pDev->next_lun; pDev = pDev->next_lun){
1535 					}
1536 					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1537 					if(pDev->next_lun == NULL) {
1538 						return -ENOMEM;
1539 					}
1540 					pDev = pDev->next_lun;
1541 				}
1542 				pDev->tid = tid;
1543 				pDev->scsi_channel = bus_no;
1544 				pDev->scsi_id = scsi_id;
1545 				pDev->scsi_lun = scsi_lun;
1546 				pDev->pI2o_dev = d;
1547 				d->owner = pDev;
1548 				pDev->type = (buf[0])&0xff;
1549 				pDev->flags = (buf[0]>>8)&0xff;
1550 				if(scsi_id > pHba->top_scsi_id){
1551 					pHba->top_scsi_id = scsi_id;
1552 				}
1553 				if(scsi_lun > pHba->top_scsi_lun){
1554 					pHba->top_scsi_lun = scsi_lun;
1555 				}
1556 			}
1557 			if(scsi_id == -1){
1558 				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
1559 						d->lct_data.identity_tag);
1560 			}
1561 		}
1562 	}
1563 	return 0;
1564 }
1565 
1566 
1567 /*
1568  *	Each I2O controller has a chain of devices on it - these match
1569  *	the useful parts of the LCT of the board.
1570  */
1571 
adpt_i2o_install_device(adpt_hba * pHba,struct i2o_device * d)1572 static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1573 {
1574 	mutex_lock(&adpt_configuration_lock);
1575 	d->controller=pHba;
1576 	d->owner=NULL;
1577 	d->next=pHba->devices;
1578 	d->prev=NULL;
1579 	if (pHba->devices != NULL){
1580 		pHba->devices->prev=d;
1581 	}
1582 	pHba->devices=d;
1583 	*d->dev_name = 0;
1584 
1585 	mutex_unlock(&adpt_configuration_lock);
1586 	return 0;
1587 }
1588 
adpt_open(struct inode * inode,struct file * file)1589 static int adpt_open(struct inode *inode, struct file *file)
1590 {
1591 	int minor;
1592 	adpt_hba* pHba;
1593 
1594 	mutex_lock(&adpt_mutex);
1595 	//TODO check for root access
1596 	//
1597 	minor = iminor(inode);
1598 	if (minor >= hba_count) {
1599 		mutex_unlock(&adpt_mutex);
1600 		return -ENXIO;
1601 	}
1602 	mutex_lock(&adpt_configuration_lock);
1603 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1604 		if (pHba->unit == minor) {
1605 			break;	/* found adapter */
1606 		}
1607 	}
1608 	if (pHba == NULL) {
1609 		mutex_unlock(&adpt_configuration_lock);
1610 		mutex_unlock(&adpt_mutex);
1611 		return -ENXIO;
1612 	}
1613 
1614 //	if(pHba->in_use){
1615 	//	mutex_unlock(&adpt_configuration_lock);
1616 //		return -EBUSY;
1617 //	}
1618 
1619 	pHba->in_use = 1;
1620 	mutex_unlock(&adpt_configuration_lock);
1621 	mutex_unlock(&adpt_mutex);
1622 
1623 	return 0;
1624 }
1625 
adpt_close(struct inode * inode,struct file * file)1626 static int adpt_close(struct inode *inode, struct file *file)
1627 {
1628 	int minor;
1629 	adpt_hba* pHba;
1630 
1631 	minor = iminor(inode);
1632 	if (minor >= hba_count) {
1633 		return -ENXIO;
1634 	}
1635 	mutex_lock(&adpt_configuration_lock);
1636 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1637 		if (pHba->unit == minor) {
1638 			break;	/* found adapter */
1639 		}
1640 	}
1641 	mutex_unlock(&adpt_configuration_lock);
1642 	if (pHba == NULL) {
1643 		return -ENXIO;
1644 	}
1645 
1646 	pHba->in_use = 0;
1647 
1648 	return 0;
1649 }
1650 
1651 
adpt_i2o_passthru(adpt_hba * pHba,u32 __user * arg)1652 static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1653 {
1654 	u32 msg[MAX_MESSAGE_SIZE];
1655 	u32* reply = NULL;
1656 	u32 size = 0;
1657 	u32 reply_size = 0;
1658 	u32 __user *user_msg = arg;
1659 	u32 __user * user_reply = NULL;
1660 	void **sg_list = NULL;
1661 	u32 sg_offset = 0;
1662 	u32 sg_count = 0;
1663 	int sg_index = 0;
1664 	u32 i = 0;
1665 	u32 rcode = 0;
1666 	void *p = NULL;
1667 	dma_addr_t addr;
1668 	ulong flags = 0;
1669 
1670 	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1671 	// get user msg size in u32s
1672 	if(get_user(size, &user_msg[0])){
1673 		return -EFAULT;
1674 	}
1675 	size = size>>16;
1676 
1677 	user_reply = &user_msg[size];
1678 	if(size > MAX_MESSAGE_SIZE){
1679 		return -EFAULT;
1680 	}
1681 	size *= 4; // Convert to bytes
1682 
1683 	/* Copy in the user's I2O command */
1684 	if(copy_from_user(msg, user_msg, size)) {
1685 		return -EFAULT;
1686 	}
1687 	get_user(reply_size, &user_reply[0]);
1688 	reply_size = reply_size>>16;
1689 	if(reply_size > REPLY_FRAME_SIZE){
1690 		reply_size = REPLY_FRAME_SIZE;
1691 	}
1692 	reply_size *= 4;
1693 	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
1694 	if(reply == NULL) {
1695 		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
1696 		return -ENOMEM;
1697 	}
1698 	sg_offset = (msg[0]>>4)&0xf;
1699 	msg[2] = 0x40000000; // IOCTL context
1700 	msg[3] = adpt_ioctl_to_context(pHba, reply);
1701 	if (msg[3] == (u32)-1) {
1702 		rcode = -EBUSY;
1703 		goto free;
1704 	}
1705 
1706 	sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL);
1707 	if (!sg_list) {
1708 		rcode = -ENOMEM;
1709 		goto free;
1710 	}
1711 	if(sg_offset) {
1712 		// TODO add 64 bit API
1713 		struct sg_simple_element *sg =  (struct sg_simple_element*) (msg+sg_offset);
1714 		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1715 		if (sg_count > pHba->sg_tablesize){
1716 			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1717 			rcode = -EINVAL;
1718 			goto free;
1719 		}
1720 
1721 		for(i = 0; i < sg_count; i++) {
1722 			int sg_size;
1723 
1724 			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
1725 				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i,  sg[i].flag_count);
1726 				rcode = -EINVAL;
1727 				goto cleanup;
1728 			}
1729 			sg_size = sg[i].flag_count & 0xffffff;
1730 			/* Allocate memory for the transfer */
1731 			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
1732 			if(!p) {
1733 				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1734 						pHba->name,sg_size,i,sg_count);
1735 				rcode = -ENOMEM;
1736 				goto cleanup;
1737 			}
1738 			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1739 			/* Copy in the user's SG buffer if necessary */
1740 			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
1741 				// sg_simple_element API is 32 bit
1742 				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
1743 					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1744 					rcode = -EFAULT;
1745 					goto cleanup;
1746 				}
1747 			}
1748 			/* sg_simple_element API is 32 bit, but addr < 4GB */
1749 			sg[i].addr_bus = addr;
1750 		}
1751 	}
1752 
1753 	do {
1754 		/*
1755 		 * Stop any new commands from enterring the
1756 		 * controller while processing the ioctl
1757 		 */
1758 		if (pHba->host) {
1759 			scsi_block_requests(pHba->host);
1760 			spin_lock_irqsave(pHba->host->host_lock, flags);
1761 		}
1762 		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1763 		if (rcode != 0)
1764 			printk("adpt_i2o_passthru: post wait failed %d %p\n",
1765 					rcode, reply);
1766 		if (pHba->host) {
1767 			spin_unlock_irqrestore(pHba->host->host_lock, flags);
1768 			scsi_unblock_requests(pHba->host);
1769 		}
1770 	} while (rcode == -ETIMEDOUT);
1771 
1772 	if(rcode){
1773 		goto cleanup;
1774 	}
1775 
1776 	if(sg_offset) {
1777 	/* Copy back the Scatter Gather buffers back to user space */
1778 		u32 j;
1779 		// TODO add 64 bit API
1780 		struct sg_simple_element* sg;
1781 		int sg_size;
1782 
1783 		// re-acquire the original message to handle correctly the sg copy operation
1784 		memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1785 		// get user msg size in u32s
1786 		if(get_user(size, &user_msg[0])){
1787 			rcode = -EFAULT;
1788 			goto cleanup;
1789 		}
1790 		size = size>>16;
1791 		size *= 4;
1792 		if (size > MAX_MESSAGE_SIZE) {
1793 			rcode = -EINVAL;
1794 			goto cleanup;
1795 		}
1796 		/* Copy in the user's I2O command */
1797 		if (copy_from_user (msg, user_msg, size)) {
1798 			rcode = -EFAULT;
1799 			goto cleanup;
1800 		}
1801 		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1802 
1803 		// TODO add 64 bit API
1804 		sg 	 = (struct sg_simple_element*)(msg + sg_offset);
1805 		for (j = 0; j < sg_count; j++) {
1806 			/* Copy out the SG list to user's buffer if necessary */
1807 			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1808 				sg_size = sg[j].flag_count & 0xffffff;
1809 				// sg_simple_element API is 32 bit
1810 				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
1811 					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1812 					rcode = -EFAULT;
1813 					goto cleanup;
1814 				}
1815 			}
1816 		}
1817 	}
1818 
1819 	/* Copy back the reply to user space */
1820 	if (reply_size) {
1821 		// we wrote our own values for context - now restore the user supplied ones
1822 		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1823 			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1824 			rcode = -EFAULT;
1825 		}
1826 		if(copy_to_user(user_reply, reply, reply_size)) {
1827 			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1828 			rcode = -EFAULT;
1829 		}
1830 	}
1831 
1832 
1833 cleanup:
1834 	if (rcode != -ETIME && rcode != -EINTR) {
1835 		struct sg_simple_element *sg =
1836 				(struct sg_simple_element*) (msg +sg_offset);
1837 		while(sg_index) {
1838 			if(sg_list[--sg_index]) {
1839 				dma_free_coherent(&pHba->pDev->dev,
1840 					sg[sg_index].flag_count & 0xffffff,
1841 					sg_list[sg_index],
1842 					sg[sg_index].addr_bus);
1843 			}
1844 		}
1845 	}
1846 
1847 free:
1848 	kfree(sg_list);
1849 	kfree(reply);
1850 	return rcode;
1851 }
1852 
1853 #if defined __ia64__
adpt_ia64_info(sysInfo_S * si)1854 static void adpt_ia64_info(sysInfo_S* si)
1855 {
1856 	// This is all the info we need for now
1857 	// We will add more info as our new
1858 	// managmenent utility requires it
1859 	si->processorType = PROC_IA64;
1860 }
1861 #endif
1862 
1863 #if defined __sparc__
adpt_sparc_info(sysInfo_S * si)1864 static void adpt_sparc_info(sysInfo_S* si)
1865 {
1866 	// This is all the info we need for now
1867 	// We will add more info as our new
1868 	// managmenent utility requires it
1869 	si->processorType = PROC_ULTRASPARC;
1870 }
1871 #endif
1872 #if defined __alpha__
adpt_alpha_info(sysInfo_S * si)1873 static void adpt_alpha_info(sysInfo_S* si)
1874 {
1875 	// This is all the info we need for now
1876 	// We will add more info as our new
1877 	// managmenent utility requires it
1878 	si->processorType = PROC_ALPHA;
1879 }
1880 #endif
1881 
1882 #if defined __i386__
1883 
1884 #include <uapi/asm/vm86.h>
1885 
adpt_i386_info(sysInfo_S * si)1886 static void adpt_i386_info(sysInfo_S* si)
1887 {
1888 	// This is all the info we need for now
1889 	// We will add more info as our new
1890 	// managmenent utility requires it
1891 	switch (boot_cpu_data.x86) {
1892 	case CPU_386:
1893 		si->processorType = PROC_386;
1894 		break;
1895 	case CPU_486:
1896 		si->processorType = PROC_486;
1897 		break;
1898 	case CPU_586:
1899 		si->processorType = PROC_PENTIUM;
1900 		break;
1901 	default:  // Just in case
1902 		si->processorType = PROC_PENTIUM;
1903 		break;
1904 	}
1905 }
1906 #endif
1907 
1908 /*
1909  * This routine returns information about the system.  This does not effect
1910  * any logic and if the info is wrong - it doesn't matter.
1911  */
1912 
1913 /* Get all the info we can not get from kernel services */
adpt_system_info(void __user * buffer)1914 static int adpt_system_info(void __user *buffer)
1915 {
1916 	sysInfo_S si;
1917 
1918 	memset(&si, 0, sizeof(si));
1919 
1920 	si.osType = OS_LINUX;
1921 	si.osMajorVersion = 0;
1922 	si.osMinorVersion = 0;
1923 	si.osRevision = 0;
1924 	si.busType = SI_PCI_BUS;
1925 	si.processorFamily = DPTI_sig.dsProcessorFamily;
1926 
1927 #if defined __i386__
1928 	adpt_i386_info(&si);
1929 #elif defined (__ia64__)
1930 	adpt_ia64_info(&si);
1931 #elif defined(__sparc__)
1932 	adpt_sparc_info(&si);
1933 #elif defined (__alpha__)
1934 	adpt_alpha_info(&si);
1935 #else
1936 	si.processorType = 0xff ;
1937 #endif
1938 	if (copy_to_user(buffer, &si, sizeof(si))){
1939 		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
1940 		return -EFAULT;
1941 	}
1942 
1943 	return 0;
1944 }
1945 
adpt_ioctl(struct inode * inode,struct file * file,uint cmd,ulong arg)1946 static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
1947 {
1948 	int minor;
1949 	int error = 0;
1950 	adpt_hba* pHba;
1951 	ulong flags = 0;
1952 	void __user *argp = (void __user *)arg;
1953 
1954 	minor = iminor(inode);
1955 	if (minor >= DPTI_MAX_HBA){
1956 		return -ENXIO;
1957 	}
1958 	mutex_lock(&adpt_configuration_lock);
1959 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1960 		if (pHba->unit == minor) {
1961 			break;	/* found adapter */
1962 		}
1963 	}
1964 	mutex_unlock(&adpt_configuration_lock);
1965 	if(pHba == NULL){
1966 		return -ENXIO;
1967 	}
1968 
1969 	while((volatile u32) pHba->state & DPTI_STATE_RESET )
1970 		schedule_timeout_uninterruptible(2);
1971 
1972 	switch (cmd) {
1973 	// TODO: handle 3 cases
1974 	case DPT_SIGNATURE:
1975 		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
1976 			return -EFAULT;
1977 		}
1978 		break;
1979 	case I2OUSRCMD:
1980 		return adpt_i2o_passthru(pHba, argp);
1981 
1982 	case DPT_CTRLINFO:{
1983 		drvrHBAinfo_S HbaInfo;
1984 
1985 #define FLG_OSD_PCI_VALID 0x0001
1986 #define FLG_OSD_DMA	  0x0002
1987 #define FLG_OSD_I2O	  0x0004
1988 		memset(&HbaInfo, 0, sizeof(HbaInfo));
1989 		HbaInfo.drvrHBAnum = pHba->unit;
1990 		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
1991 		HbaInfo.blinkState = adpt_read_blink_led(pHba);
1992 		HbaInfo.pciBusNum =  pHba->pDev->bus->number;
1993 		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
1994 		HbaInfo.Interrupt = pHba->pDev->irq;
1995 		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
1996 		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
1997 			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
1998 			return -EFAULT;
1999 		}
2000 		break;
2001 		}
2002 	case DPT_SYSINFO:
2003 		return adpt_system_info(argp);
2004 	case DPT_BLINKLED:{
2005 		u32 value;
2006 		value = (u32)adpt_read_blink_led(pHba);
2007 		if (copy_to_user(argp, &value, sizeof(value))) {
2008 			return -EFAULT;
2009 		}
2010 		break;
2011 		}
2012 	case I2ORESETCMD: {
2013 		struct Scsi_Host *shost = pHba->host;
2014 
2015 		if (shost)
2016 			spin_lock_irqsave(shost->host_lock, flags);
2017 		adpt_hba_reset(pHba);
2018 		if (shost)
2019 			spin_unlock_irqrestore(shost->host_lock, flags);
2020 		break;
2021 	}
2022 	case I2ORESCANCMD:
2023 		adpt_rescan(pHba);
2024 		break;
2025 	default:
2026 		return -EINVAL;
2027 	}
2028 
2029 	return error;
2030 }
2031 
adpt_unlocked_ioctl(struct file * file,uint cmd,ulong arg)2032 static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
2033 {
2034 	struct inode *inode;
2035 	long ret;
2036 
2037 	inode = file_inode(file);
2038 
2039 	mutex_lock(&adpt_mutex);
2040 	ret = adpt_ioctl(inode, file, cmd, arg);
2041 	mutex_unlock(&adpt_mutex);
2042 
2043 	return ret;
2044 }
2045 
2046 #ifdef CONFIG_COMPAT
compat_adpt_ioctl(struct file * file,unsigned int cmd,unsigned long arg)2047 static long compat_adpt_ioctl(struct file *file,
2048 				unsigned int cmd, unsigned long arg)
2049 {
2050 	struct inode *inode;
2051 	long ret;
2052 
2053 	inode = file_inode(file);
2054 
2055 	mutex_lock(&adpt_mutex);
2056 
2057 	switch(cmd) {
2058 		case DPT_SIGNATURE:
2059 		case I2OUSRCMD:
2060 		case DPT_CTRLINFO:
2061 		case DPT_SYSINFO:
2062 		case DPT_BLINKLED:
2063 		case I2ORESETCMD:
2064 		case I2ORESCANCMD:
2065 		case (DPT_TARGET_BUSY & 0xFFFF):
2066 		case DPT_TARGET_BUSY:
2067 			ret = adpt_ioctl(inode, file, cmd, arg);
2068 			break;
2069 		default:
2070 			ret =  -ENOIOCTLCMD;
2071 	}
2072 
2073 	mutex_unlock(&adpt_mutex);
2074 
2075 	return ret;
2076 }
2077 #endif
2078 
adpt_isr(int irq,void * dev_id)2079 static irqreturn_t adpt_isr(int irq, void *dev_id)
2080 {
2081 	struct scsi_cmnd* cmd;
2082 	adpt_hba* pHba = dev_id;
2083 	u32 m;
2084 	void __iomem *reply;
2085 	u32 status=0;
2086 	u32 context;
2087 	ulong flags = 0;
2088 	int handled = 0;
2089 
2090 	if (pHba == NULL){
2091 		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
2092 		return IRQ_NONE;
2093 	}
2094 	if(pHba->host)
2095 		spin_lock_irqsave(pHba->host->host_lock, flags);
2096 
2097 	while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2098 		m = readl(pHba->reply_port);
2099 		if(m == EMPTY_QUEUE){
2100 			// Try twice then give up
2101 			rmb();
2102 			m = readl(pHba->reply_port);
2103 			if(m == EMPTY_QUEUE){
2104 				// This really should not happen
2105 				printk(KERN_ERR"dpti: Could not get reply frame\n");
2106 				goto out;
2107 			}
2108 		}
2109 		if (pHba->reply_pool_pa <= m &&
2110 		    m < pHba->reply_pool_pa +
2111 			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2112 			reply = (u8 *)pHba->reply_pool +
2113 						(m - pHba->reply_pool_pa);
2114 		} else {
2115 			/* Ick, we should *never* be here */
2116 			printk(KERN_ERR "dpti: reply frame not from pool\n");
2117 			reply = (u8 *)bus_to_virt(m);
2118 		}
2119 
2120 		if (readl(reply) & MSG_FAIL) {
2121 			u32 old_m = readl(reply+28);
2122 			void __iomem *msg;
2123 			u32 old_context;
2124 			PDEBUG("%s: Failed message\n",pHba->name);
2125 			if(old_m >= 0x100000){
2126 				printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2127 				writel(m,pHba->reply_port);
2128 				continue;
2129 			}
2130 			// Transaction context is 0 in failed reply frame
2131 			msg = pHba->msg_addr_virt + old_m;
2132 			old_context = readl(msg+12);
2133 			writel(old_context, reply+12);
2134 			adpt_send_nop(pHba, old_m);
2135 		}
2136 		context = readl(reply+8);
2137 		if(context & 0x40000000){ // IOCTL
2138 			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
2139 			if( p != NULL) {
2140 				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
2141 			}
2142 			// All IOCTLs will also be post wait
2143 		}
2144 		if(context & 0x80000000){ // Post wait message
2145 			status = readl(reply+16);
2146 			if(status  >> 24){
2147 				status &=  0xffff; /* Get detail status */
2148 			} else {
2149 				status = I2O_POST_WAIT_OK;
2150 			}
2151 			if(!(context & 0x40000000)) {
2152 				/*
2153 				 * The request tag is one less than the command tag
2154 				 * as the firmware might treat a 0 tag as invalid
2155 				 */
2156 				cmd = scsi_host_find_tag(pHba->host,
2157 							 readl(reply + 12) - 1);
2158 				if(cmd != NULL) {
2159 					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2160 				}
2161 			}
2162 			adpt_i2o_post_wait_complete(context, status);
2163 		} else { // SCSI message
2164 			/*
2165 			 * The request tag is one less than the command tag
2166 			 * as the firmware might treat a 0 tag as invalid
2167 			 */
2168 			cmd = scsi_host_find_tag(pHba->host,
2169 						 readl(reply + 12) - 1);
2170 			if(cmd != NULL){
2171 				scsi_dma_unmap(cmd);
2172 				adpt_i2o_scsi_complete(reply, cmd);
2173 			}
2174 		}
2175 		writel(m, pHba->reply_port);
2176 		wmb();
2177 		rmb();
2178 	}
2179 	handled = 1;
2180 out:	if(pHba->host)
2181 		spin_unlock_irqrestore(pHba->host->host_lock, flags);
2182 	return IRQ_RETVAL(handled);
2183 }
2184 
adpt_scsi_to_i2o(adpt_hba * pHba,struct scsi_cmnd * cmd,struct adpt_device * d)2185 static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2186 {
2187 	int i;
2188 	u32 msg[MAX_MESSAGE_SIZE];
2189 	u32* mptr;
2190 	u32* lptr;
2191 	u32 *lenptr;
2192 	int direction;
2193 	int scsidir;
2194 	int nseg;
2195 	u32 len;
2196 	u32 reqlen;
2197 	s32 rcode;
2198 	dma_addr_t addr;
2199 
2200 	memset(msg, 0 , sizeof(msg));
2201 	len = scsi_bufflen(cmd);
2202 	direction = 0x00000000;
2203 
2204 	scsidir = 0x00000000;			// DATA NO XFER
2205 	if(len) {
2206 		/*
2207 		 * Set SCBFlags to indicate if data is being transferred
2208 		 * in or out, or no data transfer
2209 		 * Note:  Do not have to verify index is less than 0 since
2210 		 * cmd->cmnd[0] is an unsigned char
2211 		 */
2212 		switch(cmd->sc_data_direction){
2213 		case DMA_FROM_DEVICE:
2214 			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
2215 			break;
2216 		case DMA_TO_DEVICE:
2217 			direction=0x04000000;	// SGL OUT
2218 			scsidir  =0x80000000;	// DATA OUT (iop-->dev)
2219 			break;
2220 		case DMA_NONE:
2221 			break;
2222 		case DMA_BIDIRECTIONAL:
2223 			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
2224 			// Assume In - and continue;
2225 			break;
2226 		default:
2227 			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2228 			     pHba->name, cmd->cmnd[0]);
2229 			cmd->result = (DID_ERROR <<16);
2230 			cmd->scsi_done(cmd);
2231 			return 	0;
2232 		}
2233 	}
2234 	// msg[0] is set later
2235 	// I2O_CMD_SCSI_EXEC
2236 	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2237 	msg[2] = 0;
2238 	/* Add 1 to avoid firmware treating it as invalid command */
2239 	msg[3] = scsi_cmd_to_rq(cmd)->tag + 1;
2240 	// Our cards use the transaction context as the tag for queueing
2241 	// Adaptec/DPT Private stuff
2242 	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2243 	msg[5] = d->tid;
2244 	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
2245 	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
2246 	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2247 	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2248 	msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2249 
2250 	mptr=msg+7;
2251 
2252 	// Write SCSI command into the message - always 16 byte block
2253 	memset(mptr, 0,  16);
2254 	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2255 	mptr+=4;
2256 	lenptr=mptr++;		/* Remember me - fill in when we know */
2257 	if (dpt_dma64(pHba)) {
2258 		reqlen = 16;		// SINGLE SGE
2259 		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2260 		*mptr++ = 1 << PAGE_SHIFT;
2261 	} else {
2262 		reqlen = 14;		// SINGLE SGE
2263 	}
2264 	/* Now fill in the SGList and command */
2265 
2266 	nseg = scsi_dma_map(cmd);
2267 	BUG_ON(nseg < 0);
2268 	if (nseg) {
2269 		struct scatterlist *sg;
2270 
2271 		len = 0;
2272 		scsi_for_each_sg(cmd, sg, nseg, i) {
2273 			lptr = mptr;
2274 			*mptr++ = direction|0x10000000|sg_dma_len(sg);
2275 			len+=sg_dma_len(sg);
2276 			addr = sg_dma_address(sg);
2277 			*mptr++ = dma_low(addr);
2278 			if (dpt_dma64(pHba))
2279 				*mptr++ = dma_high(addr);
2280 			/* Make this an end of list */
2281 			if (i == nseg - 1)
2282 				*lptr = direction|0xD0000000|sg_dma_len(sg);
2283 		}
2284 		reqlen = mptr - msg;
2285 		*lenptr = len;
2286 
2287 		if(cmd->underflow && len != cmd->underflow){
2288 			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2289 				len, cmd->underflow);
2290 		}
2291 	} else {
2292 		*lenptr = len = 0;
2293 		reqlen = 12;
2294 	}
2295 
2296 	/* Stick the headers on */
2297 	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2298 
2299 	// Send it on it's way
2300 	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2301 	if (rcode == 0) {
2302 		return 0;
2303 	}
2304 	return rcode;
2305 }
2306 
2307 
adpt_scsi_host_alloc(adpt_hba * pHba,struct scsi_host_template * sht)2308 static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2309 {
2310 	struct Scsi_Host *host;
2311 
2312 	host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2313 	if (host == NULL) {
2314 		printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
2315 		return -1;
2316 	}
2317 	host->hostdata[0] = (unsigned long)pHba;
2318 	pHba->host = host;
2319 
2320 	host->irq = pHba->pDev->irq;
2321 	/* no IO ports, so don't have to set host->io_port and
2322 	 * host->n_io_port
2323 	 */
2324 	host->io_port = 0;
2325 	host->n_io_port = 0;
2326 				/* see comments in scsi_host.h */
2327 	host->max_id = 16;
2328 	host->max_lun = 256;
2329 	host->max_channel = pHba->top_scsi_channel + 1;
2330 	host->cmd_per_lun = 1;
2331 	host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2332 	host->sg_tablesize = pHba->sg_tablesize;
2333 	host->can_queue = pHba->post_fifo_size;
2334 
2335 	return 0;
2336 }
2337 
2338 
adpt_i2o_scsi_complete(void __iomem * reply,struct scsi_cmnd * cmd)2339 static void adpt_i2o_scsi_complete(void __iomem *reply, struct scsi_cmnd *cmd)
2340 {
2341 	adpt_hba* pHba;
2342 	u32 hba_status;
2343 	u32 dev_status;
2344 	u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2345 	// I know this would look cleaner if I just read bytes
2346 	// but the model I have been using for all the rest of the
2347 	// io is in 4 byte words - so I keep that model
2348 	u16 detailed_status = readl(reply+16) &0xffff;
2349 	dev_status = (detailed_status & 0xff);
2350 	hba_status = detailed_status >> 8;
2351 
2352 	// calculate resid for sg
2353 	scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
2354 
2355 	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2356 
2357 	cmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false
2358 
2359 	if(!(reply_flags & MSG_FAIL)) {
2360 		switch(detailed_status & I2O_SCSI_DSC_MASK) {
2361 		case I2O_SCSI_DSC_SUCCESS:
2362 			cmd->result = (DID_OK << 16);
2363 			// handle underflow
2364 			if (readl(reply+20) < cmd->underflow) {
2365 				cmd->result = (DID_ERROR <<16);
2366 				printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2367 			}
2368 			break;
2369 		case I2O_SCSI_DSC_REQUEST_ABORTED:
2370 			cmd->result = (DID_ABORT << 16);
2371 			break;
2372 		case I2O_SCSI_DSC_PATH_INVALID:
2373 		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2374 		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2375 		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2376 		case I2O_SCSI_DSC_NO_ADAPTER:
2377 		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2378 			printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2379 				pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2380 			cmd->result = (DID_TIME_OUT << 16);
2381 			break;
2382 		case I2O_SCSI_DSC_ADAPTER_BUSY:
2383 		case I2O_SCSI_DSC_BUS_BUSY:
2384 			cmd->result = (DID_BUS_BUSY << 16);
2385 			break;
2386 		case I2O_SCSI_DSC_SCSI_BUS_RESET:
2387 		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2388 			cmd->result = (DID_RESET << 16);
2389 			break;
2390 		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2391 			printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2392 			cmd->result = (DID_PARITY << 16);
2393 			break;
2394 		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2395 		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2396 		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2397 		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2398 		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2399 		case I2O_SCSI_DSC_DATA_OVERRUN:
2400 		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2401 		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2402 		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2403 		case I2O_SCSI_DSC_PROVIDE_FAILURE:
2404 		case I2O_SCSI_DSC_REQUEST_TERMINATED:
2405 		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2406 		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2407 		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2408 		case I2O_SCSI_DSC_INVALID_CDB:
2409 		case I2O_SCSI_DSC_LUN_INVALID:
2410 		case I2O_SCSI_DSC_SCSI_TID_INVALID:
2411 		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2412 		case I2O_SCSI_DSC_NO_NEXUS:
2413 		case I2O_SCSI_DSC_CDB_RECEIVED:
2414 		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2415 		case I2O_SCSI_DSC_QUEUE_FROZEN:
2416 		case I2O_SCSI_DSC_REQUEST_INVALID:
2417 		default:
2418 			printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2419 				pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2420 			       hba_status, dev_status, cmd->cmnd[0]);
2421 			cmd->result = (DID_ERROR << 16);
2422 			break;
2423 		}
2424 
2425 		// copy over the request sense data if it was a check
2426 		// condition status
2427 		if (dev_status == SAM_STAT_CHECK_CONDITION) {
2428 			u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
2429 			// Copy over the sense data
2430 			memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2431 			if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2432 			   cmd->sense_buffer[2] == DATA_PROTECT ){
2433 				/* This is to handle an array failed */
2434 				cmd->result = (DID_TIME_OUT << 16);
2435 				printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2436 					pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2437 					hba_status, dev_status, cmd->cmnd[0]);
2438 
2439 			}
2440 		}
2441 	} else {
2442 		/* In this condtion we could not talk to the tid
2443 		 * the card rejected it.  We should signal a retry
2444 		 * for a limitted number of retries.
2445 		 */
2446 		cmd->result = (DID_TIME_OUT << 16);
2447 		printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
2448 			pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2449 			((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2450 	}
2451 
2452 	cmd->result |= (dev_status);
2453 
2454 	if(cmd->scsi_done != NULL){
2455 		cmd->scsi_done(cmd);
2456 	}
2457 }
2458 
2459 
adpt_rescan(adpt_hba * pHba)2460 static s32 adpt_rescan(adpt_hba* pHba)
2461 {
2462 	s32 rcode;
2463 	ulong flags = 0;
2464 
2465 	if(pHba->host)
2466 		spin_lock_irqsave(pHba->host->host_lock, flags);
2467 	if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2468 		goto out;
2469 	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2470 		goto out;
2471 	rcode = 0;
2472 out:	if(pHba->host)
2473 		spin_unlock_irqrestore(pHba->host->host_lock, flags);
2474 	return rcode;
2475 }
2476 
2477 
adpt_i2o_reparse_lct(adpt_hba * pHba)2478 static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2479 {
2480 	int i;
2481 	int max;
2482 	int tid;
2483 	struct i2o_device *d;
2484 	i2o_lct *lct = pHba->lct;
2485 	u8 bus_no = 0;
2486 	s16 scsi_id;
2487 	u64 scsi_lun;
2488 	u32 buf[10]; // at least 8 u32's
2489 	struct adpt_device* pDev = NULL;
2490 	struct i2o_device* pI2o_dev = NULL;
2491 
2492 	if (lct == NULL) {
2493 		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2494 		return -1;
2495 	}
2496 
2497 	max = lct->table_size;
2498 	max -= 3;
2499 	max /= 9;
2500 
2501 	// Mark each drive as unscanned
2502 	for (d = pHba->devices; d; d = d->next) {
2503 		pDev =(struct adpt_device*) d->owner;
2504 		if(!pDev){
2505 			continue;
2506 		}
2507 		pDev->state |= DPTI_DEV_UNSCANNED;
2508 	}
2509 
2510 	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2511 
2512 	for(i=0;i<max;i++) {
2513 		if( lct->lct_entry[i].user_tid != 0xfff){
2514 			continue;
2515 		}
2516 
2517 		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2518 		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2519 		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2520 			tid = lct->lct_entry[i].tid;
2521 			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2522 				printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2523 				continue;
2524 			}
2525 			bus_no = buf[0]>>16;
2526 			if (bus_no >= MAX_CHANNEL) {	/* Something wrong skip it */
2527 				printk(KERN_WARNING
2528 					"%s: Channel number %d out of range\n",
2529 					pHba->name, bus_no);
2530 				continue;
2531 			}
2532 
2533 			scsi_id = buf[1];
2534 			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
2535 			pDev = pHba->channel[bus_no].device[scsi_id];
2536 			/* da lun */
2537 			while(pDev) {
2538 				if(pDev->scsi_lun == scsi_lun) {
2539 					break;
2540 				}
2541 				pDev = pDev->next_lun;
2542 			}
2543 			if(!pDev ) { // Something new add it
2544 				d = kmalloc(sizeof(struct i2o_device),
2545 					    GFP_ATOMIC);
2546 				if(d==NULL)
2547 				{
2548 					printk(KERN_CRIT "Out of memory for I2O device data.\n");
2549 					return -ENOMEM;
2550 				}
2551 
2552 				d->controller = pHba;
2553 				d->next = NULL;
2554 
2555 				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2556 
2557 				d->flags = 0;
2558 				adpt_i2o_report_hba_unit(pHba, d);
2559 				adpt_i2o_install_device(pHba, d);
2560 
2561 				pDev = pHba->channel[bus_no].device[scsi_id];
2562 				if( pDev == NULL){
2563 					pDev =
2564 					  kzalloc(sizeof(struct adpt_device),
2565 						  GFP_ATOMIC);
2566 					if(pDev == NULL) {
2567 						return -ENOMEM;
2568 					}
2569 					pHba->channel[bus_no].device[scsi_id] = pDev;
2570 				} else {
2571 					while (pDev->next_lun) {
2572 						pDev = pDev->next_lun;
2573 					}
2574 					pDev = pDev->next_lun =
2575 					  kzalloc(sizeof(struct adpt_device),
2576 						  GFP_ATOMIC);
2577 					if(pDev == NULL) {
2578 						return -ENOMEM;
2579 					}
2580 				}
2581 				pDev->tid = d->lct_data.tid;
2582 				pDev->scsi_channel = bus_no;
2583 				pDev->scsi_id = scsi_id;
2584 				pDev->scsi_lun = scsi_lun;
2585 				pDev->pI2o_dev = d;
2586 				d->owner = pDev;
2587 				pDev->type = (buf[0])&0xff;
2588 				pDev->flags = (buf[0]>>8)&0xff;
2589 				// Too late, SCSI system has made up it's mind, but what the hey ...
2590 				if(scsi_id > pHba->top_scsi_id){
2591 					pHba->top_scsi_id = scsi_id;
2592 				}
2593 				if(scsi_lun > pHba->top_scsi_lun){
2594 					pHba->top_scsi_lun = scsi_lun;
2595 				}
2596 				continue;
2597 			} // end of new i2o device
2598 
2599 			// We found an old device - check it
2600 			while(pDev) {
2601 				if(pDev->scsi_lun == scsi_lun) {
2602 					if(!scsi_device_online(pDev->pScsi_dev)) {
2603 						printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
2604 								pHba->name,bus_no,scsi_id,scsi_lun);
2605 						if (pDev->pScsi_dev) {
2606 							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2607 						}
2608 					}
2609 					d = pDev->pI2o_dev;
2610 					if(d->lct_data.tid != tid) { // something changed
2611 						pDev->tid = tid;
2612 						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2613 						if (pDev->pScsi_dev) {
2614 							pDev->pScsi_dev->changed = TRUE;
2615 							pDev->pScsi_dev->removable = TRUE;
2616 						}
2617 					}
2618 					// Found it - mark it scanned
2619 					pDev->state = DPTI_DEV_ONLINE;
2620 					break;
2621 				}
2622 				pDev = pDev->next_lun;
2623 			}
2624 		}
2625 	}
2626 	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2627 		pDev =(struct adpt_device*) pI2o_dev->owner;
2628 		if(!pDev){
2629 			continue;
2630 		}
2631 		// Drive offline drives that previously existed but could not be found
2632 		// in the LCT table
2633 		if (pDev->state & DPTI_DEV_UNSCANNED){
2634 			pDev->state = DPTI_DEV_OFFLINE;
2635 			printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2636 			if (pDev->pScsi_dev) {
2637 				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2638 			}
2639 		}
2640 	}
2641 	return 0;
2642 }
2643 
2644 /*============================================================================
2645  *  Routines from i2o subsystem
2646  *============================================================================
2647  */
2648 
2649 
2650 
2651 /*
2652  *	Bring an I2O controller into HOLD state. See the spec.
2653  */
adpt_i2o_activate_hba(adpt_hba * pHba)2654 static int adpt_i2o_activate_hba(adpt_hba* pHba)
2655 {
2656 	int rcode;
2657 
2658 	if(pHba->initialized ) {
2659 		if (adpt_i2o_status_get(pHba) < 0) {
2660 			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2661 				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2662 				return rcode;
2663 			}
2664 			if (adpt_i2o_status_get(pHba) < 0) {
2665 				printk(KERN_INFO "HBA not responding.\n");
2666 				return -1;
2667 			}
2668 		}
2669 
2670 		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2671 			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2672 			return -1;
2673 		}
2674 
2675 		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2676 		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2677 		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2678 		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2679 			adpt_i2o_reset_hba(pHba);
2680 			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2681 				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2682 				return -1;
2683 			}
2684 		}
2685 	} else {
2686 		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2687 			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2688 			return rcode;
2689 		}
2690 
2691 	}
2692 
2693 	if (adpt_i2o_init_outbound_q(pHba) < 0) {
2694 		return -1;
2695 	}
2696 
2697 	/* In HOLD state */
2698 
2699 	if (adpt_i2o_hrt_get(pHba) < 0) {
2700 		return -1;
2701 	}
2702 
2703 	return 0;
2704 }
2705 
2706 /*
2707  *	Bring a controller online into OPERATIONAL state.
2708  */
2709 
adpt_i2o_online_hba(adpt_hba * pHba)2710 static int adpt_i2o_online_hba(adpt_hba* pHba)
2711 {
2712 	if (adpt_i2o_systab_send(pHba) < 0)
2713 		return -1;
2714 	/* In READY state */
2715 
2716 	if (adpt_i2o_enable_hba(pHba) < 0)
2717 		return -1;
2718 
2719 	/* In OPERATIONAL state  */
2720 	return 0;
2721 }
2722 
adpt_send_nop(adpt_hba * pHba,u32 m)2723 static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2724 {
2725 	u32 __iomem *msg;
2726 	ulong timeout = jiffies + 5*HZ;
2727 
2728 	while(m == EMPTY_QUEUE){
2729 		rmb();
2730 		m = readl(pHba->post_port);
2731 		if(m != EMPTY_QUEUE){
2732 			break;
2733 		}
2734 		if(time_after(jiffies,timeout)){
2735 			printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2736 			return 2;
2737 		}
2738 		schedule_timeout_uninterruptible(1);
2739 	}
2740 	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2741 	writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2742 	writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2743 	writel( 0,&msg[2]);
2744 	wmb();
2745 
2746 	writel(m, pHba->post_port);
2747 	wmb();
2748 	return 0;
2749 }
2750 
adpt_i2o_init_outbound_q(adpt_hba * pHba)2751 static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2752 {
2753 	u8 *status;
2754 	dma_addr_t addr;
2755 	u32 __iomem *msg = NULL;
2756 	int i;
2757 	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2758 	u32 m;
2759 
2760 	do {
2761 		rmb();
2762 		m = readl(pHba->post_port);
2763 		if (m != EMPTY_QUEUE) {
2764 			break;
2765 		}
2766 
2767 		if(time_after(jiffies,timeout)){
2768 			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2769 			return -ETIMEDOUT;
2770 		}
2771 		schedule_timeout_uninterruptible(1);
2772 	} while(m == EMPTY_QUEUE);
2773 
2774 	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2775 
2776 	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
2777 	if (!status) {
2778 		adpt_send_nop(pHba, m);
2779 		printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2780 			pHba->name);
2781 		return -ENOMEM;
2782 	}
2783 
2784 	writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2785 	writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2786 	writel(0, &msg[2]);
2787 	writel(0x0106, &msg[3]);	/* Transaction context */
2788 	writel(4096, &msg[4]);		/* Host page frame size */
2789 	writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);	/* Outbound msg frame size and Initcode */
2790 	writel(0xD0000004, &msg[6]);		/* Simple SG LE, EOB */
2791 	writel((u32)addr, &msg[7]);
2792 
2793 	writel(m, pHba->post_port);
2794 	wmb();
2795 
2796 	// Wait for the reply status to come back
2797 	do {
2798 		if (*status) {
2799 			if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2800 				break;
2801 			}
2802 		}
2803 		rmb();
2804 		if(time_after(jiffies,timeout)){
2805 			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2806 			/* We lose 4 bytes of "status" here, but we
2807 			   cannot free these because controller may
2808 			   awake and corrupt those bytes at any time */
2809 			/* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
2810 			return -ETIMEDOUT;
2811 		}
2812 		schedule_timeout_uninterruptible(1);
2813 	} while (1);
2814 
2815 	// If the command was successful, fill the fifo with our reply
2816 	// message packets
2817 	if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2818 		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2819 		return -2;
2820 	}
2821 	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2822 
2823 	if(pHba->reply_pool != NULL) {
2824 		dma_free_coherent(&pHba->pDev->dev,
2825 			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2826 			pHba->reply_pool, pHba->reply_pool_pa);
2827 	}
2828 
2829 	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2830 				pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2831 				&pHba->reply_pool_pa, GFP_KERNEL);
2832 	if (!pHba->reply_pool) {
2833 		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2834 		return -ENOMEM;
2835 	}
2836 
2837 	for(i = 0; i < pHba->reply_fifo_size; i++) {
2838 		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2839 			pHba->reply_port);
2840 		wmb();
2841 	}
2842 	adpt_i2o_status_get(pHba);
2843 	return 0;
2844 }
2845 
2846 
2847 /*
2848  * I2O System Table.  Contains information about
2849  * all the IOPs in the system.  Used to inform IOPs
2850  * about each other's existence.
2851  *
2852  * sys_tbl_ver is the CurrentChangeIndicator that is
2853  * used by IOPs to track changes.
2854  */
2855 
2856 
2857 
adpt_i2o_status_get(adpt_hba * pHba)2858 static s32 adpt_i2o_status_get(adpt_hba* pHba)
2859 {
2860 	ulong timeout;
2861 	u32 m;
2862 	u32 __iomem *msg;
2863 	u8 *status_block=NULL;
2864 
2865 	if(pHba->status_block == NULL) {
2866 		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
2867 					sizeof(i2o_status_block),
2868 					&pHba->status_block_pa, GFP_KERNEL);
2869 		if(pHba->status_block == NULL) {
2870 			printk(KERN_ERR
2871 			"dpti%d: Get Status Block failed; Out of memory. \n",
2872 			pHba->unit);
2873 			return -ENOMEM;
2874 		}
2875 	}
2876 	memset(pHba->status_block, 0, sizeof(i2o_status_block));
2877 	status_block = (u8*)(pHba->status_block);
2878 	timeout = jiffies+TMOUT_GETSTATUS*HZ;
2879 	do {
2880 		rmb();
2881 		m = readl(pHba->post_port);
2882 		if (m != EMPTY_QUEUE) {
2883 			break;
2884 		}
2885 		if(time_after(jiffies,timeout)){
2886 			printk(KERN_ERR "%s: Timeout waiting for message !\n",
2887 					pHba->name);
2888 			return -ETIMEDOUT;
2889 		}
2890 		schedule_timeout_uninterruptible(1);
2891 	} while(m==EMPTY_QUEUE);
2892 
2893 
2894 	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2895 
2896 	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
2897 	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
2898 	writel(1, &msg[2]);
2899 	writel(0, &msg[3]);
2900 	writel(0, &msg[4]);
2901 	writel(0, &msg[5]);
2902 	writel( dma_low(pHba->status_block_pa), &msg[6]);
2903 	writel( dma_high(pHba->status_block_pa), &msg[7]);
2904 	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
2905 
2906 	//post message
2907 	writel(m, pHba->post_port);
2908 	wmb();
2909 
2910 	while(status_block[87]!=0xff){
2911 		if(time_after(jiffies,timeout)){
2912 			printk(KERN_ERR"dpti%d: Get status timeout.\n",
2913 				pHba->unit);
2914 			return -ETIMEDOUT;
2915 		}
2916 		rmb();
2917 		schedule_timeout_uninterruptible(1);
2918 	}
2919 
2920 	// Set up our number of outbound and inbound messages
2921 	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
2922 	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
2923 		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
2924 	}
2925 
2926 	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
2927 	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
2928 		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
2929 	}
2930 
2931 	// Calculate the Scatter Gather list size
2932 	if (dpt_dma64(pHba)) {
2933 		pHba->sg_tablesize
2934 		  = ((pHba->status_block->inbound_frame_size * 4
2935 		  - 14 * sizeof(u32))
2936 		  / (sizeof(struct sg_simple_element) + sizeof(u32)));
2937 	} else {
2938 		pHba->sg_tablesize
2939 		  = ((pHba->status_block->inbound_frame_size * 4
2940 		  - 12 * sizeof(u32))
2941 		  / sizeof(struct sg_simple_element));
2942 	}
2943 	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
2944 		pHba->sg_tablesize = SG_LIST_ELEMENTS;
2945 	}
2946 
2947 
2948 #ifdef DEBUG
2949 	printk("dpti%d: State = ",pHba->unit);
2950 	switch(pHba->status_block->iop_state) {
2951 		case 0x01:
2952 			printk("INIT\n");
2953 			break;
2954 		case 0x02:
2955 			printk("RESET\n");
2956 			break;
2957 		case 0x04:
2958 			printk("HOLD\n");
2959 			break;
2960 		case 0x05:
2961 			printk("READY\n");
2962 			break;
2963 		case 0x08:
2964 			printk("OPERATIONAL\n");
2965 			break;
2966 		case 0x10:
2967 			printk("FAILED\n");
2968 			break;
2969 		case 0x11:
2970 			printk("FAULTED\n");
2971 			break;
2972 		default:
2973 			printk("%x (unknown!!)\n",pHba->status_block->iop_state);
2974 	}
2975 #endif
2976 	return 0;
2977 }
2978 
2979 /*
2980  * Get the IOP's Logical Configuration Table
2981  */
adpt_i2o_lct_get(adpt_hba * pHba)2982 static int adpt_i2o_lct_get(adpt_hba* pHba)
2983 {
2984 	u32 msg[8];
2985 	int ret;
2986 	u32 buf[16];
2987 
2988 	if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
2989 		pHba->lct_size = pHba->status_block->expected_lct_size;
2990 	}
2991 	do {
2992 		if (pHba->lct == NULL) {
2993 			pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
2994 					pHba->lct_size, &pHba->lct_pa,
2995 					GFP_ATOMIC);
2996 			if(pHba->lct == NULL) {
2997 				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
2998 					pHba->name);
2999 				return -ENOMEM;
3000 			}
3001 		}
3002 		memset(pHba->lct, 0, pHba->lct_size);
3003 
3004 		msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3005 		msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3006 		msg[2] = 0;
3007 		msg[3] = 0;
3008 		msg[4] = 0xFFFFFFFF;	/* All devices */
3009 		msg[5] = 0x00000000;	/* Report now */
3010 		msg[6] = 0xD0000000|pHba->lct_size;
3011 		msg[7] = (u32)pHba->lct_pa;
3012 
3013 		if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3014 			printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n",
3015 				pHba->name, ret);
3016 			printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3017 			return ret;
3018 		}
3019 
3020 		if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3021 			pHba->lct_size = pHba->lct->table_size << 2;
3022 			dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3023 					pHba->lct, pHba->lct_pa);
3024 			pHba->lct = NULL;
3025 		}
3026 	} while (pHba->lct == NULL);
3027 
3028 	PDEBUG("%s: Hardware resource table read.\n", pHba->name);
3029 
3030 
3031 	// I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3032 	if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3033 		pHba->FwDebugBufferSize = buf[1];
3034 		pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3035 						pHba->FwDebugBufferSize);
3036 		if (pHba->FwDebugBuffer_P) {
3037 			pHba->FwDebugFlags_P     = pHba->FwDebugBuffer_P +
3038 							FW_DEBUG_FLAGS_OFFSET;
3039 			pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3040 							FW_DEBUG_BLED_OFFSET;
3041 			pHba->FwDebugBLEDflag_P  = pHba->FwDebugBLEDvalue_P + 1;
3042 			pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3043 						FW_DEBUG_STR_LENGTH_OFFSET;
3044 			pHba->FwDebugBuffer_P += buf[2];
3045 			pHba->FwDebugFlags = 0;
3046 		}
3047 	}
3048 
3049 	return 0;
3050 }
3051 
adpt_i2o_build_sys_table(void)3052 static int adpt_i2o_build_sys_table(void)
3053 {
3054 	adpt_hba* pHba = hba_chain;
3055 	int count = 0;
3056 
3057 	if (sys_tbl)
3058 		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
3059 					sys_tbl, sys_tbl_pa);
3060 
3061 	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
3062 				(hba_count) * sizeof(struct i2o_sys_tbl_entry);
3063 
3064 	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
3065 				sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
3066 	if (!sys_tbl) {
3067 		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
3068 		return -ENOMEM;
3069 	}
3070 
3071 	sys_tbl->num_entries = hba_count;
3072 	sys_tbl->version = I2OVERSION;
3073 	sys_tbl->change_ind = sys_tbl_ind++;
3074 
3075 	for(pHba = hba_chain; pHba; pHba = pHba->next) {
3076 		u64 addr;
3077 		// Get updated Status Block so we have the latest information
3078 		if (adpt_i2o_status_get(pHba)) {
3079 			sys_tbl->num_entries--;
3080 			continue; // try next one
3081 		}
3082 
3083 		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
3084 		sys_tbl->iops[count].iop_id = pHba->unit + 2;
3085 		sys_tbl->iops[count].seg_num = 0;
3086 		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
3087 		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
3088 		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
3089 		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
3090 		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
3091 		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
3092 		addr = pHba->base_addr_phys + 0x40;
3093 		sys_tbl->iops[count].inbound_low = dma_low(addr);
3094 		sys_tbl->iops[count].inbound_high = dma_high(addr);
3095 
3096 		count++;
3097 	}
3098 
3099 #ifdef DEBUG
3100 {
3101 	u32 *table = (u32*)sys_tbl;
3102 	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
3103 	for(count = 0; count < (sys_tbl_len >>2); count++) {
3104 		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
3105 			count, table[count]);
3106 	}
3107 }
3108 #endif
3109 
3110 	return 0;
3111 }
3112 
3113 
3114 /*
3115  *	 Dump the information block associated with a given unit (TID)
3116  */
3117 
adpt_i2o_report_hba_unit(adpt_hba * pHba,struct i2o_device * d)3118 static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
3119 {
3120 	char buf[64];
3121 	int unit = d->lct_data.tid;
3122 
3123 	printk(KERN_INFO "TID %3.3d ", unit);
3124 
3125 	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3126 	{
3127 		buf[16]=0;
3128 		printk(" Vendor: %-12.12s", buf);
3129 	}
3130 	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3131 	{
3132 		buf[16]=0;
3133 		printk(" Device: %-12.12s", buf);
3134 	}
3135 	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3136 	{
3137 		buf[8]=0;
3138 		printk(" Rev: %-12.12s\n", buf);
3139 	}
3140 #ifdef DEBUG
3141 	 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3142 	 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3143 	 printk(KERN_INFO "\tFlags: ");
3144 
3145 	 if(d->lct_data.device_flags&(1<<0))
3146 		  printk("C");	     // ConfigDialog requested
3147 	 if(d->lct_data.device_flags&(1<<1))
3148 		  printk("U");	     // Multi-user capable
3149 	 if(!(d->lct_data.device_flags&(1<<4)))
3150 		  printk("P");	     // Peer service enabled!
3151 	 if(!(d->lct_data.device_flags&(1<<5)))
3152 		  printk("M");	     // Mgmt service enabled!
3153 	 printk("\n");
3154 #endif
3155 }
3156 
3157 #ifdef DEBUG
3158 /*
3159  *	Do i2o class name lookup
3160  */
adpt_i2o_get_class_name(int class)3161 static const char *adpt_i2o_get_class_name(int class)
3162 {
3163 	int idx = 16;
3164 	static char *i2o_class_name[] = {
3165 		"Executive",
3166 		"Device Driver Module",
3167 		"Block Device",
3168 		"Tape Device",
3169 		"LAN Interface",
3170 		"WAN Interface",
3171 		"Fibre Channel Port",
3172 		"Fibre Channel Device",
3173 		"SCSI Device",
3174 		"ATE Port",
3175 		"ATE Device",
3176 		"Floppy Controller",
3177 		"Floppy Device",
3178 		"Secondary Bus Port",
3179 		"Peer Transport Agent",
3180 		"Peer Transport",
3181 		"Unknown"
3182 	};
3183 
3184 	switch(class&0xFFF) {
3185 	case I2O_CLASS_EXECUTIVE:
3186 		idx = 0; break;
3187 	case I2O_CLASS_DDM:
3188 		idx = 1; break;
3189 	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3190 		idx = 2; break;
3191 	case I2O_CLASS_SEQUENTIAL_STORAGE:
3192 		idx = 3; break;
3193 	case I2O_CLASS_LAN:
3194 		idx = 4; break;
3195 	case I2O_CLASS_WAN:
3196 		idx = 5; break;
3197 	case I2O_CLASS_FIBRE_CHANNEL_PORT:
3198 		idx = 6; break;
3199 	case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3200 		idx = 7; break;
3201 	case I2O_CLASS_SCSI_PERIPHERAL:
3202 		idx = 8; break;
3203 	case I2O_CLASS_ATE_PORT:
3204 		idx = 9; break;
3205 	case I2O_CLASS_ATE_PERIPHERAL:
3206 		idx = 10; break;
3207 	case I2O_CLASS_FLOPPY_CONTROLLER:
3208 		idx = 11; break;
3209 	case I2O_CLASS_FLOPPY_DEVICE:
3210 		idx = 12; break;
3211 	case I2O_CLASS_BUS_ADAPTER_PORT:
3212 		idx = 13; break;
3213 	case I2O_CLASS_PEER_TRANSPORT_AGENT:
3214 		idx = 14; break;
3215 	case I2O_CLASS_PEER_TRANSPORT:
3216 		idx = 15; break;
3217 	}
3218 	return i2o_class_name[idx];
3219 }
3220 #endif
3221 
3222 
adpt_i2o_hrt_get(adpt_hba * pHba)3223 static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3224 {
3225 	u32 msg[6];
3226 	int ret, size = sizeof(i2o_hrt);
3227 
3228 	do {
3229 		if (pHba->hrt == NULL) {
3230 			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
3231 					size, &pHba->hrt_pa, GFP_KERNEL);
3232 			if (pHba->hrt == NULL) {
3233 				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3234 				return -ENOMEM;
3235 			}
3236 		}
3237 
3238 		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3239 		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3240 		msg[2]= 0;
3241 		msg[3]= 0;
3242 		msg[4]= (0xD0000000 | size);    /* Simple transaction */
3243 		msg[5]= (u32)pHba->hrt_pa;	/* Dump it here */
3244 
3245 		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3246 			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3247 			return ret;
3248 		}
3249 
3250 		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3251 			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3252 			dma_free_coherent(&pHba->pDev->dev, size,
3253 				pHba->hrt, pHba->hrt_pa);
3254 			size = newsize;
3255 			pHba->hrt = NULL;
3256 		}
3257 	} while(pHba->hrt == NULL);
3258 	return 0;
3259 }
3260 
3261 /*
3262  *	 Query one scalar group value or a whole scalar group.
3263  */
adpt_i2o_query_scalar(adpt_hba * pHba,int tid,int group,int field,void * buf,int buflen)3264 static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3265 			int group, int field, void *buf, int buflen)
3266 {
3267 	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3268 	u8 *opblk_va;
3269 	dma_addr_t opblk_pa;
3270 	u8 *resblk_va;
3271 	dma_addr_t resblk_pa;
3272 
3273 	int size;
3274 
3275 	/* 8 bytes for header */
3276 	resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3277 			sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3278 	if (resblk_va == NULL) {
3279 		printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3280 		return -ENOMEM;
3281 	}
3282 
3283 	opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3284 			sizeof(opblk), &opblk_pa, GFP_KERNEL);
3285 	if (opblk_va == NULL) {
3286 		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3287 			resblk_va, resblk_pa);
3288 		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
3289 			pHba->name);
3290 		return -ENOMEM;
3291 	}
3292 	if (field == -1)  		/* whole group */
3293 			opblk[4] = -1;
3294 
3295 	memcpy(opblk_va, opblk, sizeof(opblk));
3296 	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3297 		opblk_va, opblk_pa, sizeof(opblk),
3298 		resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3299 	dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
3300 	if (size == -ETIME) {
3301 		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3302 							resblk_va, resblk_pa);
3303 		printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3304 		return -ETIME;
3305 	} else if (size == -EINTR) {
3306 		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3307 							resblk_va, resblk_pa);
3308 		printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3309 		return -EINTR;
3310 	}
3311 
3312 	memcpy(buf, resblk_va+8, buflen);  /* cut off header */
3313 
3314 	dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3315 						resblk_va, resblk_pa);
3316 	if (size < 0)
3317 		return size;
3318 
3319 	return buflen;
3320 }
3321 
3322 
3323 /*	Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3324  *
3325  *	This function can be used for all UtilParamsGet/Set operations.
3326  *	The OperationBlock is given in opblk-buffer,
3327  *	and results are returned in resblk-buffer.
3328  *	Note that the minimum sized resblk is 8 bytes and contains
3329  *	ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3330  */
adpt_i2o_issue_params(int cmd,adpt_hba * pHba,int tid,void * opblk_va,dma_addr_t opblk_pa,int oplen,void * resblk_va,dma_addr_t resblk_pa,int reslen)3331 static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3332 		  void *opblk_va,  dma_addr_t opblk_pa, int oplen,
3333 		void *resblk_va, dma_addr_t resblk_pa, int reslen)
3334 {
3335 	u32 msg[9];
3336 	u32 *res = (u32 *)resblk_va;
3337 	int wait_status;
3338 
3339 	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3340 	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
3341 	msg[2] = 0;
3342 	msg[3] = 0;
3343 	msg[4] = 0;
3344 	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
3345 	msg[6] = (u32)opblk_pa;
3346 	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
3347 	msg[8] = (u32)resblk_pa;
3348 
3349 	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3350 		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
3351    		return wait_status; 	/* -DetailedStatus */
3352 	}
3353 
3354 	if (res[1]&0x00FF0000) { 	/* BlockStatus != SUCCESS */
3355 		printk(KERN_WARNING "%s: %s - Error:\n  ErrorInfoSize = 0x%02x, "
3356 			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3357 			pHba->name,
3358 			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3359 							 : "PARAMS_GET",
3360 			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3361 		return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3362 	}
3363 
3364 	return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
3365 }
3366 
3367 
adpt_i2o_quiesce_hba(adpt_hba * pHba)3368 static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3369 {
3370 	u32 msg[4];
3371 	int ret;
3372 
3373 	adpt_i2o_status_get(pHba);
3374 
3375 	/* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3376 
3377 	if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3378    	   (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3379 		return 0;
3380 	}
3381 
3382 	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3383 	msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3384 	msg[2] = 0;
3385 	msg[3] = 0;
3386 
3387 	if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3388 		printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3389 				pHba->unit, -ret);
3390 	} else {
3391 		printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3392 	}
3393 
3394 	adpt_i2o_status_get(pHba);
3395 	return ret;
3396 }
3397 
3398 
3399 /*
3400  * Enable IOP. Allows the IOP to resume external operations.
3401  */
adpt_i2o_enable_hba(adpt_hba * pHba)3402 static int adpt_i2o_enable_hba(adpt_hba* pHba)
3403 {
3404 	u32 msg[4];
3405 	int ret;
3406 
3407 	adpt_i2o_status_get(pHba);
3408 	if(!pHba->status_block){
3409 		return -ENOMEM;
3410 	}
3411 	/* Enable only allowed on READY state */
3412 	if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3413 		return 0;
3414 
3415 	if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3416 		return -EINVAL;
3417 
3418 	msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3419 	msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3420 	msg[2]= 0;
3421 	msg[3]= 0;
3422 
3423 	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3424 		printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3425 			pHba->name, ret);
3426 	} else {
3427 		PDEBUG("%s: Enabled.\n", pHba->name);
3428 	}
3429 
3430 	adpt_i2o_status_get(pHba);
3431 	return ret;
3432 }
3433 
3434 
adpt_i2o_systab_send(adpt_hba * pHba)3435 static int adpt_i2o_systab_send(adpt_hba* pHba)
3436 {
3437 	u32 msg[12];
3438 	int ret;
3439 
3440 	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3441 	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3442 	msg[2] = 0;
3443 	msg[3] = 0;
3444 	msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3445 	msg[5] = 0;				   /* Segment 0 */
3446 
3447 	/*
3448 	 * Provide three SGL-elements:
3449 	 * System table (SysTab), Private memory space declaration and
3450 	 * Private i/o space declaration
3451 	 */
3452 	msg[6] = 0x54000000 | sys_tbl_len;
3453 	msg[7] = (u32)sys_tbl_pa;
3454 	msg[8] = 0x54000000 | 0;
3455 	msg[9] = 0;
3456 	msg[10] = 0xD4000000 | 0;
3457 	msg[11] = 0;
3458 
3459 	if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3460 		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3461 			pHba->name, ret);
3462 	}
3463 #ifdef DEBUG
3464 	else {
3465 		PINFO("%s: SysTab set.\n", pHba->name);
3466 	}
3467 #endif
3468 
3469 	return ret;
3470 }
3471 
3472 
3473 /*============================================================================
3474  *
3475  *============================================================================
3476  */
3477 
3478 
3479 #ifdef UARTDELAY
3480 
adpt_delay(int millisec)3481 static static void adpt_delay(int millisec)
3482 {
3483 	int i;
3484 	for (i = 0; i < millisec; i++) {
3485 		udelay(1000);	/* delay for one millisecond */
3486 	}
3487 }
3488 
3489 #endif
3490 
3491 static struct scsi_host_template driver_template = {
3492 	.module			= THIS_MODULE,
3493 	.name			= "dpt_i2o",
3494 	.proc_name		= "dpt_i2o",
3495 	.show_info		= adpt_show_info,
3496 	.info			= adpt_info,
3497 	.queuecommand		= adpt_queue,
3498 	.eh_abort_handler	= adpt_abort,
3499 	.eh_device_reset_handler = adpt_device_reset,
3500 	.eh_bus_reset_handler	= adpt_bus_reset,
3501 	.eh_host_reset_handler	= adpt_reset,
3502 	.bios_param		= adpt_bios_param,
3503 	.slave_configure	= adpt_slave_configure,
3504 	.can_queue		= MAX_TO_IOP_MESSAGES,
3505 	.this_id		= 7,
3506 };
3507 
adpt_init(void)3508 static int __init adpt_init(void)
3509 {
3510 	int		error;
3511 	adpt_hba	*pHba, *next;
3512 
3513 	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3514 
3515 	error = adpt_detect(&driver_template);
3516 	if (error < 0)
3517 		return error;
3518 	if (hba_chain == NULL)
3519 		return -ENODEV;
3520 
3521 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
3522 		error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3523 		if (error)
3524 			goto fail;
3525 		scsi_scan_host(pHba->host);
3526 	}
3527 	return 0;
3528 fail:
3529 	for (pHba = hba_chain; pHba; pHba = next) {
3530 		next = pHba->next;
3531 		scsi_remove_host(pHba->host);
3532 	}
3533 	return error;
3534 }
3535 
adpt_exit(void)3536 static void __exit adpt_exit(void)
3537 {
3538 	adpt_hba	*pHba, *next;
3539 
3540 	for (pHba = hba_chain; pHba; pHba = next) {
3541 		next = pHba->next;
3542 		adpt_release(pHba);
3543 	}
3544 }
3545 
3546 module_init(adpt_init);
3547 module_exit(adpt_exit);
3548 
3549 MODULE_LICENSE("GPL");
3550