1 /* -*- mode: c; c-basic-offset: 8 -*- */
2
3 /* NCR (or Symbios) 53c700 and 53c700-66 Driver
4 *
5 * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
6 **-----------------------------------------------------------------------------
7 **
8 ** This program is free software; you can redistribute it and/or modify
9 ** it under the terms of the GNU General Public License as published by
10 ** the Free Software Foundation; either version 2 of the License, or
11 ** (at your option) any later version.
12 **
13 ** This program is distributed in the hope that it will be useful,
14 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
15 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 ** GNU General Public License for more details.
17 **
18 ** You should have received a copy of the GNU General Public License
19 ** along with this program; if not, write to the Free Software
20 ** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 **
22 **-----------------------------------------------------------------------------
23 */
24
25 /* Notes:
26 *
27 * This driver is designed exclusively for these chips (virtually the
28 * earliest of the scripts engine chips). They need their own drivers
29 * because they are missing so many of the scripts and snazzy register
30 * features of their elder brothers (the 710, 720 and 770).
31 *
 * The 700 is the lowliest of the line: it can only do asynchronous SCSI.
33 * The 700-66 can at least do synchronous SCSI up to 10MHz.
34 *
35 * The 700 chip has no host bus interface logic of its own. However,
36 * it is usually mapped to a location with well defined register
37 * offsets. Therefore, if you can determine the base address and the
38 * irq your board incorporating this chip uses, you can probably use
39 * this driver to run it (although you'll probably have to write a
40 * minimal wrapper for the purpose---see the NCR_D700 driver for
41 * details about how to do this).
42 *
43 *
44 * TODO List:
45 *
46 * 1. Better statistics in the proc fs
47 *
48 * 2. Implement message queue (queues SCSI messages like commands) and make
49 * the abort and device reset functions use them.
50 * */
51
52 /* CHANGELOG
53 *
54 * Version 2.8
55 *
 * Fixed a bad bug affecting tag starvation processing (previously the
 * driver would hang the system if too many tags starved).  Also fixed a
58 * bad bug having to do with 10 byte command processing and REQUEST
59 * SENSE (the command would loop forever getting a transfer length
60 * mismatch in the CMD phase).
61 *
62 * Version 2.7
63 *
64 * Fixed scripts problem which caused certain devices (notably CDRWs)
65 * to hang on initial INQUIRY. Updated NCR_700_readl/writel to use
66 * __raw_readl/writel for parisc compatibility (Thomas
67 * Bogendoerfer). Added missing SCp->request_bufflen initialisation
68 * for sense requests (Ryan Bradetich).
69 *
70 * Version 2.6
71 *
72 * Following test of the 64 bit parisc kernel by Richard Hirst,
73 * several problems have now been corrected. Also adds support for
74 * consistent memory allocation.
75 *
76 * Version 2.5
77 *
 * More compatibility changes for the 710 (now actually works).  Enhanced
 * support for odd clock speeds which constrain SDTR negotiations.
 * Corrected cacheline separation for scsi messages and status for
81 * incoherent architectures. Use of the pci mapping functions on
82 * buffers to begin support for 64 bit drivers.
83 *
84 * Version 2.4
85 *
86 * Added support for the 53c710 chip (in 53c700 emulation mode only---no
87 * special 53c710 instructions or registers are used).
88 *
89 * Version 2.3
90 *
91 * More endianness/cache coherency changes.
92 *
93 * Better bad device handling (handles devices lying about tag
94 * queueing support and devices which fail to provide sense data on
95 * contingent allegiance conditions)
96 *
97 * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently
98 * debugging this driver on the parisc architecture and suggesting
99 * many improvements and bug fixes.
100 *
101 * Thanks also go to Linuxcare Inc. for providing several PARISC
102 * machines for me to debug the driver on.
103 *
104 * Version 2.2
105 *
106 * Made the driver mem or io mapped; added endian invariance; added
107 * dma cache flushing operations for architectures which need it;
108 * added support for more varied clocking speeds.
109 *
110 * Version 2.1
111 *
112 * Initial modularisation from the D700. See NCR_D700.c for the rest of
113 * the changelog.
114 * */
115 #define NCR_700_VERSION "2.8"
116
117 #include <linux/kernel.h>
118 #include <linux/types.h>
119 #include <linux/string.h>
120 #include <linux/slab.h>
121 #include <linux/ioport.h>
122 #include <linux/delay.h>
123 #include <linux/spinlock.h>
124 #include <linux/completion.h>
125 #include <linux/init.h>
126 #include <linux/proc_fs.h>
127 #include <linux/blkdev.h>
128 #include <linux/module.h>
129 #include <linux/interrupt.h>
130 #include <linux/device.h>
131 #include <asm/dma.h>
132 #include <asm/io.h>
133 #include <asm/pgtable.h>
134 #include <asm/byteorder.h>
135
136 #include <scsi/scsi.h>
137 #include <scsi/scsi_cmnd.h>
138 #include <scsi/scsi_dbg.h>
139 #include <scsi/scsi_eh.h>
140 #include <scsi/scsi_host.h>
141 #include <scsi/scsi_tcq.h>
142 #include <scsi/scsi_transport.h>
143 #include <scsi/scsi_transport_spi.h>
144
145 #include "53c700.h"
146
147 /* NOTE: For 64 bit drivers there are points in the code where we use
148 * a non dereferenceable pointer to point to a structure in dma-able
149 * memory (which is 32 bits) so that we can use all of the structure
150 * operations but take the address at the end. This macro allows us
151 * to truncate the 64 bit pointer down to 32 bits without the compiler
152 * complaining */
153 #define to32bit(x) ((__u32)((unsigned long)(x)))
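/* Usage sketch (mirrors the calls made later in this file): the SG list
 * lives in the dma-able slot memory, so a script patch embeds its 32 bit
 * bus address with
 *
 *	script_patch_32_abs(hostdata->dev, hostdata->script,
 *			    SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
 *
 * as the reselection and start_command paths do below. */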
154
155 #ifdef NCR_700_DEBUG
156 #define STATIC
157 #else
158 #define STATIC static
159 #endif
160
161 MODULE_AUTHOR("James Bottomley");
162 MODULE_DESCRIPTION("53c700 and 53c700-66 Driver");
163 MODULE_LICENSE("GPL");
164
165 /* This is the script */
166 #include "53c700_d.h"
167
168
169 STATIC int NCR_700_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *);
170 STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
171 STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
172 STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
173 STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
174 STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
175 STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
176 STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
177 static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
178
179 STATIC struct device_attribute *NCR_700_dev_attrs[];
180
181 STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
182
183 static char *NCR_700_phase[] = {
184 "",
185 "after selection",
186 "before command phase",
187 "after command phase",
188 "after status phase",
189 "after data in phase",
190 "after data out phase",
191 "during data phase",
192 };
193
194 static char *NCR_700_condition[] = {
195 "",
196 "NOT MSG_OUT",
197 "UNEXPECTED PHASE",
198 "NOT MSG_IN",
199 "UNEXPECTED MSG",
200 "MSG_IN",
201 "SDTR_MSG RECEIVED",
202 "REJECT_MSG RECEIVED",
203 "DISCONNECT_MSG RECEIVED",
204 "MSG_OUT",
205 "DATA_IN",
206
207 };
208
209 static char *NCR_700_fatal_messages[] = {
210 "unexpected message after reselection",
211 "still MSG_OUT after message injection",
212 "not MSG_IN after selection",
213 "Illegal message length received",
214 };
215
216 static char *NCR_700_SBCL_bits[] = {
217 "IO ",
218 "CD ",
219 "MSG ",
220 "ATN ",
221 "SEL ",
222 "BSY ",
223 "ACK ",
224 "REQ ",
225 };
226
227 static char *NCR_700_SBCL_to_phase[] = {
228 "DATA_OUT",
229 "DATA_IN",
230 "CMD_OUT",
231 "STATE",
232 "ILLEGAL PHASE",
233 "ILLEGAL PHASE",
234 "MSG OUT",
235 "MSG IN",
236 };
237
238 /* This translates the SDTR message offset and period to a value
239 * which can be loaded into the SXFER_REG.
240 *
241 * NOTE: According to SCSI-2, the true transfer period (in ns) is
242 * actually four times this period value */
243 static inline __u8
NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
			       __u8 offset, __u8 period)
246 {
247 int XFERP;
248
249 __u8 min_xferp = (hostdata->chip710
250 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
251 __u8 max_offset = (hostdata->chip710
252 ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET);
253
254 if(offset == 0)
255 return 0;
256
257 if(period < hostdata->min_period) {
		printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %d\n", period*4, hostdata->min_period*4);
259 period = hostdata->min_period;
260 }
261 XFERP = (period*4 * hostdata->sync_clock)/1000 - 4;
262 if(offset > max_offset) {
263 printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n",
264 offset, max_offset);
265 offset = max_offset;
266 }
267 if(XFERP < min_xferp) {
268 XFERP = min_xferp;
269 }
270 return (offset & 0x0f) | (XFERP & 0x07)<<4;
271 }
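/* Worked example (illustrative numbers only, not from any particular
 * board): with a 50MHz synchronous clock, a negotiated period value of 50
 * (a true period of 200ns) and an offset of 8, and assuming neither the
 * minimum XFERP nor the maximum offset clamp applies:
 *
 *	XFERP = (200 * 50)/1000 - 4 = 6
 *	SXFER = (8 & 0x0f) | ((6 & 0x07) << 4) = 0x68
 */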
272
273 static inline __u8
NCR_700_get_SXFER(struct scsi_device *SDp)
275 {
276 struct NCR_700_Host_Parameters *hostdata =
277 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
278
279 return NCR_700_offset_period_to_sxfer(hostdata,
280 spi_offset(SDp->sdev_target),
281 spi_period(SDp->sdev_target));
282 }
283
284 struct Scsi_Host *
NCR_700_detect(struct scsi_host_template *tpnt,
	       struct NCR_700_Host_Parameters *hostdata, struct device *dev)
287 {
288 dma_addr_t pScript, pSlots;
289 __u8 *memory;
290 __u32 *script;
291 struct Scsi_Host *host;
292 static int banner = 0;
293 int j;
294
295 if(tpnt->sdev_attrs == NULL)
296 tpnt->sdev_attrs = NCR_700_dev_attrs;
297
298 memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript,
299 GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
300 if(memory == NULL) {
301 printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
302 return NULL;
303 }
304
305 script = (__u32 *)memory;
306 hostdata->msgin = memory + MSGIN_OFFSET;
307 hostdata->msgout = memory + MSGOUT_OFFSET;
308 hostdata->status = memory + STATUS_OFFSET;
309 hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
310 hostdata->dev = dev;
311
312 pSlots = pScript + SLOTS_OFFSET;
313
314 /* Fill in the missing routines from the host template */
315 tpnt->queuecommand = NCR_700_queuecommand;
316 tpnt->eh_abort_handler = NCR_700_abort;
317 tpnt->eh_host_reset_handler = NCR_700_host_reset;
318 tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
319 tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
320 tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
321 tpnt->use_clustering = ENABLE_CLUSTERING;
322 tpnt->slave_configure = NCR_700_slave_configure;
323 tpnt->slave_destroy = NCR_700_slave_destroy;
324 tpnt->slave_alloc = NCR_700_slave_alloc;
325 tpnt->change_queue_depth = NCR_700_change_queue_depth;
326
327 if(tpnt->name == NULL)
328 tpnt->name = "53c700";
329 if(tpnt->proc_name == NULL)
330 tpnt->proc_name = "53c700";
331
332 host = scsi_host_alloc(tpnt, 4);
333 if (!host)
334 return NULL;
335 memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot)
336 * NCR_700_COMMAND_SLOTS_PER_HOST);
337 for (j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) {
338 dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0]
339 - (unsigned long)&hostdata->slots[0].SG[0]);
340 hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset));
341 if(j == 0)
342 hostdata->free_list = &hostdata->slots[j];
343 else
344 hostdata->slots[j-1].ITL_forw = &hostdata->slots[j];
345 hostdata->slots[j].state = NCR_700_SLOT_FREE;
346 }
347
348 for (j = 0; j < ARRAY_SIZE(SCRIPT); j++)
349 script[j] = bS_to_host(SCRIPT[j]);
350
351 /* adjust all labels to be bus physical */
352 for (j = 0; j < PATCHES; j++)
353 script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
354 /* now patch up fixed addresses. */
355 script_patch_32(hostdata->dev, script, MessageLocation,
356 pScript + MSGOUT_OFFSET);
357 script_patch_32(hostdata->dev, script, StatusAddress,
358 pScript + STATUS_OFFSET);
359 script_patch_32(hostdata->dev, script, ReceiveMsgAddress,
360 pScript + MSGIN_OFFSET);
361
362 hostdata->script = script;
363 hostdata->pScript = pScript;
364 dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE);
365 hostdata->state = NCR_700_HOST_FREE;
366 hostdata->cmd = NULL;
367 host->max_id = 8;
368 host->max_lun = NCR_700_MAX_LUNS;
369 BUG_ON(NCR_700_transport_template == NULL);
370 host->transportt = NCR_700_transport_template;
371 host->unique_id = (unsigned long)hostdata->base;
372 hostdata->eh_complete = NULL;
373 host->hostdata[0] = (unsigned long)hostdata;
374 /* kick the chip */
375 NCR_700_writeb(0xff, host, CTEST9_REG);
376 if (hostdata->chip710)
377 hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f;
378 else
379 hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f;
380 hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0);
381 if (banner == 0) {
382 printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By James.Bottomley@HansenPartnership.com\n");
383 banner = 1;
384 }
385 printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no,
386 hostdata->chip710 ? "53c710" :
387 (hostdata->fast ? "53c700-66" : "53c700"),
388 hostdata->rev, hostdata->differential ?
389 "(Differential)" : "");
390 /* reset the chip */
391 NCR_700_chip_reset(host);
392
393 if (scsi_add_host(host, dev)) {
394 dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n");
395 scsi_host_put(host);
396 return NULL;
397 }
398
399 spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD :
400 SPI_SIGNAL_SE;
401
402 return host;
403 }
404
405 int
NCR_700_release(struct Scsi_Host *host)
407 {
408 struct NCR_700_Host_Parameters *hostdata =
409 (struct NCR_700_Host_Parameters *)host->hostdata[0];
410
411 dma_free_attrs(hostdata->dev, TOTAL_MEM_SIZE, hostdata->script,
412 hostdata->pScript, DMA_ATTR_NON_CONSISTENT);
413 return 1;
414 }
415
416 static inline __u8
NCR_700_identify(int can_disconnect, __u8 lun)
418 {
419 return IDENTIFY_BASE |
420 ((can_disconnect) ? 0x40 : 0) |
421 (lun & NCR_700_LUN_MASK);
422 }
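/* Illustrative example: a disconnect-capable device on LUN 2 yields
 * IDENTIFY_BASE | 0x40 | 2, i.e. 0xc2 with the standard IDENTIFY base
 * of 0x80. */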
423
424 /*
 * Function : static int NCR_700_data_residual (struct Scsi_Host *host)
426 *
427 * Purpose : return residual data count of what's in the chip. If you
428 * really want to know what this function is doing, it's almost a
429 * direct transcription of the algorithm described in the 53c710
430 * guide, except that the DBC and DFIFO registers are only 6 bits
431 * wide on a 53c700.
432 *
433 * Inputs : host - SCSI host */
434 static inline int
NCR_700_data_residual (struct Scsi_Host *host) {
436 struct NCR_700_Host_Parameters *hostdata =
437 (struct NCR_700_Host_Parameters *)host->hostdata[0];
438 int count, synchronous = 0;
439 unsigned int ddir;
440
441 if(hostdata->chip710) {
442 count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) -
443 (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f;
444 } else {
445 count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) -
446 (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f;
447 }
448
449 if(hostdata->fast)
450 synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f;
451
452 /* get the data direction */
453 ddir = NCR_700_readb(host, CTEST0_REG) & 0x01;
454
455 if (ddir) {
456 /* Receive */
457 if (synchronous)
458 count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4;
459 else
460 if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL)
461 ++count;
462 } else {
463 /* Send */
464 __u8 sstat = NCR_700_readb(host, SSTAT1_REG);
465 if (sstat & SODL_REG_FULL)
466 ++count;
467 if (synchronous && (sstat & SODR_REG_FULL))
468 ++count;
469 }
470 #ifdef NCR_700_DEBUG
471 if(count)
472 printk("RESIDUAL IS %d (ddir %d)\n", count, ddir);
473 #endif
474 return count;
475 }
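/* Illustrative example (made up register values): on a 710, DFIFO = 0x25
 * with the low bits of DBC = 0x20 gives a FIFO residue of
 * (0x25 - 0x20) & 0x7f = 5 bytes, before any correction for bytes still
 * latched in the SIDL/SODL/SODR registers. */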
476
477 /* print out the SCSI wires and corresponding phase from the SBCL register
478 * in the chip */
479 static inline char *
sbcl_to_string(__u8 sbcl)
481 {
482 int i;
483 static char ret[256];
484
485 ret[0]='\0';
486 for(i=0; i<8; i++) {
487 if((1<<i) & sbcl)
488 strcat(ret, NCR_700_SBCL_bits[i]);
489 }
490 strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]);
491 return ret;
492 }
493
494 static inline __u8
bitmap_to_number(__u8 bitmap)
496 {
497 __u8 i;
498
499 for(i=0; i<8 && !(bitmap &(1<<i)); i++)
500 ;
501 return i;
502 }
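/* e.g. an illustrative reselection bitmap of 0x20 (only bit 5 set) maps to
 * SCSI ID 5. */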
503
504 /* Pull a slot off the free list */
505 STATIC struct NCR_700_command_slot *
find_empty_slot(struct NCR_700_Host_Parameters *hostdata)
507 {
508 struct NCR_700_command_slot *slot = hostdata->free_list;
509
510 if(slot == NULL) {
511 /* sanity check */
512 if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST)
513 printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST);
514 return NULL;
515 }
516
517 if(slot->state != NCR_700_SLOT_FREE)
518 /* should panic! */
519 printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n");
520
521
522 hostdata->free_list = slot->ITL_forw;
523 slot->ITL_forw = NULL;
524
525
526 /* NOTE: set the state to busy here, not queued, since this
527 * indicates the slot is in use and cannot be run by the IRQ
528 * finish routine. If we cannot queue the command when it
	 * is properly built, we then change to NCR_700_SLOT_QUEUED */
530 slot->state = NCR_700_SLOT_BUSY;
531 slot->flags = 0;
532 hostdata->command_slot_count++;
533
534 return slot;
535 }
536
537 STATIC void
free_slot(struct NCR_700_command_slot *slot,
	  struct NCR_700_Host_Parameters *hostdata)
540 {
541 if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) {
542 printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot);
543 }
544 if(slot->state == NCR_700_SLOT_FREE) {
545 printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot);
546 }
547
548 slot->resume_offset = 0;
549 slot->cmnd = NULL;
550 slot->state = NCR_700_SLOT_FREE;
551 slot->ITL_forw = hostdata->free_list;
552 hostdata->free_list = slot;
553 hostdata->command_slot_count--;
554 }
555
556
557 /* This routine really does very little. The command is indexed on
558 the ITL and (if tagged) the ITLQ lists in _queuecommand */
559 STATIC void
save_for_reselection(struct NCR_700_Host_Parameters *hostdata,
		     struct scsi_cmnd *SCp, __u32 dsp)
562 {
	/* It's just possible that this gets executed twice */
564 if(SCp != NULL) {
565 struct NCR_700_command_slot *slot =
566 (struct NCR_700_command_slot *)SCp->host_scribble;
567
568 slot->resume_offset = dsp;
569 }
570 hostdata->state = NCR_700_HOST_FREE;
571 hostdata->cmd = NULL;
572 }
573
574 STATIC inline void
NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
	      struct NCR_700_command_slot *slot)
577 {
578 if(SCp->sc_data_direction != DMA_NONE &&
579 SCp->sc_data_direction != DMA_BIDIRECTIONAL)
580 scsi_dma_unmap(SCp);
581 }
582
583 STATIC inline void
NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
		  struct scsi_cmnd *SCp, int result)
586 {
587 hostdata->state = NCR_700_HOST_FREE;
588 hostdata->cmd = NULL;
589
590 if(SCp != NULL) {
591 struct NCR_700_command_slot *slot =
592 (struct NCR_700_command_slot *)SCp->host_scribble;
593
594 dma_unmap_single(hostdata->dev, slot->pCmd,
595 MAX_COMMAND_SIZE, DMA_TO_DEVICE);
596 if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
597 char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
598
599 dma_unmap_single(hostdata->dev, slot->dma_handle,
600 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
601 /* restore the old result if the request sense was
602 * successful */
603 if (result == 0)
604 result = cmnd[7];
605 /* restore the original length */
606 SCp->cmd_len = cmnd[8];
607 } else
608 NCR_700_unmap(hostdata, SCp, slot);
609
610 free_slot(slot, hostdata);
611 #ifdef NCR_700_DEBUG
612 if(NCR_700_get_depth(SCp->device) == 0 ||
613 NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
614 printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n",
615 NCR_700_get_depth(SCp->device));
616 #endif /* NCR_700_DEBUG */
617 NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1);
618
619 SCp->host_scribble = NULL;
620 SCp->result = result;
621 SCp->scsi_done(SCp);
622 } else {
623 printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
624 }
625 }
626
627
628 STATIC void
NCR_700_internal_bus_reset(struct Scsi_Host *host)
630 {
631 /* Bus reset */
632 NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG);
633 udelay(50);
634 NCR_700_writeb(0, host, SCNTL1_REG);
635
636 }
637
638 STATIC void
NCR_700_chip_setup(struct Scsi_Host *host)
640 {
641 struct NCR_700_Host_Parameters *hostdata =
642 (struct NCR_700_Host_Parameters *)host->hostdata[0];
643 __u8 min_period;
644 __u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
645
646 if(hostdata->chip710) {
647 __u8 burst_disable = 0;
648 __u8 burst_length = 0;
649
650 switch (hostdata->burst_length) {
651 case 1:
652 burst_length = BURST_LENGTH_1;
653 break;
654 case 2:
655 burst_length = BURST_LENGTH_2;
656 break;
657 case 4:
658 burst_length = BURST_LENGTH_4;
659 break;
660 case 8:
661 burst_length = BURST_LENGTH_8;
662 break;
663 default:
664 burst_disable = BURST_DISABLE;
665 break;
666 }
667 hostdata->dcntl_extra |= COMPAT_700_MODE;
668
669 NCR_700_writeb(hostdata->dcntl_extra, host, DCNTL_REG);
670 NCR_700_writeb(burst_length | hostdata->dmode_extra,
671 host, DMODE_710_REG);
672 NCR_700_writeb(burst_disable | hostdata->ctest7_extra |
673 (hostdata->differential ? DIFF : 0),
674 host, CTEST7_REG);
675 NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
676 NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
677 | AUTO_ATN, host, SCNTL0_REG);
678 } else {
679 NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
680 host, DMODE_700_REG);
681 NCR_700_writeb(hostdata->differential ?
682 DIFF : 0, host, CTEST7_REG);
683 if(hostdata->fast) {
684 /* this is for 700-66, does nothing on 700 */
685 NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION
686 | GENERATE_RECEIVE_PARITY, host,
687 CTEST8_REG);
688 } else {
689 NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY
690 | PARITY | AUTO_ATN, host, SCNTL0_REG);
691 }
692 }
693
694 NCR_700_writeb(1 << host->this_id, host, SCID_REG);
695 NCR_700_writeb(0, host, SBCL_REG);
696 NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG);
697
698 NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT | UX_DISC_INT
699 | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG);
700
701 NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG);
702 NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG);
703 if(hostdata->clock > 75) {
		printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75MHz is the maximum this chip can be driven at\n", hostdata->clock);
705 /* do the best we can, but the async clock will be out
706 * of spec: sync divider 2, async divider 3 */
707 DEBUG(("53c700: sync 2 async 3\n"));
708 NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
709 NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
710 hostdata->sync_clock = hostdata->clock/2;
711 } else if(hostdata->clock > 50 && hostdata->clock <= 75) {
712 /* sync divider 1.5, async divider 3 */
713 DEBUG(("53c700: sync 1.5 async 3\n"));
714 NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
715 NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
716 hostdata->sync_clock = hostdata->clock*2;
717 hostdata->sync_clock /= 3;
718
719 } else if(hostdata->clock > 37 && hostdata->clock <= 50) {
720 /* sync divider 1, async divider 2 */
721 DEBUG(("53c700: sync 1 async 2\n"));
722 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
723 NCR_700_writeb(ASYNC_DIV_2_0 | hostdata->dcntl_extra, host, DCNTL_REG);
724 hostdata->sync_clock = hostdata->clock;
725 } else if(hostdata->clock > 25 && hostdata->clock <=37) {
726 /* sync divider 1, async divider 1.5 */
727 DEBUG(("53c700: sync 1 async 1.5\n"));
728 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
729 NCR_700_writeb(ASYNC_DIV_1_5 | hostdata->dcntl_extra, host, DCNTL_REG);
730 hostdata->sync_clock = hostdata->clock;
731 } else {
732 DEBUG(("53c700: sync 1 async 1\n"));
733 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
734 NCR_700_writeb(ASYNC_DIV_1_0 | hostdata->dcntl_extra, host, DCNTL_REG);
735 /* sync divider 1, async divider 1 */
736 hostdata->sync_clock = hostdata->clock;
737 }
738 /* Calculate the actual minimum period that can be supported
739 * by our synchronous clock speed. See the 710 manual for
740 * exact details of this calculation which is based on a
741 * setting of the SXFER register */
742 min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock);
743 hostdata->min_period = NCR_700_MIN_PERIOD;
744 if(min_period > NCR_700_MIN_PERIOD)
745 hostdata->min_period = min_period;
746 }
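/* Worked example of the minimum period calculation above (illustrative
 * numbers only): with a 40MHz synchronous clock and a minimum XFERP of 4,
 *
 *	min_period = 1000*(4 + 4)/(4 * 40) = 50
 *
 * i.e. a true period of 200ns, which is then only adopted if it exceeds
 * NCR_700_MIN_PERIOD. */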
747
748 STATIC void
NCR_700_chip_reset(struct Scsi_Host *host)
750 {
751 struct NCR_700_Host_Parameters *hostdata =
752 (struct NCR_700_Host_Parameters *)host->hostdata[0];
753 if(hostdata->chip710) {
754 NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG);
755 udelay(100);
756
757 NCR_700_writeb(0, host, ISTAT_REG);
758 } else {
759 NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG);
760 udelay(100);
761
762 NCR_700_writeb(0, host, DCNTL_REG);
763 }
764
765 mdelay(1000);
766
767 NCR_700_chip_setup(host);
768 }
769
770 /* The heart of the message processing engine is that the instruction
771 * immediately after the INT is the normal case (and so must be CLEAR
772 * ACK). If we want to do something else, we call that routine in
773 * scripts and set temp to be the normal case + 8 (skipping the CLEAR
774 * ACK) so that the routine returns correctly to resume its activity
775 * */
776 STATIC __u32
process_extended_message(struct Scsi_Host *host,
			 struct NCR_700_Host_Parameters *hostdata,
			 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
780 {
781 __u32 resume_offset = dsp, temp = dsp + 8;
782 __u8 pun = 0xff, lun = 0xff;
783
784 if(SCp != NULL) {
785 pun = SCp->device->id;
786 lun = SCp->device->lun;
787 }
788
789 switch(hostdata->msgin[2]) {
790 case A_SDTR_MSG:
791 if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
792 struct scsi_target *starget = SCp->device->sdev_target;
793 __u8 period = hostdata->msgin[3];
794 __u8 offset = hostdata->msgin[4];
795
796 if(offset == 0 || period == 0) {
797 offset = 0;
798 period = 0;
799 }
800
801 spi_offset(starget) = offset;
802 spi_period(starget) = period;
803
804 if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) {
805 spi_display_xfer_agreement(starget);
806 NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
807 }
808
809 NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
810 NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
811
812 NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
813 host, SXFER_REG);
814
815 } else {
816 /* SDTR message out of the blue, reject it */
817 shost_printk(KERN_WARNING, host,
818 "Unexpected SDTR msg\n");
819 hostdata->msgout[0] = A_REJECT_MSG;
820 dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
821 script_patch_16(hostdata->dev, hostdata->script,
822 MessageCount, 1);
823 /* SendMsgOut returns, so set up the return
824 * address */
825 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
826 }
827 break;
828
829 case A_WDTR_MSG:
830 printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
831 host->host_no, pun, lun);
832 hostdata->msgout[0] = A_REJECT_MSG;
833 dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
834 script_patch_16(hostdata->dev, hostdata->script, MessageCount,
835 1);
836 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
837
838 break;
839
840 default:
841 printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
842 host->host_no, pun, lun,
843 NCR_700_phase[(dsps & 0xf00) >> 8]);
844 spi_print_msg(hostdata->msgin);
845 printk("\n");
846 /* just reject it */
847 hostdata->msgout[0] = A_REJECT_MSG;
848 dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
849 script_patch_16(hostdata->dev, hostdata->script, MessageCount,
850 1);
851 /* SendMsgOut returns, so set up the return
852 * address */
853 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
854 }
855 NCR_700_writel(temp, host, TEMP_REG);
856 return resume_offset;
857 }
858
859 STATIC __u32
process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata,
		struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
862 {
863 /* work out where to return to */
864 __u32 temp = dsp + 8, resume_offset = dsp;
865 __u8 pun = 0xff, lun = 0xff;
866
867 if(SCp != NULL) {
868 pun = SCp->device->id;
869 lun = SCp->device->lun;
870 }
871
872 #ifdef NCR_700_DEBUG
873 printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun,
874 NCR_700_phase[(dsps & 0xf00) >> 8]);
875 spi_print_msg(hostdata->msgin);
876 printk("\n");
877 #endif
878
879 switch(hostdata->msgin[0]) {
880
881 case A_EXTENDED_MSG:
882 resume_offset = process_extended_message(host, hostdata, SCp,
883 dsp, dsps);
884 break;
885
886 case A_REJECT_MSG:
887 if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
888 /* Rejected our sync negotiation attempt */
889 spi_period(SCp->device->sdev_target) =
890 spi_offset(SCp->device->sdev_target) = 0;
891 NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
892 NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
893 } else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) {
894 /* rejected our first simple tag message */
895 scmd_printk(KERN_WARNING, SCp,
896 "Rejected first tag queue attempt, turning off tag queueing\n");
897 /* we're done negotiating */
898 NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
899 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
900
901 SCp->device->tagged_supported = 0;
902 SCp->device->simple_tags = 0;
903 scsi_change_queue_depth(SCp->device, host->cmd_per_lun);
904 } else {
905 shost_printk(KERN_WARNING, host,
906 "(%d:%d) Unexpected REJECT Message %s\n",
907 pun, lun,
908 NCR_700_phase[(dsps & 0xf00) >> 8]);
909 /* however, just ignore it */
910 }
911 break;
912
913 case A_PARITY_ERROR_MSG:
914 printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no,
915 pun, lun);
916 NCR_700_internal_bus_reset(host);
917 break;
918 case A_SIMPLE_TAG_MSG:
919 printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no,
920 pun, lun, hostdata->msgin[1],
921 NCR_700_phase[(dsps & 0xf00) >> 8]);
922 /* just ignore it */
923 break;
924 default:
925 printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
926 host->host_no, pun, lun,
927 NCR_700_phase[(dsps & 0xf00) >> 8]);
928
929 spi_print_msg(hostdata->msgin);
930 printk("\n");
931 /* just reject it */
932 hostdata->msgout[0] = A_REJECT_MSG;
933 dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
934 script_patch_16(hostdata->dev, hostdata->script, MessageCount,
935 1);
936 /* SendMsgOut returns, so set up the return
937 * address */
938 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
939
940 break;
941 }
942 NCR_700_writel(temp, host, TEMP_REG);
943 /* set us up to receive another message */
944 dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
945 return resume_offset;
946 }
947
948 STATIC __u32
process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
			 struct Scsi_Host *host,
			 struct NCR_700_Host_Parameters *hostdata)
952 {
953 __u32 resume_offset = 0;
954 __u8 pun = 0xff, lun=0xff;
955
956 if(SCp != NULL) {
957 pun = SCp->device->id;
958 lun = SCp->device->lun;
959 }
960
961 if(dsps == A_GOOD_STATUS_AFTER_STATUS) {
962 DEBUG((" COMMAND COMPLETE, status=%02x\n",
963 hostdata->status[0]));
964 /* OK, if TCQ still under negotiation, we now know it works */
965 if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
966 NCR_700_set_tag_neg_state(SCp->device,
967 NCR_700_FINISHED_TAG_NEGOTIATION);
968
		/* check for contingent allegiance conditions */
970 if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
971 status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
972 struct NCR_700_command_slot *slot =
973 (struct NCR_700_command_slot *)SCp->host_scribble;
974 if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
975 /* OOPS: bad device, returning another
976 * contingent allegiance condition */
977 scmd_printk(KERN_ERR, SCp,
978 "broken device is looping in contingent allegiance: ignoring\n");
979 NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
980 } else {
981 char *cmnd =
982 NCR_700_get_sense_cmnd(SCp->device);
983 #ifdef NCR_DEBUG
984 scsi_print_command(SCp);
985 printk(" cmd %p has status %d, requesting sense\n",
986 SCp, hostdata->status[0]);
987 #endif
988 /* we can destroy the command here
989 * because the contingent allegiance
990 * condition will cause a retry which
991 * will re-copy the command from the
992 * saved data_cmnd. We also unmap any
993 * data associated with the command
994 * here */
995 NCR_700_unmap(hostdata, SCp, slot);
996 dma_unmap_single(hostdata->dev, slot->pCmd,
997 MAX_COMMAND_SIZE,
998 DMA_TO_DEVICE);
999
1000 cmnd[0] = REQUEST_SENSE;
1001 cmnd[1] = (lun & 0x7) << 5;
1002 cmnd[2] = 0;
1003 cmnd[3] = 0;
1004 cmnd[4] = SCSI_SENSE_BUFFERSIZE;
1005 cmnd[5] = 0;
1006 /* Here's a quiet hack: the
1007 * REQUEST_SENSE command is six bytes,
1008 * so store a flag indicating that
1009 * this was an internal sense request
1010 * and the original status at the end
1011 * of the command */
1012 cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
1013 cmnd[7] = hostdata->status[0];
1014 cmnd[8] = SCp->cmd_len;
1015 SCp->cmd_len = 6; /* command length for
1016 * REQUEST_SENSE */
1017 slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1018 slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1019 slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | SCSI_SENSE_BUFFERSIZE);
1020 slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
1021 slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
1022 slot->SG[1].pAddr = 0;
1023 slot->resume_offset = hostdata->pScript;
1024 dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
1025 dma_cache_sync(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1026
1027 /* queue the command for reissue */
1028 slot->state = NCR_700_SLOT_QUEUED;
1029 slot->flags = NCR_700_FLAG_AUTOSENSE;
1030 hostdata->state = NCR_700_HOST_FREE;
1031 hostdata->cmd = NULL;
1032 }
1033 } else {
1034 // Currently rely on the mid layer evaluation
1035 // of the tag queuing capability
1036 //
1037 //if(status_byte(hostdata->status[0]) == GOOD &&
1038 // SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) {
1039 // /* Piggy back the tag queueing support
1040 // * on this command */
1041 // dma_sync_single_for_cpu(hostdata->dev,
1042 // slot->dma_handle,
1043 // SCp->request_bufflen,
1044 // DMA_FROM_DEVICE);
1045 // if(((char *)SCp->request_buffer)[7] & 0x02) {
1046 // scmd_printk(KERN_INFO, SCp,
1047 // "Enabling Tag Command Queuing\n");
1048 // hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1049 // NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1050 // } else {
1051 // NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1052 // hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1053 // }
1054 //}
1055 NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
1056 }
1057 } else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) {
1058 __u8 i = (dsps & 0xf00) >> 8;
1059
1060 scmd_printk(KERN_ERR, SCp, "UNEXPECTED PHASE %s (%s)\n",
1061 NCR_700_phase[i],
1062 sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1063 scmd_printk(KERN_ERR, SCp, " len = %d, cmd =",
1064 SCp->cmd_len);
1065 scsi_print_command(SCp);
1066
1067 NCR_700_internal_bus_reset(host);
1068 } else if((dsps & 0xfffff000) == A_FATAL) {
1069 int i = (dsps & 0xfff);
1070
1071 printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n",
1072 host->host_no, pun, lun, NCR_700_fatal_messages[i]);
1073 if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) {
1074 printk(KERN_ERR " msg begins %02x %02x\n",
1075 hostdata->msgin[0], hostdata->msgin[1]);
1076 }
1077 NCR_700_internal_bus_reset(host);
1078 } else if((dsps & 0xfffff0f0) == A_DISCONNECT) {
1079 #ifdef NCR_700_DEBUG
1080 __u8 i = (dsps & 0xf00) >> 8;
1081
1082 printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n",
1083 host->host_no, pun, lun,
1084 i, NCR_700_phase[i]);
1085 #endif
1086 save_for_reselection(hostdata, SCp, dsp);
1087
1088 } else if(dsps == A_RESELECTION_IDENTIFIED) {
1089 __u8 lun;
1090 struct NCR_700_command_slot *slot;
1091 __u8 reselection_id = hostdata->reselection_id;
1092 struct scsi_device *SDp;
1093
1094 lun = hostdata->msgin[0] & 0x1f;
1095
1096 hostdata->reselection_id = 0xff;
1097 DEBUG(("scsi%d: (%d:%d) RESELECTED!\n",
1098 host->host_no, reselection_id, lun));
1099 /* clear the reselection indicator */
1100 SDp = __scsi_device_lookup(host, 0, reselection_id, lun);
1101 if(unlikely(SDp == NULL)) {
1102 printk(KERN_ERR "scsi%d: (%d:%d) HAS NO device\n",
1103 host->host_no, reselection_id, lun);
1104 BUG();
1105 }
1106 if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) {
1107 struct scsi_cmnd *SCp;
1108
1109 SCp = scsi_host_find_tag(SDp->host, hostdata->msgin[2]);
1110 if(unlikely(SCp == NULL)) {
1111 printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n",
1112 host->host_no, reselection_id, lun, hostdata->msgin[2]);
1113 BUG();
1114 }
1115
1116 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1117 DDEBUG(KERN_DEBUG, SDp,
1118 "reselection is tag %d, slot %p(%d)\n",
1119 hostdata->msgin[2], slot, slot->tag);
1120 } else {
1121 struct NCR_700_Device_Parameters *p = SDp->hostdata;
1122 struct scsi_cmnd *SCp = p->current_cmnd;
1123
1124 if(unlikely(SCp == NULL)) {
1125 sdev_printk(KERN_ERR, SDp,
1126 "no saved request for untagged cmd\n");
1127 BUG();
1128 }
1129 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1130 }
1131
1132 if(slot == NULL) {
1133 printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n",
1134 host->host_no, reselection_id, lun,
1135 hostdata->msgin[0], hostdata->msgin[1],
1136 hostdata->msgin[2]);
1137 } else {
1138 if(hostdata->state != NCR_700_HOST_BUSY)
1139 printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n",
1140 host->host_no);
1141 resume_offset = slot->resume_offset;
1142 hostdata->cmd = slot->cmnd;
1143
1144 /* re-patch for this command */
1145 script_patch_32_abs(hostdata->dev, hostdata->script,
1146 CommandAddress, slot->pCmd);
1147 script_patch_16(hostdata->dev, hostdata->script,
1148 CommandCount, slot->cmnd->cmd_len);
1149 script_patch_32_abs(hostdata->dev, hostdata->script,
1150 SGScriptStartAddress,
1151 to32bit(&slot->pSG[0].ins));
1152
1153 /* Note: setting SXFER only works if we're
1154 * still in the MESSAGE phase, so it is vital
1155 * that ACK is still asserted when we process
1156 * the reselection message. The resume offset
1157 * should therefore always clear ACK */
1158 NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
1159 host, SXFER_REG);
1160 dma_cache_sync(hostdata->dev, hostdata->msgin,
1161 MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
1162 dma_cache_sync(hostdata->dev, hostdata->msgout,
1163 MSG_ARRAY_SIZE, DMA_TO_DEVICE);
1164 /* I'm just being paranoid here, the command should
1165 * already have been flushed from the cache */
1166 dma_cache_sync(hostdata->dev, slot->cmnd->cmnd,
1167 slot->cmnd->cmd_len, DMA_TO_DEVICE);
1168
1169
1170
1171 }
1172 } else if(dsps == A_RESELECTED_DURING_SELECTION) {
1173
1174 /* This section is full of debugging code because I've
1175 * never managed to reach it. I think what happens is
		 * that, because the 700 runs with selection
		 * interrupts enabled the whole time, we take a
		 * selection interrupt before we manage to get to the
1179 * reselected script interrupt */
1180
1181 __u8 reselection_id = NCR_700_readb(host, SFBR_REG);
1182 struct NCR_700_command_slot *slot;
1183
1184 /* Take out our own ID */
1185 reselection_id &= ~(1<<host->this_id);
1186
1187 /* I've never seen this happen, so keep this as a printk rather
1188 * than a debug */
1189 printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n",
1190 host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count);
1191
1192 {
1193 /* FIXME: DEBUGGING CODE */
1194 __u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]);
1195 int i;
1196
1197 for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1198 if(SG >= to32bit(&hostdata->slots[i].pSG[0])
1199 && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS]))
1200 break;
1201 }
1202 printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset);
1203 SCp = hostdata->slots[i].cmnd;
1204 }
1205
1206 if(SCp != NULL) {
1207 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1208 /* change slot from busy to queued to redo command */
1209 slot->state = NCR_700_SLOT_QUEUED;
1210 }
1211 hostdata->cmd = NULL;
1212
1213 if(reselection_id == 0) {
1214 if(hostdata->reselection_id == 0xff) {
1215 printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no);
1216 return 0;
1217 } else {
1218 printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n",
1219 host->host_no);
1220 reselection_id = hostdata->reselection_id;
1221 }
1222 } else {
1223
1224 /* convert to real ID */
1225 reselection_id = bitmap_to_number(reselection_id);
1226 }
1227 hostdata->reselection_id = reselection_id;
1228 /* just in case we have a stale simple tag message, clear it */
1229 hostdata->msgin[1] = 0;
1230 dma_cache_sync(hostdata->dev, hostdata->msgin,
1231 MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
1232 if(hostdata->tag_negotiated & (1<<reselection_id)) {
1233 resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1234 } else {
1235 resume_offset = hostdata->pScript + Ent_GetReselectionData;
1236 }
1237 } else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) {
1238 /* we've just disconnected from the bus, do nothing since
1239 * a return here will re-run the queued command slot
1240 * that may have been interrupted by the initial selection */
1241 DEBUG((" SELECTION COMPLETED\n"));
1242 } else if((dsps & 0xfffff0f0) == A_MSG_IN) {
1243 resume_offset = process_message(host, hostdata, SCp,
1244 dsp, dsps);
1245 } else if((dsps & 0xfffff000) == 0) {
1246 __u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8;
1247 printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n",
1248 host->host_no, pun, lun, NCR_700_condition[i],
1249 NCR_700_phase[j], dsp - hostdata->pScript);
1250 if(SCp != NULL) {
1251 struct scatterlist *sg;
1252
1253 scsi_print_command(SCp);
1254 scsi_for_each_sg(SCp, sg, scsi_sg_count(SCp) + 1, i) {
1255 printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, sg->length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
1256 }
1257 }
1258 NCR_700_internal_bus_reset(host);
1259 } else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
1260 printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
1261 host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript);
1262 resume_offset = dsp;
1263 } else {
1264 printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n",
1265 host->host_no, pun, lun, dsps, dsp - hostdata->pScript);
1266 NCR_700_internal_bus_reset(host);
1267 }
1268 return resume_offset;
1269 }
1270
1271 /* We run the 53c700 with selection interrupts always enabled. This
1272 * means that the chip may be selected as soon as the bus frees. On a
1273 * busy bus, this can be before the scripts engine finishes its
1274 * processing. Therefore, part of the selection processing has to be
1275 * to find out what the scripts engine is doing and complete the
1276 * function if necessary (i.e. process the pending disconnect or save
 * the interrupted initial selection) */
1278 STATIC inline __u32
process_selection(struct Scsi_Host *host, __u32 dsp)
1280 {
1281 __u8 id = 0; /* Squash compiler warning */
1282 int count = 0;
1283 __u32 resume_offset = 0;
1284 struct NCR_700_Host_Parameters *hostdata =
1285 (struct NCR_700_Host_Parameters *)host->hostdata[0];
1286 struct scsi_cmnd *SCp = hostdata->cmd;
1287 __u8 sbcl;
1288
1289 for(count = 0; count < 5; count++) {
1290 id = NCR_700_readb(host, hostdata->chip710 ?
1291 CTEST9_REG : SFBR_REG);
1292
1293 /* Take out our own ID */
1294 id &= ~(1<<host->this_id);
1295 if(id != 0)
1296 break;
1297 udelay(5);
1298 }
1299 sbcl = NCR_700_readb(host, SBCL_REG);
1300 if((sbcl & SBCL_IO) == 0) {
1301 /* mark as having been selected rather than reselected */
1302 id = 0xff;
1303 } else {
1304 /* convert to real ID */
1305 hostdata->reselection_id = id = bitmap_to_number(id);
1306 DEBUG(("scsi%d: Reselected by %d\n",
1307 host->host_no, id));
1308 }
1309 if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) {
1310 struct NCR_700_command_slot *slot =
1311 (struct NCR_700_command_slot *)SCp->host_scribble;
1312 DEBUG((" ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset));
1313
1314 switch(dsp - hostdata->pScript) {
1315 case Ent_Disconnect1:
1316 case Ent_Disconnect2:
1317 save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript);
1318 break;
1319 case Ent_Disconnect3:
1320 case Ent_Disconnect4:
1321 save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript);
1322 break;
1323 case Ent_Disconnect5:
1324 case Ent_Disconnect6:
1325 save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript);
1326 break;
1327 case Ent_Disconnect7:
1328 case Ent_Disconnect8:
1329 save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript);
1330 break;
1331 case Ent_Finish1:
1332 case Ent_Finish2:
1333 process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata);
1334 break;
1335
1336 default:
1337 slot->state = NCR_700_SLOT_QUEUED;
1338 break;
1339 }
1340 }
1341 hostdata->state = NCR_700_HOST_BUSY;
1342 hostdata->cmd = NULL;
1343 /* clear any stale simple tag message */
1344 hostdata->msgin[1] = 0;
1345 dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
1346 DMA_BIDIRECTIONAL);
1347
1348 if(id == 0xff) {
1349 /* Selected as target, Ignore */
1350 resume_offset = hostdata->pScript + Ent_SelectedAsTarget;
1351 } else if(hostdata->tag_negotiated & (1<<id)) {
1352 resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1353 } else {
1354 resume_offset = hostdata->pScript + Ent_GetReselectionData;
1355 }
1356 return resume_offset;
1357 }
1358
1359 static inline void
NCR_700_clear_fifo(struct Scsi_Host *host) {
1361 const struct NCR_700_Host_Parameters *hostdata
1362 = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1363 if(hostdata->chip710) {
1364 NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG);
1365 } else {
1366 NCR_700_writeb(CLR_FIFO, host, DFIFO_REG);
1367 }
1368 }
1369
1370 static inline void
NCR_700_flush_fifo(struct Scsi_Host *host) {
1372 const struct NCR_700_Host_Parameters *hostdata
1373 = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1374 if(hostdata->chip710) {
1375 NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG);
1376 udelay(10);
1377 NCR_700_writeb(0, host, CTEST8_REG);
1378 } else {
1379 NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG);
1380 udelay(10);
1381 NCR_700_writeb(0, host, DFIFO_REG);
1382 }
1383 }
1384
1385
1386 /* The queue lock with interrupts disabled must be held on entry to
1387 * this function */
1388 STATIC int
NCR_700_start_command(struct scsi_cmnd *SCp)
1390 {
1391 struct NCR_700_command_slot *slot =
1392 (struct NCR_700_command_slot *)SCp->host_scribble;
1393 struct NCR_700_Host_Parameters *hostdata =
1394 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1395 __u16 count = 1; /* for IDENTIFY message */
1396 u8 lun = SCp->device->lun;
1397
1398 if(hostdata->state != NCR_700_HOST_FREE) {
1399 /* keep this inside the lock to close the race window where
1400 * the running command finishes on another CPU while we don't
1401 * change the state to queued on this one */
1402 slot->state = NCR_700_SLOT_QUEUED;
1403
1404 DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n",
1405 SCp->device->host->host_no, slot->cmnd, slot));
1406 return 0;
1407 }
1408 hostdata->state = NCR_700_HOST_BUSY;
1409 hostdata->cmd = SCp;
1410 slot->state = NCR_700_SLOT_BUSY;
1411 /* keep interrupts disabled until we have the command correctly
1412 * set up so we cannot take a selection interrupt */
1413
1414 hostdata->msgout[0] = NCR_700_identify((SCp->cmnd[0] != REQUEST_SENSE &&
1415 slot->flags != NCR_700_FLAG_AUTOSENSE),
1416 lun);
1417 /* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
1418 * if the negotiated transfer parameters still hold, so
1419 * always renegotiate them */
1420 if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE ||
1421 slot->flags == NCR_700_FLAG_AUTOSENSE) {
1422 NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
1423 }
1424
1425 /* REQUEST_SENSE is asking for contingent I_T_L(_Q) status.
1426 * If a contingent allegiance condition exists, the device
1427 * will refuse all tags, so send the request sense as untagged
1428 * */
1429 if((hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1430 && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE &&
1431 slot->flags != NCR_700_FLAG_AUTOSENSE)) {
1432 count += spi_populate_tag_msg(&hostdata->msgout[count], SCp);
1433 }
1434
1435 if(hostdata->fast &&
1436 NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) {
1437 count += spi_populate_sync_msg(&hostdata->msgout[count],
1438 spi_period(SCp->device->sdev_target),
1439 spi_offset(SCp->device->sdev_target));
1440 NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1441 }
1442
1443 script_patch_16(hostdata->dev, hostdata->script, MessageCount, count);
1444
1445
1446 script_patch_ID(hostdata->dev, hostdata->script,
1447 Device_ID, 1<<scmd_id(SCp));
1448
1449 script_patch_32_abs(hostdata->dev, hostdata->script, CommandAddress,
1450 slot->pCmd);
1451 script_patch_16(hostdata->dev, hostdata->script, CommandCount,
1452 SCp->cmd_len);
1453 /* finally plumb the beginning of the SG list into the script
1454 * */
1455 script_patch_32_abs(hostdata->dev, hostdata->script,
1456 SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
1457 NCR_700_clear_fifo(SCp->device->host);
1458
1459 if(slot->resume_offset == 0)
1460 slot->resume_offset = hostdata->pScript;
1461 /* now perform all the writebacks and invalidates */
1462 dma_cache_sync(hostdata->dev, hostdata->msgout, count, DMA_TO_DEVICE);
1463 dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
1464 DMA_FROM_DEVICE);
1465 dma_cache_sync(hostdata->dev, SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
1466 dma_cache_sync(hostdata->dev, hostdata->status, 1, DMA_FROM_DEVICE);
1467
1468 /* set the synchronous period/offset */
1469 NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
1470 SCp->device->host, SXFER_REG);
1471 NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG);
1472 NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG);
1473
1474 return 1;
1475 }
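/* Illustrative msgout layout built above for a tagged command that still
 * needs to negotiate synchronous transfer (actual byte values depend on
 * the device and on spi_populate_tag_msg/spi_populate_sync_msg):
 *
 *	msgout[0]	IDENTIFY (disconnect allowed, LUN)
 *	msgout[1..2]	SIMPLE QUEUE TAG message, tag number
 *	msgout[3..7]	extended SDTR message (period, offset)
 *
 * MessageCount is patched with the total number of bytes actually built. */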
1476
1477 irqreturn_t
NCR_700_intr(int irq, void *dev_id)
1479 {
1480 struct Scsi_Host *host = (struct Scsi_Host *)dev_id;
1481 struct NCR_700_Host_Parameters *hostdata =
1482 (struct NCR_700_Host_Parameters *)host->hostdata[0];
1483 __u8 istat;
1484 __u32 resume_offset = 0;
1485 __u8 pun = 0xff, lun = 0xff;
1486 unsigned long flags;
1487 int handled = 0;
1488
1489 /* Use the host lock to serialise access to the 53c700
1490 * hardware. Note: In future, we may need to take the queue
1491 * lock to enter the done routines. When that happens, we
1492 * need to ensure that for this driver, the host lock and the
1493 * queue lock point to the same thing. */
1494 spin_lock_irqsave(host->host_lock, flags);
1495 if((istat = NCR_700_readb(host, ISTAT_REG))
1496 & (SCSI_INT_PENDING | DMA_INT_PENDING)) {
1497 __u32 dsps;
1498 __u8 sstat0 = 0, dstat = 0;
1499 __u32 dsp;
1500 struct scsi_cmnd *SCp = hostdata->cmd;
1501 enum NCR_700_Host_State state;
1502
1503 handled = 1;
1504 state = hostdata->state;
1505 SCp = hostdata->cmd;
1506
1507 if(istat & SCSI_INT_PENDING) {
1508 udelay(10);
1509
1510 sstat0 = NCR_700_readb(host, SSTAT0_REG);
1511 }
1512
1513 if(istat & DMA_INT_PENDING) {
1514 udelay(10);
1515
1516 dstat = NCR_700_readb(host, DSTAT_REG);
1517 }
1518
1519 dsps = NCR_700_readl(host, DSPS_REG);
1520 dsp = NCR_700_readl(host, DSP_REG);
1521
1522 DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n",
1523 host->host_no, istat, sstat0, dstat,
1524 (dsp - (__u32)(hostdata->pScript))/4,
1525 dsp, dsps));
1526
1527 if(SCp != NULL) {
1528 pun = SCp->device->id;
1529 lun = SCp->device->lun;
1530 }
1531
1532 if(sstat0 & SCSI_RESET_DETECTED) {
1533 struct scsi_device *SDp;
1534 int i;
1535
1536 hostdata->state = NCR_700_HOST_BUSY;
1537
1538 printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n",
1539 host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript);
1540
1541 scsi_report_bus_reset(host, 0);
1542
1543 /* clear all the negotiated parameters */
1544 __shost_for_each_device(SDp, host)
1545 NCR_700_clear_flag(SDp, ~0);
1546
1547 /* clear all the slots and their pending commands */
1548 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1549 struct scsi_cmnd *SCp;
1550 struct NCR_700_command_slot *slot =
1551 &hostdata->slots[i];
1552
1553 if(slot->state == NCR_700_SLOT_FREE)
1554 continue;
1555
1556 SCp = slot->cmnd;
1557 printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n",
1558 slot, SCp);
1559 free_slot(slot, hostdata);
1560 SCp->host_scribble = NULL;
1561 NCR_700_set_depth(SCp->device, 0);
1562 /* NOTE: deadlock potential here: we
1563 * rely on mid-layer guarantees that
1564 * scsi_done won't try to issue the
1565 * command again otherwise we'll
1566 * deadlock on the
1567 * hostdata->state_lock */
1568 SCp->result = DID_RESET << 16;
1569 SCp->scsi_done(SCp);
1570 }
1571 mdelay(25);
1572 NCR_700_chip_setup(host);
1573
1574 hostdata->state = NCR_700_HOST_FREE;
1575 hostdata->cmd = NULL;
1576 /* signal back if this was an eh induced reset */
1577 if(hostdata->eh_complete != NULL)
1578 complete(hostdata->eh_complete);
1579 goto out_unlock;
1580 } else if(sstat0 & SELECTION_TIMEOUT) {
1581 DEBUG(("scsi%d: (%d:%d) selection timeout\n",
1582 host->host_no, pun, lun));
1583 NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16);
1584 } else if(sstat0 & PHASE_MISMATCH) {
1585 struct NCR_700_command_slot *slot = (SCp == NULL) ? NULL :
1586 (struct NCR_700_command_slot *)SCp->host_scribble;
1587
1588 if(dsp == Ent_SendMessage + 8 + hostdata->pScript) {
1589 /* It wants to reply to some part of
1590 * our message */
1591 #ifdef NCR_700_DEBUG
1592 __u32 temp = NCR_700_readl(host, TEMP_REG);
1593 int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host));
1594 printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1595 #endif
1596 resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
1597 } else if(dsp >= to32bit(&slot->pSG[0].ins) &&
1598 dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
1599 int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
1600 int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
1601 int residual = NCR_700_data_residual(host);
1602 int i;
1603 #ifdef NCR_700_DEBUG
1604 __u32 naddr = NCR_700_readl(host, DNAD_REG);
1605
1606 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n",
1607 host->host_no, pun, lun,
1608 SGcount, data_transfer);
1609 scsi_print_command(SCp);
1610 if(residual) {
1611 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n",
1612 host->host_no, pun, lun,
1613 SGcount, data_transfer, residual);
1614 }
1615 #endif
1616 data_transfer += residual;
1617
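/* At this point data_transfer holds the bytes of the interrupted
 * MOVE that never made it to or from the device (the remaining DMA
 * count plus anything still sitting in the chip FIFOs), and SGcount,
 * derived from DSP, indexes the scripts entry *after* the one that
 * was cut short (hence the decrement below).
 *
 * Illustrative example (values are made up): if slot->SG[2] was
 * programmed to move 0x2000 bytes from bus address P and the target
 * changed phase after 0x1800 bytes, data_transfer ends up as 0x800.
 * The entry is rewritten to move the remaining 0x800 bytes from
 * P + 0x1800, and the earlier, completed entries are turned into
 * NOPs so the table can simply be re-run when the target
 * reconnects. */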
1618 if(data_transfer != 0) {
1619 int count;
1620 __u32 pAddr;
1621
1622 SGcount--;
1623
1624 count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff);
1625 DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer));
1626 slot->SG[SGcount].ins &= bS_to_host(0xff000000);
1627 slot->SG[SGcount].ins |= bS_to_host(data_transfer);
1628 pAddr = bS_to_cpu(slot->SG[SGcount].pAddr);
1629 pAddr += (count - data_transfer);
1630 #ifdef NCR_700_DEBUG
1631 if(pAddr != naddr) {
1632 printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual);
1633 }
1634 #endif
1635 slot->SG[SGcount].pAddr = bS_to_host(pAddr);
1636 }
1637 /* set the executed moves to nops */
1638 for(i=0; i<SGcount; i++) {
1639 slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
1640 slot->SG[i].pAddr = 0;
1641 }
1642 dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1643 /* and pretend we disconnected after
1644 * the command phase */
1645 resume_offset = hostdata->pScript + Ent_MsgInDuringData;
1646 /* make sure all the data is flushed */
1647 NCR_700_flush_fifo(host);
1648 } else {
1649 __u8 sbcl = NCR_700_readb(host, SBCL_REG);
1650 printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n",
1651 host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl));
1652 NCR_700_internal_bus_reset(host);
1653 }
1654
1655 } else if(sstat0 & SCSI_GROSS_ERROR) {
1656 printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n",
1657 host->host_no, pun, lun);
1658 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1659 } else if(sstat0 & PARITY_ERROR) {
1660 printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n",
1661 host->host_no, pun, lun);
1662 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1663 } else if(dstat & SCRIPT_INT_RECEIVED) {
1664 DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n",
1665 host->host_no, pun, lun));
1666 resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata);
1667 } else if(dstat & (ILGL_INST_DETECTED)) {
1668 printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n"
1669 " Please email James.Bottomley@HansenPartnership.com with the details\n",
1670 host->host_no, pun, lun,
1671 dsp, dsp - hostdata->pScript);
1672 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1673 } else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) {
1674 printk(KERN_ERR "scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n",
1675 host->host_no, pun, lun, dstat);
1676 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1677 }
1678
1679
1680 /* NOTE: selection interrupt processing MUST occur
1681  * after script interrupt processing to correctly cope
1682  * with the case where the target disconnects and then
1683  * reselects us before we have processed the original
1684  * disconnect script interrupt */
1685 if(sstat0 & SELECTED) {
1686 /* FIXME: It currently takes at least FOUR
1687 * interrupts to complete a command that
1688 * disconnects: one for the disconnect, one
1689 * for the reselection, one to get the
1690 * reselection data and one to complete the
1691 * command. If we guess the reselected
1692 * command here and prepare it, we only need
1693 * to get a reselection data interrupt if we
1694 * guessed wrongly. Since the interrupt
1695 * overhead is much greater than the command
1696 * setup, this would be an efficient
1697 * optimisation particularly as we probably
1698 * only have one outstanding command on a
1699 * target most of the time */
1700
1701 resume_offset = process_selection(host, dsp);
1702
1703 }
1704
1705 }
1706
1707 if(resume_offset) {
1708 if(hostdata->state != NCR_700_HOST_BUSY) {
1709 printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n",
1710 host->host_no, resume_offset, resume_offset - hostdata->pScript);
1711 hostdata->state = NCR_700_HOST_BUSY;
1712 }
1713
1714 DEBUG(("Attempting to resume at %x\n", resume_offset));
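/* clear out any stale FIFO contents and write the resume address
 * into DSP, which restarts the scripts engine at that point */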
1715 NCR_700_clear_fifo(host);
1716 NCR_700_writel(resume_offset, host, DSP_REG);
1717 }
1718 /* There is probably a technical no-no about this: if we're a
1719  * shared interrupt and we got this interrupt because the
1720  * other device needs servicing, not us, we're still going to
1721  * check our queued commands here---of course, there shouldn't
1722  * be any outstanding.... */
1723 if(hostdata->state == NCR_700_HOST_FREE) {
1724 int i;
1725
1726 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1727 /* fairness: always run the queue from the last
1728 * position we left off */
1729 int j = (i + hostdata->saved_slot_position)
1730 % NCR_700_COMMAND_SLOTS_PER_HOST;
1731
1732 if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
1733 continue;
1734 if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
1735 DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n",
1736 host->host_no, &hostdata->slots[j],
1737 hostdata->slots[j].cmnd));
1738 hostdata->saved_slot_position = j + 1;
1739 }
1740
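/* only one queued command is started per pass; its completion
 * interrupt should bring us back here for any others that are
 * still queued */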
1741 break;
1742 }
1743 }
1744 out_unlock:
1745 spin_unlock_irqrestore(host->host_lock, flags);
1746 return IRQ_RETVAL(handled);
1747 }
1748
1749 static int
1750 NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
1751 {
1752 struct NCR_700_Host_Parameters *hostdata =
1753 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1754 __u32 move_ins;
1755 enum dma_data_direction direction;
1756 struct NCR_700_command_slot *slot;
1757
1758 if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
1759 /* We're over our allocation; this should never happen
1760  * since we report the max allocation to the mid layer */
1761 printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no);
1762 return 1;
1763 }
1764 /* check for untagged commands. We cannot accept an untagged command
1765  * while any other command is outstanding. A command may be untagged
1766  * because:
1767  * - The tag negotiated bitmap is clear
1768  * - The blk layer sent an untagged command
1769  */
1770 if(NCR_700_get_depth(SCp->device) != 0
1771 && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1772 || !(SCp->flags & SCMD_TAGGED))) {
1773 CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n",
1774 NCR_700_get_depth(SCp->device));
1775 return SCSI_MLQUEUE_DEVICE_BUSY;
1776 }
1777 if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
1778 CDEBUG(KERN_ERR, SCp, "has max tag depth %d\n",
1779 NCR_700_get_depth(SCp->device));
1780 return SCSI_MLQUEUE_DEVICE_BUSY;
1781 }
1782 NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);
1783
1784 /* begin the command here */
1785 /* no need to check for NULL; the command_slot_count test above
1786  * ensures a slot is free */
1787 slot = find_empty_slot(hostdata);
1788
1789 slot->cmnd = SCp;
1790
1791 SCp->scsi_done = done;
1792 SCp->host_scribble = (unsigned char *)slot;
1793 SCp->SCp.ptr = NULL;
1794 SCp->SCp.buffer = NULL;
1795
1796 #ifdef NCR_700_DEBUG
1797 printk("53c700: scsi%d, command ", SCp->device->host->host_no);
1798 scsi_print_command(SCp);
1799 #endif
1800 if ((SCp->flags & SCMD_TAGGED)
1801 && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0
1802 && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
1803 scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n");
1804 hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1805 NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION);
1806 }
1807
1808 /* here we may have to process an untagged command. The gate
1809 * above ensures that this will be the only one outstanding,
1810 * so clear the tag negotiated bit.
1811 *
1812 * FIXME: This will royally screw up on multiple LUN devices
1813 * */
1814 if (!(SCp->flags & SCMD_TAGGED)
1815 && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) {
1816 scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n");
1817 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1818 }
1819
1820 if ((hostdata->tag_negotiated & (1<<scmd_id(SCp))) &&
1821 SCp->device->simple_tags) {
1822 slot->tag = SCp->request->tag;
1823 CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
1824 slot->tag, slot);
1825 } else {
1826 struct NCR_700_Device_Parameters *p = SCp->device->hostdata;
1827
1828 slot->tag = SCSI_NO_TAG;
1829 /* save current command for reselection */
1830 p->current_cmnd = SCp;
1831 }
1832 /* sanity check: some of the commands generated by the mid-layer
1833 * have an eccentric idea of their sc_data_direction */
1834 if(!scsi_sg_count(SCp) && !scsi_bufflen(SCp) &&
1835 SCp->sc_data_direction != DMA_NONE) {
1836 #ifdef NCR_700_DEBUG
1837 printk("53c700: Command");
1838 scsi_print_command(SCp);
1839 printk("Has wrong data direction %d\n", SCp->sc_data_direction);
1840 #endif
1841 SCp->sc_data_direction = DMA_NONE;
1842 }
1843
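/* map the command's data direction onto the scripts block-move
 * opcode to use for its scatter-gather entries; commands carrying
 * no data leave move_ins at zero so the SG table is never built */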
1844 switch (SCp->cmnd[0]) {
1845 case REQUEST_SENSE:
1846 /* clear the internal sense magic */
1847 SCp->cmnd[6] = 0;
1848 /* fall through */
1849 default:
1850 /* OK, get it from the command */
1851 switch(SCp->sc_data_direction) {
1852 case DMA_BIDIRECTIONAL:
1853 default:
1854 printk(KERN_ERR "53c700: Unknown command for data direction ");
1855 scsi_print_command(SCp);
1856
1857 move_ins = 0;
1858 break;
1859 case DMA_NONE:
1860 move_ins = 0;
1861 break;
1862 case DMA_FROM_DEVICE:
1863 move_ins = SCRIPT_MOVE_DATA_IN;
1864 break;
1865 case DMA_TO_DEVICE:
1866 move_ins = SCRIPT_MOVE_DATA_OUT;
1867 break;
1868 }
1869 }
1870
1871 /* now build the scatter gather list */
1872 direction = SCp->sc_data_direction;
1873 if(move_ins != 0) {
1874 int i;
1875 int sg_count;
1876 dma_addr_t vPtr = 0;
1877 struct scatterlist *sg;
1878 __u32 count = 0;
1879
1880 sg_count = scsi_dma_map(SCp);
1881 BUG_ON(sg_count < 0);
1882
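/* each scatter-gather entry is a scripts MOVE instruction: the top
 * byte carries the opcode, the low 24 bits the byte count, and pAddr
 * the bus address of the segment; the table is terminated with a
 * scripts RETURN so control can go back to the main script once the
 * last segment completes */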
1883 scsi_for_each_sg(SCp, sg, sg_count, i) {
1884 vPtr = sg_dma_address(sg);
1885 count = sg_dma_len(sg);
1886
1887 slot->SG[i].ins = bS_to_host(move_ins | count);
1888 DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
1889 i, count, slot->SG[i].ins, (unsigned long)vPtr));
1890 slot->SG[i].pAddr = bS_to_host(vPtr);
1891 }
1892 slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
1893 slot->SG[i].pAddr = 0;
1894 dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1895 DEBUG((" SETTING %p to %x\n",
1896 (&slot->pSG[i].ins),
1897 slot->SG[i].ins));
1898 }
1899 slot->resume_offset = 0;
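/* the CDB itself is DMA mapped so the scripts engine can send it
 * directly during the command phase */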
1900 slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
1901 MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1902 NCR_700_start_command(SCp);
1903 return 0;
1904 }
1905
1906 STATIC DEF_SCSI_QCMD(NCR_700_queuecommand)
1907
1908 STATIC int
1909 NCR_700_abort(struct scsi_cmnd * SCp)
1910 {
1911 struct NCR_700_command_slot *slot;
1912
1913 scmd_printk(KERN_INFO, SCp, "abort command\n");
1914
1915 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1916
1917 if(slot == NULL)
1918 /* no outstanding command to abort */
1919 return SUCCESS;
1920 if(SCp->cmnd[0] == TEST_UNIT_READY) {
1921 /* FIXME: This is because of a problem in the new
1922  * error handler. When it is in error recovery, it
1923  * will send a TUR to a device it thinks may still be
1924  * showing a problem. If the TUR gets no response, the
1925  * error handler will abort the command and mark the
1926  * device offline. Unfortunately, it does no other error
1927  * recovery, so this would leave us with an outstanding
1928  * command occupying a slot. Rather than allow this to
1929  * happen, we issue a bus reset to force all
1930  * outstanding commands to terminate here. */
1931 NCR_700_internal_bus_reset(SCp->device->host);
1932 /* still drop through and return failed */
1933 }
1934 return FAILED;
1935
1936 }
1937
1938 STATIC int
1939 NCR_700_host_reset(struct scsi_cmnd * SCp)
1940 {
1941 DECLARE_COMPLETION_ONSTACK(complete);
1942 struct NCR_700_Host_Parameters *hostdata =
1943 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1944
1945 scmd_printk(KERN_INFO, SCp,
1946 "New error handler wants HOST reset, cmd %p\n\t", SCp);
1947 scsi_print_command(SCp);
1948
1949 /* In theory, eh_complete should always be null because the
1950 * eh is single threaded, but just in case we're handling a
1951 * reset via sg or something */
1952 spin_lock_irq(SCp->device->host->host_lock);
1953 while (hostdata->eh_complete != NULL) {
1954 spin_unlock_irq(SCp->device->host->host_lock);
1955 msleep_interruptible(100);
1956 spin_lock_irq(SCp->device->host->host_lock);
1957 }
1958
1959 hostdata->eh_complete = &complete;
1960 NCR_700_internal_bus_reset(SCp->device->host);
1961 NCR_700_chip_reset(SCp->device->host);
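/* the bus reset is picked up by the interrupt handler, which fails
 * all outstanding commands and then completes eh_complete; we wait
 * for that below before touching the host again */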
1962
1963 spin_unlock_irq(SCp->device->host->host_lock);
1964 wait_for_completion(&complete);
1965 spin_lock_irq(SCp->device->host->host_lock);
1966
1967 hostdata->eh_complete = NULL;
1968 /* Revalidate the transport parameters of the failing device */
1969 if(hostdata->fast)
1970 spi_schedule_dv_device(SCp->device);
1971
1972 spin_unlock_irq(SCp->device->host->host_lock);
1973 return SUCCESS;
1974 }
1975
1976 STATIC void
1977 NCR_700_set_period(struct scsi_target *STp, int period)
1978 {
1979 struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
1980 struct NCR_700_Host_Parameters *hostdata =
1981 (struct NCR_700_Host_Parameters *)SHp->hostdata[0];
1982
1983 if(!hostdata->fast)
1984 return;
1985
1986 if(period < hostdata->min_period)
1987 period = hostdata->min_period;
1988
1989 spi_period(STp) = period;
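/* clearing the negotiation flags forces a fresh SDTR exchange (using
 * the new period) the next time a command is sent to this target */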
1990 spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
1991 NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1992 spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
1993 }
1994
1995 STATIC void
1996 NCR_700_set_offset(struct scsi_target *STp, int offset)
1997 {
1998 struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
1999 struct NCR_700_Host_Parameters *hostdata =
2000 (struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2001 int max_offset = hostdata->chip710
2002 ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET;
2003
2004 if(!hostdata->fast)
2005 return;
2006
2007 if(offset > max_offset)
2008 offset = max_offset;
2009
2010 /* if we're currently async, make sure the period is reasonable */
2011 if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period ||
2012 spi_period(STp) > 0xff))
2013 spi_period(STp) = hostdata->min_period;
2014
2015 spi_offset(STp) = offset;
2016 spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2017 NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2018 spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2019 }
2020
2021 STATIC int
2022 NCR_700_slave_alloc(struct scsi_device *SDp)
2023 {
2024 SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters),
2025 GFP_KERNEL);
2026
2027 if (!SDp->hostdata)
2028 return -ENOMEM;
2029
2030 return 0;
2031 }
2032
2033 STATIC int
2034 NCR_700_slave_configure(struct scsi_device *SDp)
2035 {
2036 struct NCR_700_Host_Parameters *hostdata =
2037 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2038
2039 /* to do here: allocate memory; build a queue_full list */
2040 if(SDp->tagged_supported) {
2041 scsi_change_queue_depth(SDp, NCR_700_DEFAULT_TAGS);
2042 NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2043 }
2044
2045 if(hostdata->fast) {
2046 /* Find the correct offset and period via domain validation */
2047 if (!spi_initial_dv(SDp->sdev_target))
2048 spi_dv_device(SDp);
2049 } else {
2050 spi_offset(SDp->sdev_target) = 0;
2051 spi_period(SDp->sdev_target) = 0;
2052 }
2053 return 0;
2054 }
2055
2056 STATIC void
2057 NCR_700_slave_destroy(struct scsi_device *SDp)
2058 {
2059 kfree(SDp->hostdata);
2060 SDp->hostdata = NULL;
2061 }
2062
2063 static int
2064 NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
2065 {
2066 if (depth > NCR_700_MAX_TAGS)
2067 depth = NCR_700_MAX_TAGS;
2068 return scsi_change_queue_depth(SDp, depth);
2069 }
2070
2071 static ssize_t
2072 NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
2073 {
2074 struct scsi_device *SDp = to_scsi_device(dev);
2075
2076 return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp));
2077 }
2078
2079 static struct device_attribute NCR_700_active_tags_attr = {
2080 .attr = {
2081 .name = "active_tags",
2082 .mode = S_IRUGO,
2083 },
2084 .show = NCR_700_show_active_tags,
2085 };
2086
2087 STATIC struct device_attribute *NCR_700_dev_attrs[] = {
2088 &NCR_700_active_tags_attr,
2089 NULL,
2090 };
2091
2092 EXPORT_SYMBOL(NCR_700_detect);
2093 EXPORT_SYMBOL(NCR_700_release);
2094 EXPORT_SYMBOL(NCR_700_intr);
2095
2096 static struct spi_function_template NCR_700_transport_functions = {
2097 .set_period = NCR_700_set_period,
2098 .show_period = 1,
2099 .set_offset = NCR_700_set_offset,
2100 .show_offset = 1,
2101 };
2102
2103 static int __init NCR_700_init(void)
2104 {
2105 NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions);
2106 if(!NCR_700_transport_template)
2107 return -ENODEV;
2108 return 0;
2109 }
2110
2111 static void __exit NCR_700_exit(void)
2112 {
2113 spi_release_transport(NCR_700_transport_template);
2114 }
2115
2116 module_init(NCR_700_init);
2117 module_exit(NCR_700_exit);
2118
2119