1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	Adaptec AAC series RAID controller driver
4  *	(c) Copyright 2001 Red Hat Inc.
5  *
6  * based on the old aacraid driver that is..
7  * Adaptec aacraid device driver for Linux.
8  *
9  * Copyright (c) 2000-2010 Adaptec, Inc.
10  *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
11  *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
12  *
13  * Module Name:
14  *  src.c
15  *
16  * Abstract: Hardware Device Interface for PMC SRC based controllers
17  */
18 
19 #include <linux/kernel.h>
20 #include <linux/init.h>
21 #include <linux/types.h>
22 #include <linux/pci.h>
23 #include <linux/spinlock.h>
24 #include <linux/slab.h>
25 #include <linux/blkdev.h>
26 #include <linux/delay.h>
27 #include <linux/completion.h>
28 #include <linux/time.h>
29 #include <linux/interrupt.h>
30 #include <scsi/scsi_host.h>
31 
32 #include "aacraid.h"
33 
34 static int aac_src_get_sync_status(struct aac_dev *dev);
35 
aac_src_intr_message(int irq,void * dev_id)36 static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
37 {
38 	struct aac_msix_ctx *ctx;
39 	struct aac_dev *dev;
40 	unsigned long bellbits, bellbits_shifted;
41 	int vector_no;
42 	int isFastResponse, mode;
43 	u32 index, handle;
44 
45 	ctx = (struct aac_msix_ctx *)dev_id;
46 	dev = ctx->dev;
47 	vector_no = ctx->vector_no;
48 
49 	if (dev->msi_enabled) {
50 		mode = AAC_INT_MODE_MSI;
51 		if (vector_no == 0) {
52 			bellbits = src_readl(dev, MUnit.ODR_MSI);
53 			if (bellbits & 0x40000)
54 				mode |= AAC_INT_MODE_AIF;
55 			if (bellbits & 0x1000)
56 				mode |= AAC_INT_MODE_SYNC;
57 		}
58 	} else {
59 		mode = AAC_INT_MODE_INTX;
60 		bellbits = src_readl(dev, MUnit.ODR_R);
61 		if (bellbits & PmDoorBellResponseSent) {
62 			bellbits = PmDoorBellResponseSent;
63 			src_writel(dev, MUnit.ODR_C, bellbits);
64 			src_readl(dev, MUnit.ODR_C);
65 		} else {
66 			bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
67 			src_writel(dev, MUnit.ODR_C, bellbits);
68 			src_readl(dev, MUnit.ODR_C);
69 
70 			if (bellbits_shifted & DoorBellAifPending)
71 				mode |= AAC_INT_MODE_AIF;
72 			else if (bellbits_shifted & OUTBOUNDDOORBELL_0)
73 				mode |= AAC_INT_MODE_SYNC;
74 		}
75 	}
76 
77 	if (mode & AAC_INT_MODE_SYNC) {
78 		unsigned long sflags;
79 		struct list_head *entry;
80 		int send_it = 0;
81 		extern int aac_sync_mode;
82 
83 		if (!aac_sync_mode && !dev->msi_enabled) {
84 			src_writel(dev, MUnit.ODR_C, bellbits);
85 			src_readl(dev, MUnit.ODR_C);
86 		}
87 
88 		if (dev->sync_fib) {
89 			if (dev->sync_fib->callback)
90 				dev->sync_fib->callback(dev->sync_fib->callback_data,
91 					dev->sync_fib);
92 			spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
93 			if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
94 				dev->management_fib_count--;
95 				complete(&dev->sync_fib->event_wait);
96 			}
97 			spin_unlock_irqrestore(&dev->sync_fib->event_lock,
98 						sflags);
99 			spin_lock_irqsave(&dev->sync_lock, sflags);
100 			if (!list_empty(&dev->sync_fib_list)) {
101 				entry = dev->sync_fib_list.next;
102 				dev->sync_fib = list_entry(entry,
103 							   struct fib,
104 							   fiblink);
105 				list_del(entry);
106 				send_it = 1;
107 			} else {
108 				dev->sync_fib = NULL;
109 			}
110 			spin_unlock_irqrestore(&dev->sync_lock, sflags);
111 			if (send_it) {
112 				aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
113 					(u32)dev->sync_fib->hw_fib_pa,
114 					0, 0, 0, 0, 0,
115 					NULL, NULL, NULL, NULL, NULL);
116 			}
117 		}
118 		if (!dev->msi_enabled)
119 			mode = 0;
120 
121 	}
122 
123 	if (mode & AAC_INT_MODE_AIF) {
124 		/* handle AIF */
125 		if (dev->sa_firmware) {
126 			u32 events = src_readl(dev, MUnit.SCR0);
127 
128 			aac_intr_normal(dev, events, 1, 0, NULL);
129 			writel(events, &dev->IndexRegs->Mailbox[0]);
130 			src_writel(dev, MUnit.IDR, 1 << 23);
131 		} else {
132 			if (dev->aif_thread && dev->fsa_dev)
133 				aac_intr_normal(dev, 0, 2, 0, NULL);
134 		}
135 		if (dev->msi_enabled)
136 			aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT);
137 		mode = 0;
138 	}
139 
140 	if (mode) {
141 		index = dev->host_rrq_idx[vector_no];
142 
143 		for (;;) {
144 			isFastResponse = 0;
145 			/* remove toggle bit (31) */
146 			handle = le32_to_cpu((dev->host_rrq[index])
147 				& 0x7fffffff);
148 			/* check fast response bits (30, 1) */
149 			if (handle & 0x40000000)
150 				isFastResponse = 1;
151 			handle &= 0x0000ffff;
152 			if (handle == 0)
153 				break;
154 			handle >>= 2;
155 			if (dev->msi_enabled && dev->max_msix > 1)
156 				atomic_dec(&dev->rrq_outstanding[vector_no]);
157 			aac_intr_normal(dev, handle, 0, isFastResponse, NULL);
158 			dev->host_rrq[index++] = 0;
159 			if (index == (vector_no + 1) * dev->vector_cap)
160 				index = vector_no * dev->vector_cap;
161 			dev->host_rrq_idx[vector_no] = index;
162 		}
163 		mode = 0;
164 	}
165 
166 	return IRQ_HANDLED;
167 }
168 
/**
 *	aac_src_disable_interrupt	-	Disable interrupts
 *	@dev: Adapter
 *
 *	Mask every interrupt source by writing all ones to the outbound
 *	interrupt mask register, caching the value in dev->OIMR.
 */

static void aac_src_disable_interrupt(struct aac_dev *dev)
{
	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
}
178 
/**
 *	aac_src_enable_interrupt_message	-	Enable interrupts
 *	@dev: Adapter
 *
 *	Re-enable message-mode interrupt delivery via the device register
 *	access helper.
 */

static void aac_src_enable_interrupt_message(struct aac_dev *dev)
{
	aac_src_access_devreg(dev, AAC_ENABLE_INTERRUPT);
}
188 
/**
 *	src_sync_cmd	-	send a command and wait
 *	@dev: Adapter
 *	@command: Command to execute
 *	@p1: first parameter
 *	@p2: second parameter
 *	@p3: third parameter
 *	@p4: fourth parameter
 *	@p5: fifth parameter (see note below)
 *	@p6: sixth parameter (see note below)
 *	@status: adapter status from Mailbox 0 (may be NULL)
 *	@r1: return value from Mailbox 1 (may be NULL)
 *	@r2: return value from Mailbox 2 (may be NULL)
 *	@r3: return value from Mailbox 3 (may be NULL)
 *	@r4: return value from Mailbox 4 (may be NULL)
 *
 *	This routine will send a synchronous command to the adapter and wait
 *	for its	completion.
 *
 *	Returns 0 on success (or when the command is posted without
 *	waiting in sync mode / soft reset), -ETIMEDOUT if the adapter
 *	never raises doorbell 0.
 *
 *	NOTE(review): only p1-p4 are written to Mailboxes 1-4 below; p5 and
 *	p6 are accepted but never transferred to the adapter - confirm this
 *	is intentional for SRC-class controllers.
 */

static int src_sync_cmd(struct aac_dev *dev, u32 command,
	u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
	u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4)
{
	unsigned long start;
	unsigned long delay;
	int ok;

	/*
	 *	Write the command into Mailbox 0
	 */
	writel(command, &dev->IndexRegs->Mailbox[0]);
	/*
	 *	Write the parameters into Mailboxes 1 - 6
	 */
	writel(p1, &dev->IndexRegs->Mailbox[1]);
	writel(p2, &dev->IndexRegs->Mailbox[2]);
	writel(p3, &dev->IndexRegs->Mailbox[3]);
	writel(p4, &dev->IndexRegs->Mailbox[4]);

	/*
	 *	Clear the synch command doorbell to start on a clean slate.
	 */
	if (!dev->msi_enabled)
		src_writel(dev,
			   MUnit.ODR_C,
			   OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);

	/*
	 *	Disable doorbell interrupts
	 */
	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);

	/*
	 *	Force the completion of the mask register write before issuing
	 *	the interrupt.
	 */
	src_readl(dev, MUnit.OIMR);

	/*
	 *	Signal that there is a new synch command
	 */
	src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT);

	/* In sync mode (SEND_SYNCHRONOUS_FIB) or during a soft reset the
	 * caller handles completion itself - skip the wait entirely. */
	if ((!dev->sync_mode || command != SEND_SYNCHRONOUS_FIB) &&
		!dev->in_soft_reset) {
		ok = 0;
		start = jiffies;

		if (command == IOP_RESET_ALWAYS) {
			/* Wait up to 10 sec */
			delay = 10*HZ;
		} else {
			/* Wait up to 5 minutes */
			delay = 300*HZ;
		}
		while (time_before(jiffies, start+delay)) {
			udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
			/*
			 *	Mon960 will set doorbell0 bit when it has completed the command.
			 */
			if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
				/*
				 *	Clear the doorbell.
				 */
				if (dev->msi_enabled)
					aac_src_access_devreg(dev,
						AAC_CLEAR_SYNC_BIT);
				else
					src_writel(dev,
						MUnit.ODR_C,
						OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
				ok = 1;
				break;
			}
			/*
			 *	Yield the processor in case we are slow
			 */
			msleep(1);
		}
		if (unlikely(ok != 1)) {
			/*
			 *	Restore interrupt mask even though we timed out
			 */
			aac_adapter_enable_int(dev);
			return -ETIMEDOUT;
		}
		/*
		 *	Pull the synch status from Mailbox 0.
		 */
		if (status)
			*status = readl(&dev->IndexRegs->Mailbox[0]);
		if (r1)
			*r1 = readl(&dev->IndexRegs->Mailbox[1]);
		if (r2)
			*r2 = readl(&dev->IndexRegs->Mailbox[2]);
		if (r3)
			*r3 = readl(&dev->IndexRegs->Mailbox[3]);
		if (r4)
			*r4 = readl(&dev->IndexRegs->Mailbox[4]);
		/* Mailbox 5 carries the adapter's MSI-X vector count for
		 * this particular command. */
		if (command == GET_COMM_PREFERRED_SETTINGS)
			dev->max_msix =
				readl(&dev->IndexRegs->Mailbox[5]) & 0xFFFF;
		/*
		 *	Clear the synch command doorbell.
		 */
		if (!dev->msi_enabled)
			src_writel(dev,
				MUnit.ODR_C,
				OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
	}

	/*
	 *	Restore interrupt mask
	 */
	aac_adapter_enable_int(dev);
	return 0;
}
318 
/**
 *	aac_src_interrupt_adapter	-	interrupt adapter
 *	@dev: Adapter
 *
 *	Send an interrupt to the i960 and breakpoint it.
 */

static void aac_src_interrupt_adapter(struct aac_dev *dev)
{
	/* Fire-and-forget: all parameter and return slots are unused. */
	src_sync_cmd(dev, BREAKPOINT_REQUEST,
		0, 0, 0, 0, 0, 0,
		NULL, NULL, NULL, NULL, NULL);
}
332 
333 /**
334  *	aac_src_notify_adapter		-	send an event to the adapter
335  *	@dev: Adapter
336  *	@event: Event to send
337  *
338  *	Notify the i960 that something it probably cares about has
339  *	happened.
340  */
341 
aac_src_notify_adapter(struct aac_dev * dev,u32 event)342 static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
343 {
344 	switch (event) {
345 
346 	case AdapNormCmdQue:
347 		src_writel(dev, MUnit.ODR_C,
348 			INBOUNDDOORBELL_1 << SRC_ODR_SHIFT);
349 		break;
350 	case HostNormRespNotFull:
351 		src_writel(dev, MUnit.ODR_C,
352 			INBOUNDDOORBELL_4 << SRC_ODR_SHIFT);
353 		break;
354 	case AdapNormRespQue:
355 		src_writel(dev, MUnit.ODR_C,
356 			INBOUNDDOORBELL_2 << SRC_ODR_SHIFT);
357 		break;
358 	case HostNormCmdNotFull:
359 		src_writel(dev, MUnit.ODR_C,
360 			INBOUNDDOORBELL_3 << SRC_ODR_SHIFT);
361 		break;
362 	case FastIo:
363 		src_writel(dev, MUnit.ODR_C,
364 			INBOUNDDOORBELL_6 << SRC_ODR_SHIFT);
365 		break;
366 	case AdapPrintfDone:
367 		src_writel(dev, MUnit.ODR_C,
368 			INBOUNDDOORBELL_5 << SRC_ODR_SHIFT);
369 		break;
370 	default:
371 		BUG();
372 		break;
373 	}
374 }
375 
/**
 *	aac_src_start_adapter		-	activate adapter
 *	@dev:	Adapter
 *
 *	Start up processing on an i960 based AAC adapter: reset the
 *	per-vector response-queue bookkeeping, stamp the init struct with
 *	the current wall-clock time, and hand its address to the firmware.
 */

static void aac_src_start_adapter(struct aac_dev *dev)
{
	union aac_init *init;
	int i;

	 /* reset host_rrq_idx first */
	for (i = 0; i < dev->max_msix; i++) {
		dev->host_rrq_idx[i] = i * dev->vector_cap;
		atomic_set(&dev->rrq_outstanding[i], 0);
	}
	atomic_set(&dev->msix_counter, 0);
	dev->fibs_pushed_no = 0;

	init = dev->init;
	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
		/* TYPE3 (r8) init takes a 64-bit address plus the struct
		 * size including the per-HRRQ entries. */
		init->r8.host_elapsed_seconds =
			cpu_to_le32(ktime_get_real_seconds());
		src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
			lower_32_bits(dev->init_pa),
			upper_32_bits(dev->init_pa),
			sizeof(struct _r8) +
			(AAC_MAX_HRRQ - 1) * sizeof(struct _rrq),
			0, 0, 0, NULL, NULL, NULL, NULL, NULL);
	} else {
		init->r7.host_elapsed_seconds =
			cpu_to_le32(ktime_get_real_seconds());
		// We can only use a 32 bit address here
		src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
			(u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
			NULL, NULL, NULL, NULL, NULL);
	}

}
416 
417 /**
418  *	aac_src_check_health
419  *	@dev: device to check if healthy
420  *
421  *	Will attempt to determine if the specified adapter is alive and
422  *	capable of handling requests, returning 0 if alive.
423  */
aac_src_check_health(struct aac_dev * dev)424 static int aac_src_check_health(struct aac_dev *dev)
425 {
426 	u32 status = src_readl(dev, MUnit.OMR);
427 
428 	/*
429 	 *	Check to see if the board panic'd.
430 	 */
431 	if (unlikely(status & KERNEL_PANIC))
432 		goto err_blink;
433 
434 	/*
435 	 *	Check to see if the board failed any self tests.
436 	 */
437 	if (unlikely(status & SELF_TEST_FAILED))
438 		goto err_out;
439 
440 	/*
441 	 *	Check to see if the board failed any self tests.
442 	 */
443 	if (unlikely(status & MONITOR_PANIC))
444 		goto err_out;
445 
446 	/*
447 	 *	Wait for the adapter to be up and running.
448 	 */
449 	if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
450 		return -3;
451 	/*
452 	 *	Everything is OK
453 	 */
454 	return 0;
455 
456 err_out:
457 	return -1;
458 
459 err_blink:
460 	return (status >> 16) & 0xFF;
461 }
462 
/* Round-robin selection of the next MSI-X vector to use for a request. */
static inline u32 aac_get_vector(struct aac_dev *dev)
{
	return atomic_inc_return(&dev->msix_counter)%dev->max_msix;
}
467 
/**
 *	aac_src_deliver_message
 *	@fib: fib to issue
 *
 *	Will send a fib, returning 0 if successful.  Selects a reply
 *	vector, stamps it into the request, then posts the command's DMA
 *	address (with its size encoded in the low bits) to the adapter's
 *	inbound queue registers.
 */
static int aac_src_deliver_message(struct fib *fib)
{
	struct aac_dev *dev = fib->dev;
	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
	u32 fibsize;
	dma_addr_t address;
	struct aac_fib_xporthdr *pFibX;
	int native_hba;
#if !defined(writeq)
	unsigned long flags;
#endif

	u16 vector_no;

	atomic_inc(&q->numpending);

	native_hba = (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) ? 1 : 0;


	/*
	 * Pick the reply vector.  AIF fibs stay on vector 0; other
	 * requests are spread across vectors when MSI-X is active.
	 */
	if (dev->msi_enabled && dev->max_msix > 1 &&
		(native_hba || fib->hw_fib_va->header.Command != AifRequest)) {

		if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)
			&& dev->sa_firmware)
			vector_no = aac_get_vector(dev);
		else
			vector_no = fib->vector_no;

		if (native_hba) {
			/*
			 * Stamp the reply queue id and fold the vector into
			 * the request id so the completion arrives on the
			 * chosen vector.
			 */
			if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
				struct aac_hba_tm_req *tm_req;

				tm_req = (struct aac_hba_tm_req *)
						fib->hw_fib_va;
				if (tm_req->iu_type ==
					HBA_IU_TYPE_SCSI_TM_REQ) {
					((struct aac_hba_tm_req *)
						fib->hw_fib_va)->reply_qid
							= vector_no;
					((struct aac_hba_tm_req *)
						fib->hw_fib_va)->request_id
							+= (vector_no << 16);
				} else {
					((struct aac_hba_reset_req *)
						fib->hw_fib_va)->reply_qid
							= vector_no;
					((struct aac_hba_reset_req *)
						fib->hw_fib_va)->request_id
							+= (vector_no << 16);
				}
			} else {
				((struct aac_hba_cmd_req *)
					fib->hw_fib_va)->reply_qid
						= vector_no;
				((struct aac_hba_cmd_req *)
					fib->hw_fib_va)->request_id
						+= (vector_no << 16);
			}
		} else {
			fib->hw_fib_va->header.Handle += (vector_no << 16);
		}
	} else {
		vector_no = 0;
	}

	atomic_inc(&dev->rrq_outstanding[vector_no]);

	if (native_hba) {
		/*
		 * Native HBA path: the low 5 bits of the inbound queue
		 * entry encode the command size in 128-byte units, minus 1.
		 */
		address = fib->hw_fib_pa;
		fibsize = (fib->hbacmd_size + 127) / 128 - 1;
		if (fibsize > 31)
			fibsize = 31;
		address |= fibsize;
#if defined(writeq)
		src_writeq(dev, MUnit.IQN_L, (u64)address);
#else
		/* Two 32-bit halves must not interleave with other CPUs. */
		spin_lock_irqsave(&fib->dev->iq_lock, flags);
		src_writel(dev, MUnit.IQN_H,
			upper_32_bits(address) & 0xffffffff);
		src_writel(dev, MUnit.IQN_L, address & 0xffffffff);
		spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
#endif
	} else {
		if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
			dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
			/* Calculate the amount to the fibsize bits */
			fibsize = (le16_to_cpu(fib->hw_fib_va->header.Size)
				+ 127) / 128 - 1;
			/* New FIB header, 32-bit */
			address = fib->hw_fib_pa;
			fib->hw_fib_va->header.StructType = FIB_MAGIC2;
			fib->hw_fib_va->header.SenderFibAddress =
				cpu_to_le32((u32)address);
			fib->hw_fib_va->header.u.TimeStamp = 0;
			/* TYPE2/3 fibs must live in 32-bit DMA space. */
			WARN_ON(upper_32_bits(address) != 0L);
		} else {
			/* Calculate the amount to the fibsize bits */
			fibsize = (sizeof(struct aac_fib_xporthdr) +
				le16_to_cpu(fib->hw_fib_va->header.Size)
				+ 127) / 128 - 1;
			/* Fill XPORT header (placed just before the fib). */
			pFibX = (struct aac_fib_xporthdr *)
				((unsigned char *)fib->hw_fib_va -
				sizeof(struct aac_fib_xporthdr));
			pFibX->Handle = fib->hw_fib_va->header.Handle;
			pFibX->HostAddress =
				cpu_to_le64((u64)fib->hw_fib_pa);
			pFibX->Size = cpu_to_le32(
				le16_to_cpu(fib->hw_fib_va->header.Size));
			address = fib->hw_fib_pa -
				(u64)sizeof(struct aac_fib_xporthdr);
		}
		if (fibsize > 31)
			fibsize = 31;
		address |= fibsize;

#if defined(writeq)
		src_writeq(dev, MUnit.IQ_L, (u64)address);
#else
		spin_lock_irqsave(&fib->dev->iq_lock, flags);
		src_writel(dev, MUnit.IQ_H,
			upper_32_bits(address) & 0xffffffff);
		src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
		spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
#endif
	}
	return 0;
}
602 
/**
 *	aac_src_ioremap
 *	@dev: adapter whose BARs are (un)mapped
 *	@size: mapping resize request; 0 means unmap everything
 *
 *	Map BAR1 (debug region) and BAR0 (registers, exposing the Tupelo
 *	IndexRegs) for an SRC controller.  Returns 0 on success, -1 if
 *	either mapping fails (partial mappings are undone).
 */
static int aac_src_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		iounmap(dev->regs.src.bar0);
		dev->base = dev->regs.src.bar0 = NULL;
		return 0;
	}
	dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2),
		AAC_MIN_SRC_BAR1_SIZE);
	dev->base = NULL;
	if (dev->regs.src.bar1 == NULL)
		return -1;
	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
	if (dev->base == NULL) {
		/* BAR0 failed - drop the BAR1 mapping too. */
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		return -1;
	}
	dev->IndexRegs = &((struct src_registers __iomem *)
		dev->base)->u.tupelo.IndexRegs;
	return 0;
}
632 
/**
 *  aac_srcv_ioremap
 *	@dev: adapter whose BARs are (un)mapped
 *	@size: mapping resize request; 0 means unmap
 *
 *	SRCv variant of aac_src_ioremap(): same BAR1/BAR0 mapping but the
 *	IndexRegs live in the Denali register layout.  Returns 0 on
 *	success, -1 on mapping failure.
 */
static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.src.bar0);
		dev->base = dev->regs.src.bar0 = NULL;
		return 0;
	}

	dev->regs.src.bar1 =
	ioremap(pci_resource_start(dev->pdev, 2), AAC_MIN_SRCV_BAR1_SIZE);
	dev->base = NULL;
	if (dev->regs.src.bar1 == NULL)
		return -1;
	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
	if (dev->base == NULL) {
		/* BAR0 failed - drop the BAR1 mapping too. */
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		return -1;
	}
	dev->IndexRegs = &((struct src_registers __iomem *)
		dev->base)->u.denali.IndexRegs;
	return 0;
}
661 
aac_set_intx_mode(struct aac_dev * dev)662 void aac_set_intx_mode(struct aac_dev *dev)
663 {
664 	if (dev->msi_enabled) {
665 		aac_src_access_devreg(dev, AAC_ENABLE_INTX);
666 		dev->msi_enabled = 0;
667 		msleep(5000); /* Delay 5 seconds */
668 	}
669 }
670 
aac_clear_omr(struct aac_dev * dev)671 static void aac_clear_omr(struct aac_dev *dev)
672 {
673 	u32 omr_value = 0;
674 
675 	omr_value = src_readl(dev, MUnit.OMR);
676 
677 	/*
678 	 * Check for PCI Errors or Kernel Panic
679 	 */
680 	if ((omr_value == INVALID_OMR) || (omr_value & KERNEL_PANIC))
681 		omr_value = 0;
682 
683 	/*
684 	 * Preserve MSIX Value if any
685 	 */
686 	src_writel(dev, MUnit.OMR, omr_value & AAC_INT_MODE_MSIX);
687 	src_readl(dev, MUnit.OMR);
688 }
689 
/*
 * Ask the firmware to dump its FIBs before an IOP reset, when the
 * aac_fib_dump module option is set and the adapter advertises the
 * capability in supported_options3.
 *
 * NOTE(review): supported_options3 is __le32 but is tested directly
 * against a host-order constant - confirm AAC_OPTION_SUPPORTED3_* is
 * defined in little-endian form or that this path is LE-only.
 */
static void aac_dump_fw_fib_iop_reset(struct aac_dev *dev)
{
	__le32 supported_options3;

	if (!aac_fib_dump)
		return;

	supported_options3  = dev->supplement_adapter_info.supported_options3;
	if (!(supported_options3 & AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP))
		return;

	aac_adapter_sync_cmd(dev, IOP_RESET_FW_FIB_DUMP,
			0, 0, 0,  0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}
704 
aac_is_ctrl_up_and_running(struct aac_dev * dev)705 static bool aac_is_ctrl_up_and_running(struct aac_dev *dev)
706 {
707 	bool ctrl_up = true;
708 	unsigned long status, start;
709 	bool is_up = false;
710 
711 	start = jiffies;
712 	do {
713 		schedule();
714 		status = src_readl(dev, MUnit.OMR);
715 
716 		if (status == 0xffffffff)
717 			status = 0;
718 
719 		if (status & KERNEL_BOOTING) {
720 			start = jiffies;
721 			continue;
722 		}
723 
724 		if (time_after(jiffies, start+HZ*SOFT_RESET_TIME)) {
725 			ctrl_up = false;
726 			break;
727 		}
728 
729 		is_up = status & KERNEL_UP_AND_RUNNING;
730 
731 	} while (!is_up);
732 
733 	return ctrl_up;
734 }
735 
/* Tell the firmware an IOP reset is about to happen (always-reset variant). */
static void aac_notify_fw_of_iop_reset(struct aac_dev *dev)
{
	aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL,
						NULL, NULL, NULL, NULL);
}
741 
/*
 * Perform a full IOP reset: optionally dump firmware FIBs, warn the
 * firmware, fall back to INTx interrupts, clear the OMR, hit the reset
 * doorbell, then give the firmware 5 seconds to begin coming back.
 */
static void aac_send_iop_reset(struct aac_dev *dev)
{
	aac_dump_fw_fib_iop_reset(dev);

	aac_notify_fw_of_iop_reset(dev);

	aac_set_intx_mode(dev);

	aac_clear_omr(dev);

	src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK);

	msleep(5000);
}
756 
/*
 * Trigger the hardware soft reset by setting bit 0 of the inbound
 * soft-reset register, then wait up to 20 seconds for it to take.
 *
 * NOTE(review): dev->base is an __iomem pointer; the (char *) casts
 * discard that qualifier - sparse would flag these readl/writel
 * accesses.
 */
static void aac_send_hardware_soft_reset(struct aac_dev *dev)
{
	u_int32_t val;

	aac_clear_omr(dev);
	val = readl(((char *)(dev->base) + IBW_SWR_OFFSET));
	val |= 0x01;
	writel(val, ((char *)(dev->base) + IBW_SWR_OFFSET));
	msleep_interruptible(20000);
}
767 
/*
 * Restart the adapter after a fault.
 * @bled: BlinkLED / health code; < 0 means "check OMR for panic only",
 *        >= 2 with SA firmware disables the IOP-reset path.
 * @reset_type: bitmask of HW_IOP_RESET and/or HW_SOFT_RESET.
 *
 * Tries IOP reset first (when requested); on failure falls back to a
 * hardware soft reset, which only SA firmware supports.  Returns 0 on
 * success, -ENODEV when no reset method brought the controller back.
 */
static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
{
	bool is_ctrl_up;
	int ret = 0;

	if (bled < 0)
		goto invalid_out;

	if (bled)
		dev_err(&dev->pdev->dev, "adapter kernel panic'd %x.\n", bled);

	/*
	 * When there is a BlinkLED, IOP_RESET has not effect
	 */
	if (bled >= 2 && dev->sa_firmware && reset_type & HW_IOP_RESET)
		reset_type &= ~HW_IOP_RESET;

	/* Keep interrupts masked during the reset: the "enable" op is
	 * temporarily pointed at the disable routine. */
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;

	dev_err(&dev->pdev->dev, "Controller reset type is %d\n", reset_type);

	if (reset_type & HW_IOP_RESET) {
		dev_info(&dev->pdev->dev, "Issuing IOP reset\n");
		aac_send_iop_reset(dev);

		/*
		 * Creates a delay or wait till up and running comes thru
		 */
		is_ctrl_up = aac_is_ctrl_up_and_running(dev);
		if (!is_ctrl_up)
			dev_err(&dev->pdev->dev, "IOP reset failed\n");
		else {
			dev_info(&dev->pdev->dev, "IOP reset succeeded\n");
			goto set_startup;
		}
	}

	/* Only SA firmware supports the soft-reset fallback. */
	if (!dev->sa_firmware) {
		dev_err(&dev->pdev->dev, "ARC Reset attempt failed\n");
		ret = -ENODEV;
		goto out;
	}

	if (reset_type & HW_SOFT_RESET) {
		dev_info(&dev->pdev->dev, "Issuing SOFT reset\n");
		aac_send_hardware_soft_reset(dev);
		dev->msi_enabled = 0;

		is_ctrl_up = aac_is_ctrl_up_and_running(dev);
		if (!is_ctrl_up) {
			dev_err(&dev->pdev->dev, "SOFT reset failed\n");
			ret = -ENODEV;
			goto out;
		} else
			dev_info(&dev->pdev->dev, "SOFT reset succeeded\n");
	}

set_startup:
	/* Allow the restarted firmware plenty of time to boot. */
	if (startup_timeout < 300)
		startup_timeout = 300;

out:
	return ret;

invalid_out:
	/* bled < 0: just report whether the firmware has panic'd. */
	if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
		ret = -ENODEV;
	goto out;
}
837 
838 /**
839  *	aac_src_select_comm	-	Select communications method
840  *	@dev: Adapter
841  *	@comm: communications method
842  */
aac_src_select_comm(struct aac_dev * dev,int comm)843 static int aac_src_select_comm(struct aac_dev *dev, int comm)
844 {
845 	switch (comm) {
846 	case AAC_COMM_MESSAGE:
847 		dev->a_ops.adapter_intr = aac_src_intr_message;
848 		dev->a_ops.adapter_deliver = aac_src_deliver_message;
849 		break;
850 	default:
851 		return 1;
852 	}
853 	return 0;
854 }
855 
/**
 *  aac_src_init	-	initialize an Cardinal Frey Bar card
 *  @dev: device to configure
 *
 *  Map the registers, bring the firmware to the up-and-running state
 *  (restarting it if it panic'd), wire up the function dispatch table,
 *  set up interrupts and, unless in sync mode, start the adapter.
 *  Returns 0 on success, -1 on any failure.
 *
 *  NOTE(review): the error_iounmap label below performs no iounmap;
 *  confirm the caller tears down the mappings on failure.
 */

int aac_src_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char *name = dev->name;

	dev->a_ops.adapter_ioremap = aac_src_ioremap;
	dev->a_ops.adapter_comm = aac_src_select_comm;

	dev->base_size = AAC_MIN_SRC_BAR0_SIZE;
	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;

	if (dev->init_reset) {
		dev->init_reset = false;
		if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
			++restart;
	}

	/*
	 *	Check to see if the board panic'd while booting.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & KERNEL_PANIC) {
		if (aac_src_restart_adapter(dev,
			aac_src_check_health(dev), IOP_HWSOFT_RESET))
			goto error_iounmap;
		++restart;
	}
	/*
	 *	Check to see if the board failed any self tests.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n",
			dev->name, instance);
		goto error_iounmap;
	}
	/*
	 *	Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n",
			dev->name, instance);
		goto error_iounmap;
	}
	start = jiffies;
	/*
	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	while (!((status = src_readl(dev, MUnit.OMR)) &
		KERNEL_UP_AND_RUNNING)) {
		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		/* Before giving up entirely, try one restart once the
		 * shorter first-attempt window has elapsed. */
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_src_restart_adapter(dev,
				aac_src_check_health(dev), IOP_HWSOFT_RESET)))
				start = jiffies;
			++restart;
		}
		msleep(1);
	}
	if (restart && aac_commit)
		aac_commit = 1;
	/*
	 *	Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_notify = aac_src_notify_adapter;
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_check_health = aac_src_check_health;
	dev->a_ops.adapter_restart = aac_src_restart_adapter;
	dev->a_ops.adapter_start = aac_src_start_adapter;

	/*
	 *	First clear out all interrupts.  Then enable the one's that we
	 *	can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
	aac_adapter_disable_int(dev);
	src_writel(dev, MUnit.ODR_C, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
		goto error_iounmap;

	dev->msi = !pci_enable_msi(dev->pdev);

	dev->aac_msix[0].vector_no = 0;
	dev->aac_msix[0].dev = dev;

	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
			IRQF_SHARED, "aacraid", &(dev->aac_msix[0]))  < 0) {

		if (dev->msi)
			pci_disable_msi(dev->pdev);

		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
			name, instance);
		goto error_iounmap;
	}
	dev->dbg_base = pci_resource_start(dev->pdev, 2);
	dev->dbg_base_mapped = dev->regs.src.bar1;
	dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;

	aac_adapter_enable_int(dev);

	if (!dev->sync_mode) {
		/*
		 * Tell the adapter that all is configured, and it can
		 * start accepting requests
		 */
		aac_src_start_adapter(dev);
	}
	return 0;

error_iounmap:

	return -1;
}
1005 
/*
 * Poll up to 5 seconds for the firmware to complete a previously issued
 * sync command (doorbell 0).  Returns 0 on completion - in which case
 * the five mailbox values are copied into @status when non-NULL - or 1
 * on timeout.
 */
static int aac_src_wait_sync(struct aac_dev *dev, int *status)
{
	unsigned long start = jiffies;
	unsigned long usecs = 0;
	int delay = 5 * HZ;
	int rc = 1;

	while (time_before(jiffies, start+delay)) {
		/*
		 * Delay 5 microseconds to let Mon960 get info.
		 */
		udelay(5);

		/*
		 * Mon960 will set doorbell0 bit when it has completed the
		 * command.
		 */
		if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
			/*
			 * Clear: the doorbell.
			 */
			if (dev->msi_enabled)
				aac_src_access_devreg(dev, AAC_CLEAR_SYNC_BIT);
			else
				src_writel(dev, MUnit.ODR_C,
					OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
			rc = 0;

			break;
		}

		/*
		 * Yield the processor in case we are slow
		 */
		usecs = 1 * USEC_PER_MSEC;
		usleep_range(usecs, usecs + 50);
	}
	/*
	 * Pull the synch status from Mailbox 0.
	 */
	if (status && !rc) {
		status[0] = readl(&dev->IndexRegs->Mailbox[0]);
		status[1] = readl(&dev->IndexRegs->Mailbox[1]);
		status[2] = readl(&dev->IndexRegs->Mailbox[2]);
		status[3] = readl(&dev->IndexRegs->Mailbox[3]);
		status[4] = readl(&dev->IndexRegs->Mailbox[4]);
	}

	return rc;
}
1056 
/**
 *  aac_src_soft_reset	-	perform soft reset to speed up
 *  access
 *
 *  Assumptions: That the controller is in a state where we can
 *  bring it back to life with an init struct. We can only use
 *  fast sync commands, as the timeout is 5 seconds.
 *
 *  @dev: device to configure
 *
 *  Returns 0 on success; non-zero when the adapter is unreachable,
 *  does not support soft reset, or a step fails (the failed step is
 *  logged via state_str).
 */

static int aac_src_soft_reset(struct aac_dev *dev)
{
	u32 status_omr = src_readl(dev, MUnit.OMR);
	u32 status[5];
	int rc = 1;
	/* Index into state_str of the step currently being attempted. */
	int state = 0;
	char *state_str[7] = {
		"GET_ADAPTER_PROPERTIES Failed",
		"GET_ADAPTER_PROPERTIES timeout",
		"SOFT_RESET not supported",
		"DROP_IO Failed",
		"DROP_IO timeout",
		"Check Health failed"
	};

	if (status_omr == INVALID_OMR)
		return 1;       // pcie hosed

	if (!(status_omr & KERNEL_UP_AND_RUNNING))
		return 1;       // not up and running

	/*
	 * We go into soft reset mode to allow us to handle response
	 */
	dev->in_soft_reset = 1;
	dev->msi_enabled = status_omr & AAC_INT_MODE_MSIX;

	/* Get adapter properties */
	rc = aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 0, 0, 0,
		0, 0, 0, status+0, status+1, status+2, status+3, status+4);
	if (rc)
		goto out;

	state++;
	if (aac_src_wait_sync(dev, status)) {
		rc = 1;
		goto out;
	}

	/* Soft reset requires the extended-options capability bits. */
	state++;
	if (!(status[1] & le32_to_cpu(AAC_OPT_EXTENDED) &&
		(status[4] & le32_to_cpu(AAC_EXTOPT_SOFT_RESET)))) {
		rc = 2;
		goto out;
	}

	if ((status[1] & le32_to_cpu(AAC_OPT_EXTENDED)) &&
		(status[4] & le32_to_cpu(AAC_EXTOPT_SA_FIRMWARE)))
		dev->sa_firmware = 1;

	state++;
	rc = aac_adapter_sync_cmd(dev, DROP_IO, 0, 0, 0, 0, 0, 0,
		 status+0, status+1, status+2, status+3, status+4);

	if (rc)
		goto out;

	state++;
	if (aac_src_wait_sync(dev, status)) {
		rc = 3;
		goto out;
	}

	/* Mailbox 1 reports I/O the firmware could not drop. */
	if (status[1])
		dev_err(&dev->pdev->dev, "%s: %d outstanding I/O pending\n",
			__func__, status[1]);

	state++;
	rc = aac_src_check_health(dev);

out:
	dev->in_soft_reset = 0;
	dev->msi_enabled = 0;
	if (rc)
		dev_err(&dev->pdev->dev, "%s: %s status = %d", __func__,
			state_str[state], rc);

	return rc;
}
1148 /**
1149  *  aac_srcv_init	-	initialize an SRCv card
1150  *  @dev: device to configure
1151  *
1152  */
1153 
int aac_srcv_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char *name = dev->name;

	/* Install the SRCv-specific ioremap/comm-selection handlers first;
	 * everything below depends on BAR0 being mapped.
	 */
	dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
	dev->a_ops.adapter_comm = aac_src_select_comm;

	dev->base_size = AAC_MIN_SRCV_BAR0_SIZE;
	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	/* NOTE(review): enable_int deliberately points at the disable
	 * routine here so interrupts stay masked during bring-up; the
	 * real enable handler (aac_src_enable_interrupt_message) is
	 * installed further down, just before aac_adapter_enable_int().
	 */
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;

	/* One-shot reset request from probe/resume: try the fast soft
	 * reset first and fall back to a full IOP hardware/soft reset.
	 */
	if (dev->init_reset) {
		dev->init_reset = false;
		if (aac_src_soft_reset(dev)) {
			aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET);
			++restart;
		}
	}

	/*
	 *	Check to see if flash update is running.
	 *	Wait for the adapter to be up and running. Wait up to 5 minutes
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & FLASH_UPD_PENDING) {
		start = jiffies;
		do {
			status = src_readl(dev, MUnit.OMR);
			if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) {
				printk(KERN_ERR "%s%d: adapter flash update failed.\n",
					dev->name, instance);
				goto error_iounmap;
			}
		} while (!(status & FLASH_UPD_SUCCESS) &&
			 !(status & FLASH_UPD_FAILED));
		/* Delay 10 seconds.
		 * Because right now FW is doing a soft reset,
		 * do not read scratch pad register at this time
		 */
		ssleep(10);
	}
	/*
	 *	Check to see if the board panic'd while booting.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & KERNEL_PANIC) {
		if (aac_src_restart_adapter(dev,
			aac_src_check_health(dev), IOP_HWSOFT_RESET))
			goto error_iounmap;
		++restart;
	}
	/*
	 *	Check to see if the board failed any self tests.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
		goto error_iounmap;
	}
	/*
	 *	Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
		goto error_iounmap;
	}

	start = jiffies;
	/*
	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	do {
		status = src_readl(dev, MUnit.OMR);
		/* An all-ones read means the PCIe link is gone; treat it
		 * as "no status yet" so the timeout path handles it.
		 */
		if (status == INVALID_OMR)
			status = 0;

		/* Already restarted once and still failing, or total
		 * startup budget exhausted: give up.
		 */
		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		/* First failure (or early-timeout window expired): try one
		 * restart and re-arm the startup clock if it succeeds.
		 */
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_src_restart_adapter(dev,
				aac_src_check_health(dev), IOP_HWSOFT_RESET)))
				start = jiffies;
			++restart;
		}
		msleep(1);
	} while (!(status & KERNEL_UP_AND_RUNNING));

	/* After a restart, normalize any non-zero aac_commit to 1 */
	if (restart && aac_commit)
		aac_commit = 1;
	/*
	 *	Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
	/* still the disable routine on purpose; see note above */
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_notify = aac_src_notify_adapter;
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_check_health = aac_src_check_health;
	dev->a_ops.adapter_restart = aac_src_restart_adapter;
	dev->a_ops.adapter_start = aac_src_start_adapter;

	/*
	 *	First clear out all interrupts.  Then enable the one's that we
	 *	can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
	aac_adapter_disable_int(dev);
	src_writel(dev, MUnit.ODR_C, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	/* SRCv requires one of the message-type-2/3 comm interfaces */
	if ((dev->comm_interface != AAC_COMM_MESSAGE_TYPE2) &&
		(dev->comm_interface != AAC_COMM_MESSAGE_TYPE3))
		goto error_iounmap;
	if (dev->msi_enabled)
		aac_src_access_devreg(dev, AAC_ENABLE_MSIX);

	if (aac_acquire_irq(dev))
		goto error_iounmap;

	/* Debug/trace window lives in BAR2/BAR1 on SRCv */
	dev->dbg_base = pci_resource_start(dev->pdev, 2);
	dev->dbg_base_mapped = dev->regs.src.bar1;
	dev->dbg_size = AAC_MIN_SRCV_BAR1_SIZE;
	/* Now install the real interrupt-enable handler and use it */
	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;

	aac_adapter_enable_int(dev);

	if (!dev->sync_mode) {
		/*
		 * Tell the adapter that all is configured, and it can
		 * start accepting requests
		 */
		aac_src_start_adapter(dev);
	}
	return 0;

error_iounmap:

	return -1;
}
1315 
aac_src_access_devreg(struct aac_dev * dev,int mode)1316 void aac_src_access_devreg(struct aac_dev *dev, int mode)
1317 {
1318 	u_int32_t val;
1319 
1320 	switch (mode) {
1321 	case AAC_ENABLE_INTERRUPT:
1322 		src_writel(dev,
1323 			   MUnit.OIMR,
1324 			   dev->OIMR = (dev->msi_enabled ?
1325 					AAC_INT_ENABLE_TYPE1_MSIX :
1326 					AAC_INT_ENABLE_TYPE1_INTX));
1327 		break;
1328 
1329 	case AAC_DISABLE_INTERRUPT:
1330 		src_writel(dev,
1331 			   MUnit.OIMR,
1332 			   dev->OIMR = AAC_INT_DISABLE_ALL);
1333 		break;
1334 
1335 	case AAC_ENABLE_MSIX:
1336 		/* set bit 6 */
1337 		val = src_readl(dev, MUnit.IDR);
1338 		val |= 0x40;
1339 		src_writel(dev,  MUnit.IDR, val);
1340 		src_readl(dev, MUnit.IDR);
1341 		/* unmask int. */
1342 		val = PMC_ALL_INTERRUPT_BITS;
1343 		src_writel(dev, MUnit.IOAR, val);
1344 		val = src_readl(dev, MUnit.OIMR);
1345 		src_writel(dev,
1346 			   MUnit.OIMR,
1347 			   val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
1348 		break;
1349 
1350 	case AAC_DISABLE_MSIX:
1351 		/* reset bit 6 */
1352 		val = src_readl(dev, MUnit.IDR);
1353 		val &= ~0x40;
1354 		src_writel(dev, MUnit.IDR, val);
1355 		src_readl(dev, MUnit.IDR);
1356 		break;
1357 
1358 	case AAC_CLEAR_AIF_BIT:
1359 		/* set bit 5 */
1360 		val = src_readl(dev, MUnit.IDR);
1361 		val |= 0x20;
1362 		src_writel(dev, MUnit.IDR, val);
1363 		src_readl(dev, MUnit.IDR);
1364 		break;
1365 
1366 	case AAC_CLEAR_SYNC_BIT:
1367 		/* set bit 4 */
1368 		val = src_readl(dev, MUnit.IDR);
1369 		val |= 0x10;
1370 		src_writel(dev, MUnit.IDR, val);
1371 		src_readl(dev, MUnit.IDR);
1372 		break;
1373 
1374 	case AAC_ENABLE_INTX:
1375 		/* set bit 7 */
1376 		val = src_readl(dev, MUnit.IDR);
1377 		val |= 0x80;
1378 		src_writel(dev, MUnit.IDR, val);
1379 		src_readl(dev, MUnit.IDR);
1380 		/* unmask int. */
1381 		val = PMC_ALL_INTERRUPT_BITS;
1382 		src_writel(dev, MUnit.IOAR, val);
1383 		src_readl(dev, MUnit.IOAR);
1384 		val = src_readl(dev, MUnit.OIMR);
1385 		src_writel(dev, MUnit.OIMR,
1386 				val & (~(PMC_GLOBAL_INT_BIT2)));
1387 		break;
1388 
1389 	default:
1390 		break;
1391 	}
1392 }
1393 
aac_src_get_sync_status(struct aac_dev * dev)1394 static int aac_src_get_sync_status(struct aac_dev *dev)
1395 {
1396 	int msix_val = 0;
1397 	int legacy_val = 0;
1398 
1399 	msix_val = src_readl(dev, MUnit.ODR_MSI) & SRC_MSI_READ_MASK ? 1 : 0;
1400 
1401 	if (!dev->msi_enabled) {
1402 		/*
1403 		 * if Legacy int status indicates cmd is not complete
1404 		 * sample MSIx register to see if it indiactes cmd complete,
1405 		 * if yes set the controller in MSIx mode and consider cmd
1406 		 * completed
1407 		 */
1408 		legacy_val = src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT;
1409 		if (!(legacy_val & 1) && msix_val)
1410 			dev->msi_enabled = 1;
1411 		return legacy_val;
1412 	}
1413 
1414 	return msix_val;
1415 }
1416